xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c (revision f6f4cb8ada400367a1921f6b93fb9e02f53ac5e6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * EHCI Host Controller Driver (EHCI)
29  *
30  * The EHCI driver is a software driver which interfaces to the Universal
31  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32  * the Host Controller is defined by the EHCI Host Controller Interface.
33  *
34  * This module contains the main EHCI driver code which handles all USB
35  * transfers, bandwidth allocations and other general functionalities.
36  */
37 
38 #include <sys/usb/hcd/ehci/ehcid.h>
39 #include <sys/usb/hcd/ehci/ehci_isoch.h>
40 #include <sys/usb/hcd/ehci/ehci_xfer.h>
41 
42 /*
43  * EHCI MSI tunable:
44  *
45  * By default MSI is enabled on all supported platforms except for the
46  * EHCI controller of ULI1575 South bridge.
47  */
48 boolean_t ehci_enable_msi = B_TRUE;
49 
50 /* Pointer to the state structure */
51 extern void *ehci_statep;
52 
53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
54 
55 extern uint_t ehci_vt62x2_workaround;
56 extern int force_ehci_off;
57 
58 /* Adjustable variables for the size of the pools */
59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
61 
62 /*
63  * Initialize the values which the order of 32ms intr qh are executed
64  * by the host controller in the lattice tree.
65  */
66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
67 	{0x00, 0x10, 0x08, 0x18,
68 	0x04, 0x14, 0x0c, 0x1c,
69 	0x02, 0x12, 0x0a, 0x1a,
70 	0x06, 0x16, 0x0e, 0x1e,
71 	0x01, 0x11, 0x09, 0x19,
72 	0x05, 0x15, 0x0d, 0x1d,
73 	0x03, 0x13, 0x0b, 0x1b,
74 	0x07, 0x17, 0x0f, 0x1f};
75 
76 /*
77  * Initialize the values which are used to calculate start split mask
78  * for the low/full/high speed interrupt and isochronous endpoints.
79  */
80 static uint_t ehci_start_split_mask[15] = {
81 		/*
82 		 * For high/full/low speed usb devices. For high speed
83 		 * device with polling interval greater than or equal
84 		 * to 8us (125us).
85 		 */
86 		0x01,	/* 00000001 */
87 		0x02,	/* 00000010 */
88 		0x04,	/* 00000100 */
89 		0x08,	/* 00001000 */
90 		0x10,	/* 00010000 */
91 		0x20,	/* 00100000 */
92 		0x40,	/* 01000000 */
93 		0x80,	/* 10000000 */
94 
95 		/* Only for high speed devices with polling interval 4us */
96 		0x11,	/* 00010001 */
97 		0x22,	/* 00100010 */
98 		0x44,	/* 01000100 */
99 		0x88,	/* 10001000 */
100 
101 		/* Only for high speed devices with polling interval 2us */
102 		0x55,	/* 01010101 */
103 		0xaa,	/* 10101010 */
104 
105 		/* Only for high speed devices with polling interval 1us */
106 		0xff	/* 11111111 */
107 };
108 
109 /*
110  * Initialize the values which are used to calculate complete split mask
111  * for the low/full speed interrupt and isochronous endpoints.
112  */
113 static uint_t ehci_intr_complete_split_mask[7] = {
114 		/* Only full/low speed devices */
115 		0x1c,	/* 00011100 */
116 		0x38,	/* 00111000 */
117 		0x70,	/* 01110000 */
118 		0xe0,	/* 11100000 */
119 		0x00,	/* Need FSTN feature */
120 		0x00,	/* Need FSTN feature */
121 		0x00	/* Need FSTN feature */
122 };
123 
124 
125 /*
126  * EHCI Internal Function Prototypes
127  */
128 
129 /* Host Controller Driver (HCD) initialization functions */
130 void		ehci_set_dma_attributes(ehci_state_t	*ehcip);
131 int		ehci_allocate_pools(ehci_state_t	*ehcip);
132 void		ehci_decode_ddi_dma_addr_bind_handle_result(
133 				ehci_state_t		*ehcip,
134 				int			result);
135 int		ehci_map_regs(ehci_state_t		*ehcip);
136 int		ehci_register_intrs_and_init_mutex(
137 				ehci_state_t		*ehcip);
138 static int	ehci_add_intrs(ehci_state_t		*ehcip,
139 				int			intr_type);
140 int		ehci_init_ctlr(ehci_state_t		*ehcip,
141 				int			init_type);
142 static int	ehci_take_control(ehci_state_t		*ehcip);
143 static int	ehci_init_periodic_frame_lst_table(
144 				ehci_state_t		*ehcip);
145 static void	ehci_build_interrupt_lattice(
146 				ehci_state_t		*ehcip);
147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t	*ehcip);
148 
149 /* Host Controller Driver (HCD) deinitialization functions */
150 int		ehci_cleanup(ehci_state_t		*ehcip);
151 static void	ehci_rem_intrs(ehci_state_t		*ehcip);
152 int		ehci_cpr_suspend(ehci_state_t		*ehcip);
153 int		ehci_cpr_resume(ehci_state_t		*ehcip);
154 
155 /* Bandwidth Allocation functions */
156 int		ehci_allocate_bandwidth(ehci_state_t	*ehcip,
157 				usba_pipe_handle_data_t	*ph,
158 				uint_t			*pnode,
159 				uchar_t			*smask,
160 				uchar_t			*cmask);
161 static int	ehci_allocate_high_speed_bandwidth(
162 				ehci_state_t		*ehcip,
163 				usba_pipe_handle_data_t	*ph,
164 				uint_t			*hnode,
165 				uchar_t			*smask,
166 				uchar_t			*cmask);
167 static int	ehci_allocate_classic_tt_bandwidth(
168 				ehci_state_t		*ehcip,
169 				usba_pipe_handle_data_t	*ph,
170 				uint_t			pnode);
171 void		ehci_deallocate_bandwidth(ehci_state_t	*ehcip,
172 				usba_pipe_handle_data_t	*ph,
173 				uint_t			pnode,
174 				uchar_t			smask,
175 				uchar_t			cmask);
176 static void	ehci_deallocate_high_speed_bandwidth(
177 				ehci_state_t		*ehcip,
178 				usba_pipe_handle_data_t	*ph,
179 				uint_t			hnode,
180 				uchar_t			smask,
181 				uchar_t			cmask);
182 static void	ehci_deallocate_classic_tt_bandwidth(
183 				ehci_state_t		*ehcip,
184 				usba_pipe_handle_data_t	*ph,
185 				uint_t			pnode);
186 static int	ehci_compute_high_speed_bandwidth(
187 				ehci_state_t		*ehcip,
188 				usb_ep_descr_t		*endpoint,
189 				usb_port_status_t	port_status,
190 				uint_t			*sbandwidth,
191 				uint_t			*cbandwidth);
192 static int	ehci_compute_classic_bandwidth(
193 				usb_ep_descr_t		*endpoint,
194 				usb_port_status_t	port_status,
195 				uint_t			*bandwidth);
196 int		ehci_adjust_polling_interval(
197 				ehci_state_t		*ehcip,
198 				usb_ep_descr_t		*endpoint,
199 				usb_port_status_t	port_status);
200 static int	ehci_adjust_high_speed_polling_interval(
201 				ehci_state_t		*ehcip,
202 				usb_ep_descr_t		*endpoint);
203 static uint_t	ehci_lattice_height(uint_t		interval);
204 static uint_t	ehci_lattice_parent(uint_t		node);
205 static uint_t	ehci_find_periodic_node(
206 				uint_t			leaf,
207 				int			interval);
208 static uint_t	ehci_leftmost_leaf(uint_t		node,
209 				uint_t			height);
210 static uint_t	ehci_pow_2(uint_t x);
211 static uint_t	ehci_log_2(uint_t x);
212 static int	ehci_find_bestfit_hs_mask(
213 				ehci_state_t		*ehcip,
214 				uchar_t			*smask,
215 				uint_t			*pnode,
216 				usb_ep_descr_t		*endpoint,
217 				uint_t			bandwidth,
218 				int			interval);
219 static int	ehci_find_bestfit_ls_intr_mask(
220 				ehci_state_t		*ehcip,
221 				uchar_t			*smask,
222 				uchar_t			*cmask,
223 				uint_t			*pnode,
224 				uint_t			sbandwidth,
225 				uint_t			cbandwidth,
226 				int			interval);
227 static int	ehci_find_bestfit_sitd_in_mask(
228 				ehci_state_t		*ehcip,
229 				uchar_t			*smask,
230 				uchar_t			*cmask,
231 				uint_t			*pnode,
232 				uint_t			sbandwidth,
233 				uint_t			cbandwidth,
234 				int			interval);
235 static int	ehci_find_bestfit_sitd_out_mask(
236 				ehci_state_t		*ehcip,
237 				uchar_t			*smask,
238 				uint_t			*pnode,
239 				uint_t			sbandwidth,
240 				int			interval);
241 static uint_t	ehci_calculate_bw_availability_mask(
242 				ehci_state_t		*ehcip,
243 				uint_t			bandwidth,
244 				int			leaf,
245 				int			leaf_count,
246 				uchar_t			*bw_mask);
247 static void	ehci_update_bw_availability(
248 				ehci_state_t		*ehcip,
249 				int			bandwidth,
250 				int			leftmost_leaf,
251 				int			leaf_count,
252 				uchar_t			mask);
253 
254 /* Miscellaneous functions */
255 ehci_state_t	*ehci_obtain_state(
256 				dev_info_t		*dip);
257 int		ehci_state_is_operational(
258 				ehci_state_t		*ehcip);
259 int		ehci_do_soft_reset(
260 				ehci_state_t		*ehcip);
261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t	*ehcip,
262 				ehci_pipe_private_t	*pp,
263 				ehci_trans_wrapper_t	*tw);
264 usb_frame_number_t ehci_get_current_frame_number(
265 				ehci_state_t		*ehcip);
266 static void	ehci_cpr_cleanup(
267 				ehci_state_t		*ehcip);
268 int		ehci_wait_for_sof(
269 				ehci_state_t		*ehcip);
270 void		ehci_toggle_scheduler(
271 				ehci_state_t		*ehcip);
272 void		ehci_print_caps(ehci_state_t		*ehcip);
273 void		ehci_print_regs(ehci_state_t		*ehcip);
274 void		ehci_print_qh(ehci_state_t		*ehcip,
275 				ehci_qh_t		*qh);
276 void		ehci_print_qtd(ehci_state_t		*ehcip,
277 				ehci_qtd_t		*qtd);
278 void		ehci_create_stats(ehci_state_t		*ehcip);
279 void		ehci_destroy_stats(ehci_state_t		*ehcip);
280 void		ehci_do_intrs_stats(ehci_state_t	*ehcip,
281 				int		val);
282 void		ehci_do_byte_stats(ehci_state_t		*ehcip,
283 				size_t		len,
284 				uint8_t		attr,
285 				uint8_t		addr);
286 
287 /*
288  * check if this ehci controller can support PM
289  */
290 int
291 ehci_hcdi_pm_support(dev_info_t *dip)
292 {
293 	ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
294 	    ddi_get_instance(dip));
295 
296 	if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
297 	    (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
298 
299 	    ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
300 	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
301 
302 	    (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
303 
304 		return (USB_SUCCESS);
305 	}
306 
307 	return (USB_FAILURE);
308 }
309 
310 void
311 ehci_dma_attr_workaround(ehci_state_t	*ehcip)
312 {
313 	/*
314 	 * Some Nvidia chips can not handle qh dma address above 2G.
315 	 * The bit 31 of the dma address might be omitted and it will
316 	 * cause system crash or other unpredicable result. So force
317 	 * the dma address allocated below 2G to make ehci work.
318 	 */
319 	if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
320 		switch (ehcip->ehci_device_id) {
321 			case PCI_DEVICE_NVIDIA_CK804:
322 				USB_DPRINTF_L2(PRINT_MASK_ATTA,
323 				    ehcip->ehci_log_hdl,
324 				    "ehci_dma_attr_workaround: NVIDIA dma "
325 				    "workaround enabled, force dma address "
326 				    "to be allocated below 2G");
327 				ehcip->ehci_dma_attr.dma_attr_addr_hi =
328 				    0x7fffffffull;
329 				break;
330 			default:
331 				break;
332 
333 		}
334 	}
335 }
336 
337 /*
338  * Host Controller Driver (HCD) initialization functions
339  */
340 
341 /*
342  * ehci_set_dma_attributes:
343  *
344  * Set the limits in the DMA attributes structure. Most of the values used
345  * in the  DMA limit structures are the default values as specified by	the
346  * Writing PCI device drivers document.
347  */
348 void
349 ehci_set_dma_attributes(ehci_state_t	*ehcip)
350 {
351 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
352 	    "ehci_set_dma_attributes:");
353 
354 	/* Initialize the DMA attributes */
355 	ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
356 	ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
357 	ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
358 
359 	/* 32 bit addressing */
360 	ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
361 
362 	/* Byte alignment */
363 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
364 
365 	/*
366 	 * Since PCI  specification is byte alignment, the
367 	 * burst size field should be set to 1 for PCI devices.
368 	 */
369 	ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
370 
371 	ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
372 	ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
373 	ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
374 	ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
375 	ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
376 	ehcip->ehci_dma_attr.dma_attr_flags = 0;
377 	ehci_dma_attr_workaround(ehcip);
378 }
379 
380 
381 /*
382  * ehci_allocate_pools:
383  *
384  * Allocate the system memory for the Endpoint Descriptor (QH) and for the
385  * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
386  * to a 16 byte boundary.
387  */
388 int
389 ehci_allocate_pools(ehci_state_t	*ehcip)
390 {
391 	ddi_device_acc_attr_t		dev_attr;
392 	size_t				real_length;
393 	int				result;
394 	uint_t				ccount;
395 	int				i;
396 
397 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
398 	    "ehci_allocate_pools:");
399 
400 	/* The host controller will be little endian */
401 	dev_attr.devacc_attr_version	= DDI_DEVICE_ATTR_V0;
402 	dev_attr.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
403 	dev_attr.devacc_attr_dataorder	= DDI_STRICTORDER_ACC;
404 
405 	/* Byte alignment */
406 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
407 
408 	/* Allocate the QTD pool DMA handle */
409 	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
410 	    DDI_DMA_SLEEP, 0,
411 	    &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
412 
413 		goto failure;
414 	}
415 
416 	/* Allocate the memory for the QTD pool */
417 	if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
418 	    ehci_qtd_pool_size * sizeof (ehci_qtd_t),
419 	    &dev_attr,
420 	    DDI_DMA_CONSISTENT,
421 	    DDI_DMA_SLEEP,
422 	    0,
423 	    (caddr_t *)&ehcip->ehci_qtd_pool_addr,
424 	    &real_length,
425 	    &ehcip->ehci_qtd_pool_mem_handle)) {
426 
427 		goto failure;
428 	}
429 
430 	/* Map the QTD pool into the I/O address space */
431 	result = ddi_dma_addr_bind_handle(
432 	    ehcip->ehci_qtd_pool_dma_handle,
433 	    NULL,
434 	    (caddr_t)ehcip->ehci_qtd_pool_addr,
435 	    real_length,
436 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
437 	    DDI_DMA_SLEEP,
438 	    NULL,
439 	    &ehcip->ehci_qtd_pool_cookie,
440 	    &ccount);
441 
442 	bzero((void *)ehcip->ehci_qtd_pool_addr,
443 	    ehci_qtd_pool_size * sizeof (ehci_qtd_t));
444 
445 	/* Process the result */
446 	if (result == DDI_DMA_MAPPED) {
447 		/* The cookie count should be 1 */
448 		if (ccount != 1) {
449 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
450 			    "ehci_allocate_pools: More than 1 cookie");
451 
452 		goto failure;
453 		}
454 	} else {
455 		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
456 		    "ehci_allocate_pools: Result = %d", result);
457 
458 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
459 
460 		goto failure;
461 	}
462 
463 	/*
464 	 * DMA addresses for QTD pools are bound
465 	 */
466 	ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
467 
468 	/* Initialize the QTD pool */
469 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
470 		Set_QTD(ehcip->ehci_qtd_pool_addr[i].
471 		    qtd_state, EHCI_QTD_FREE);
472 	}
473 
474 	/* Allocate the QTD pool DMA handle */
475 	if (ddi_dma_alloc_handle(ehcip->ehci_dip,
476 	    &ehcip->ehci_dma_attr,
477 	    DDI_DMA_SLEEP,
478 	    0,
479 	    &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
480 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
481 		    "ehci_allocate_pools: ddi_dma_alloc_handle failed");
482 
483 		goto failure;
484 	}
485 
486 	/* Allocate the memory for the QH pool */
487 	if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
488 	    ehci_qh_pool_size * sizeof (ehci_qh_t),
489 	    &dev_attr,
490 	    DDI_DMA_CONSISTENT,
491 	    DDI_DMA_SLEEP,
492 	    0,
493 	    (caddr_t *)&ehcip->ehci_qh_pool_addr,
494 	    &real_length,
495 	    &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
496 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
497 		    "ehci_allocate_pools: ddi_dma_mem_alloc failed");
498 
499 		goto failure;
500 	}
501 
502 	result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
503 	    NULL,
504 	    (caddr_t)ehcip->ehci_qh_pool_addr,
505 	    real_length,
506 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
507 	    DDI_DMA_SLEEP,
508 	    NULL,
509 	    &ehcip->ehci_qh_pool_cookie,
510 	    &ccount);
511 
512 	bzero((void *)ehcip->ehci_qh_pool_addr,
513 	    ehci_qh_pool_size * sizeof (ehci_qh_t));
514 
515 	/* Process the result */
516 	if (result == DDI_DMA_MAPPED) {
517 		/* The cookie count should be 1 */
518 		if (ccount != 1) {
519 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
520 			    "ehci_allocate_pools: More than 1 cookie");
521 
522 			goto failure;
523 		}
524 	} else {
525 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
526 
527 		goto failure;
528 	}
529 
530 	/*
531 	 * DMA addresses for QH pools are bound
532 	 */
533 	ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
534 
535 	/* Initialize the QH pool */
536 	for (i = 0; i < ehci_qh_pool_size; i ++) {
537 		Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
538 	}
539 
540 	/* Byte alignment */
541 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
542 
543 	return (DDI_SUCCESS);
544 
545 failure:
546 	/* Byte alignment */
547 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
548 
549 	return (DDI_FAILURE);
550 }
551 
552 
553 /*
554  * ehci_decode_ddi_dma_addr_bind_handle_result:
555  *
556  * Process the return values of ddi_dma_addr_bind_handle()
557  */
558 void
559 ehci_decode_ddi_dma_addr_bind_handle_result(
560 	ehci_state_t	*ehcip,
561 	int		result)
562 {
563 	USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
564 	    "ehci_decode_ddi_dma_addr_bind_handle_result:");
565 
566 	switch (result) {
567 	case DDI_DMA_PARTIAL_MAP:
568 		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
569 		    "Partial transfers not allowed");
570 		break;
571 	case DDI_DMA_INUSE:
572 		USB_DPRINTF_L2(PRINT_MASK_ALL,	ehcip->ehci_log_hdl,
573 		    "Handle is in use");
574 		break;
575 	case DDI_DMA_NORESOURCES:
576 		USB_DPRINTF_L2(PRINT_MASK_ALL,	ehcip->ehci_log_hdl,
577 		    "No resources");
578 		break;
579 	case DDI_DMA_NOMAPPING:
580 		USB_DPRINTF_L2(PRINT_MASK_ALL,	ehcip->ehci_log_hdl,
581 		    "No mapping");
582 		break;
583 	case DDI_DMA_TOOBIG:
584 		USB_DPRINTF_L2(PRINT_MASK_ALL,	ehcip->ehci_log_hdl,
585 		    "Object is too big");
586 		break;
587 	default:
588 		USB_DPRINTF_L2(PRINT_MASK_ALL,	ehcip->ehci_log_hdl,
589 		    "Unknown dma error");
590 	}
591 }
592 
593 
594 /*
595  * ehci_map_regs:
596  *
597  * The Host Controller (HC) contains a set of on-chip operational registers
598  * and which should be mapped into a non-cacheable portion of the  system
599  * addressable space.
600  */
601 int
602 ehci_map_regs(ehci_state_t	*ehcip)
603 {
604 	ddi_device_acc_attr_t	attr;
605 	uint16_t		cmd_reg;
606 	uint_t			length;
607 
608 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
609 
610 	/* Check to make sure we have memory access */
611 	if (pci_config_setup(ehcip->ehci_dip,
612 	    &ehcip->ehci_config_handle) != DDI_SUCCESS) {
613 
614 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
615 		    "ehci_map_regs: Config error");
616 
617 		return (DDI_FAILURE);
618 	}
619 
620 	/* Make sure Memory Access Enable is set */
621 	cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
622 
623 	if (!(cmd_reg & PCI_COMM_MAE)) {
624 
625 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
626 		    "ehci_map_regs: Memory base address access disabled");
627 
628 		return (DDI_FAILURE);
629 	}
630 
631 	/* The host controller will be little endian */
632 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
633 	attr.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
634 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
635 
636 	/* Map in EHCI Capability registers */
637 	if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
638 	    (caddr_t *)&ehcip->ehci_capsp, 0,
639 	    sizeof (ehci_caps_t), &attr,
640 	    &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
641 
642 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
643 		    "ehci_map_regs: Map setup error");
644 
645 		return (DDI_FAILURE);
646 	}
647 
648 	length = ddi_get8(ehcip->ehci_caps_handle,
649 	    (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
650 
651 	/* Free the original mapping */
652 	ddi_regs_map_free(&ehcip->ehci_caps_handle);
653 
654 	/* Re-map in EHCI Capability and Operational registers */
655 	if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
656 	    (caddr_t *)&ehcip->ehci_capsp, 0,
657 	    length + sizeof (ehci_regs_t), &attr,
658 	    &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
659 
660 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
661 		    "ehci_map_regs: Map setup error");
662 
663 		return (DDI_FAILURE);
664 	}
665 
666 	/* Get the pointer to EHCI Operational Register */
667 	ehcip->ehci_regsp = (ehci_regs_t *)
668 	    ((uintptr_t)ehcip->ehci_capsp + length);
669 
670 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
671 	    "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
672 	    (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
673 
674 	return (DDI_SUCCESS);
675 }
676 
677 /*
678  * The following simulated polling is for debugging purposes only.
679  * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
680  */
681 static int
682 ehci_is_polled(dev_info_t *dip)
683 {
684 	int ret;
685 	char *propval;
686 
687 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
688 	    "usb-polling", &propval) != DDI_SUCCESS)
689 
690 		return (0);
691 
692 	ret = (strcmp(propval, "true") == 0);
693 	ddi_prop_free(propval);
694 
695 	return (ret);
696 }
697 
698 static void
699 ehci_poll_intr(void *arg)
700 {
701 	/* poll every msec */
702 	for (;;) {
703 		(void) ehci_intr(arg, NULL);
704 		delay(drv_usectohz(1000));
705 	}
706 }
707 
708 /*
709  * ehci_register_intrs_and_init_mutex:
710  *
711  * Register interrupts and initialize each mutex and condition variables
712  */
713 int
714 ehci_register_intrs_and_init_mutex(ehci_state_t	*ehcip)
715 {
716 	int	intr_types;
717 
718 #if defined(__x86)
719 	uint8_t iline;
720 #endif
721 
722 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
723 	    "ehci_register_intrs_and_init_mutex:");
724 
725 	/*
726 	 * There is a known MSI hardware bug with the EHCI controller
727 	 * of ULI1575 southbridge. Hence MSI is disabled for this chip.
728 	 */
729 	if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
730 	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
731 		ehcip->ehci_msi_enabled = B_FALSE;
732 	} else {
733 		/* Set the MSI enable flag from the global EHCI MSI tunable */
734 		ehcip->ehci_msi_enabled = ehci_enable_msi;
735 	}
736 
737 	/* launch polling thread instead of enabling pci interrupt */
738 	if (ehci_is_polled(ehcip->ehci_dip)) {
739 		extern pri_t maxclsyspri;
740 
741 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
742 		    "ehci_register_intrs_and_init_mutex: "
743 		    "running in simulated polled mode");
744 
745 		(void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
746 		    TS_RUN, maxclsyspri);
747 
748 		goto skip_intr;
749 	}
750 
751 #if defined(__x86)
752 	/*
753 	 * Make sure that the interrupt pin is connected to the
754 	 * interrupt controller on x86.	 Interrupt line 255 means
755 	 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
756 	 * If we would return failure when interrupt line equals 255, then
757 	 * high speed devices will be routed to companion host controllers.
758 	 * However, it is not necessary to return failure here, and
759 	 * o/uhci codes don't check the interrupt line either.
760 	 * But it's good to log a message here for debug purposes.
761 	 */
762 	iline = pci_config_get8(ehcip->ehci_config_handle,
763 	    PCI_CONF_ILINE);
764 
765 	if (iline == 255) {
766 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
767 		    "ehci_register_intrs_and_init_mutex: "
768 		    "interrupt line value out of range (%d)",
769 		    iline);
770 	}
771 #endif	/* __x86 */
772 
773 	/* Get supported interrupt types */
774 	if (ddi_intr_get_supported_types(ehcip->ehci_dip,
775 	    &intr_types) != DDI_SUCCESS) {
776 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
777 		    "ehci_register_intrs_and_init_mutex: "
778 		    "ddi_intr_get_supported_types failed");
779 
780 		return (DDI_FAILURE);
781 	}
782 
783 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
784 	    "ehci_register_intrs_and_init_mutex: "
785 	    "supported interrupt types 0x%x", intr_types);
786 
787 	if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
788 		if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
789 		    != DDI_SUCCESS) {
790 			USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
791 			    "ehci_register_intrs_and_init_mutex: MSI "
792 			    "registration failed, trying FIXED interrupt \n");
793 		} else {
794 			USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
795 			    "ehci_register_intrs_and_init_mutex: "
796 			    "Using MSI interrupt type\n");
797 
798 			ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
799 			ehcip->ehci_flags |= EHCI_INTR;
800 		}
801 	}
802 
803 	if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
804 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
805 		if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
806 		    != DDI_SUCCESS) {
807 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
808 			    "ehci_register_intrs_and_init_mutex: "
809 			    "FIXED interrupt registration failed\n");
810 
811 			return (DDI_FAILURE);
812 		}
813 
814 		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
815 		    "ehci_register_intrs_and_init_mutex: "
816 		    "Using FIXED interrupt type\n");
817 
818 		ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
819 		ehcip->ehci_flags |= EHCI_INTR;
820 	}
821 
822 skip_intr:
823 	/* Create prototype for advance on async schedule */
824 	cv_init(&ehcip->ehci_async_schedule_advance_cv,
825 	    NULL, CV_DRIVER, NULL);
826 
827 	return (DDI_SUCCESS);
828 }
829 
830 
831 /*
832  * ehci_add_intrs:
833  *
834  * Register FIXED or MSI interrupts.
835  */
836 static int
837 ehci_add_intrs(ehci_state_t	*ehcip,
838 		int		intr_type)
839 {
840 	int	actual, avail, intr_size, count = 0;
841 	int	i, flag, ret;
842 
843 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 	    "ehci_add_intrs: interrupt type 0x%x", intr_type);
845 
846 	/* Get number of interrupts */
847 	ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 	if ((ret != DDI_SUCCESS) || (count == 0)) {
849 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 		    "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 		    "ret: %d, count: %d", ret, count);
852 
853 		return (DDI_FAILURE);
854 	}
855 
856 	/* Get number of available interrupts */
857 	ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 		    "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 		    "ret: %d, count: %d", ret, count);
862 
863 		return (DDI_FAILURE);
864 	}
865 
866 	if (avail < count) {
867 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868 		    "ehci_add_intrs: ehci_add_intrs: nintrs () "
869 		    "returned %d, navail returned %d\n", count, avail);
870 	}
871 
872 	/* Allocate an array of interrupt handles */
873 	intr_size = count * sizeof (ddi_intr_handle_t);
874 	ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875 
876 	flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 	    DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878 
879 	/* call ddi_intr_alloc() */
880 	ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 	    intr_type, 0, count, &actual, flag);
882 
883 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 		    "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886 
887 		kmem_free(ehcip->ehci_htable, intr_size);
888 
889 		return (DDI_FAILURE);
890 	}
891 
892 	if (actual < count) {
893 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 		    "ehci_add_intrs: Requested: %d, Received: %d\n",
895 		    count, actual);
896 
897 		for (i = 0; i < actual; i++)
898 			(void) ddi_intr_free(ehcip->ehci_htable[i]);
899 
900 		kmem_free(ehcip->ehci_htable, intr_size);
901 
902 		return (DDI_FAILURE);
903 	}
904 
905 	ehcip->ehci_intr_cnt = actual;
906 
907 	if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 	    &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 		    "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911 
912 		for (i = 0; i < actual; i++)
913 			(void) ddi_intr_free(ehcip->ehci_htable[i]);
914 
915 		kmem_free(ehcip->ehci_htable, intr_size);
916 
917 		return (DDI_FAILURE);
918 	}
919 
920 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 	    "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 	    ehcip->ehci_intr_pri);
923 
924 	/* Test for high level mutex */
925 	if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 		    "ehci_add_intrs: Hi level interrupt not supported");
928 
929 		for (i = 0; i < actual; i++)
930 			(void) ddi_intr_free(ehcip->ehci_htable[i]);
931 
932 		kmem_free(ehcip->ehci_htable, intr_size);
933 
934 		return (DDI_FAILURE);
935 	}
936 
937 	/* Initialize the mutex */
938 	mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 	    DDI_INTR_PRI(ehcip->ehci_intr_pri));
940 
941 	/* Call ddi_intr_add_handler() */
942 	for (i = 0; i < actual; i++) {
943 		if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 		    ehci_intr, (caddr_t)ehcip,
945 		    (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 			    "ehci_add_intrs:ddi_intr_add_handler() "
948 			    "failed %d", ret);
949 
950 			for (i = 0; i < actual; i++)
951 				(void) ddi_intr_free(ehcip->ehci_htable[i]);
952 
953 			mutex_destroy(&ehcip->ehci_int_mutex);
954 			kmem_free(ehcip->ehci_htable, intr_size);
955 
956 			return (DDI_FAILURE);
957 		}
958 	}
959 
960 	if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 	    &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 		    "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964 
965 		for (i = 0; i < actual; i++) {
966 			(void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 			(void) ddi_intr_free(ehcip->ehci_htable[i]);
968 		}
969 
970 		mutex_destroy(&ehcip->ehci_int_mutex);
971 		kmem_free(ehcip->ehci_htable, intr_size);
972 
973 		return (DDI_FAILURE);
974 	}
975 
976 	/* Enable all interrupts */
977 	if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 		/* Call ddi_intr_block_enable() for MSI interrupts */
979 		(void) ddi_intr_block_enable(ehcip->ehci_htable,
980 		    ehcip->ehci_intr_cnt);
981 	} else {
982 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
983 		for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 			(void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 	}
986 
987 	return (DDI_SUCCESS);
988 }
989 
990 
991 /*
992  * ehci_init_hardware
993  *
994  * take control from BIOS, reset EHCI host controller, and check version, etc.
995  */
996 int
997 ehci_init_hardware(ehci_state_t	*ehcip)
998 {
999 	int			revision;
1000 	uint16_t		cmd_reg;
1001 	int			abort_on_BIOS_take_over_failure;
1002 
1003 	/* Take control from the BIOS */
1004 	if (ehci_take_control(ehcip) != USB_SUCCESS) {
1005 
1006 		/* read .conf file properties */
1007 		abort_on_BIOS_take_over_failure =
1008 		    ddi_prop_get_int(DDI_DEV_T_ANY,
1009 		    ehcip->ehci_dip, DDI_PROP_DONTPASS,
1010 		    "abort-on-BIOS-take-over-failure", 0);
1011 
1012 		if (abort_on_BIOS_take_over_failure) {
1013 
1014 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1015 			    "Unable to take control from BIOS.");
1016 
1017 			return (DDI_FAILURE);
1018 		}
1019 
1020 		USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1021 		    "Unable to take control from BIOS. Failure is ignored.");
1022 	}
1023 
1024 	/* set Memory Master Enable */
1025 	cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1026 	cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1027 	pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1028 
1029 	/* Reset the EHCI host controller */
1030 	Set_OpReg(ehci_command,
1031 	    Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1032 
1033 	/* Wait 10ms for reset to complete */
1034 	drv_usecwait(EHCI_RESET_TIMEWAIT);
1035 
1036 	ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1037 
1038 	/* Verify the version number */
1039 	revision = Get_16Cap(ehci_version);
1040 
1041 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1042 	    "ehci_init_hardware: Revision 0x%x", revision);
1043 
1044 	/*
1045 	 * EHCI driver supports EHCI host controllers compliant to
1046 	 * 0.95 and higher revisions of EHCI specifications.
1047 	 */
1048 	if (revision < EHCI_REVISION_0_95) {
1049 
1050 		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1051 		    "Revision 0x%x is not supported", revision);
1052 
1053 		return (DDI_FAILURE);
1054 	}
1055 
1056 	if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1057 
1058 		/* Initialize the Frame list base address area */
1059 		if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1060 
1061 			return (DDI_FAILURE);
1062 		}
1063 
1064 		/*
1065 		 * For performance reasons, do not insert anything into the
1066 		 * asynchronous list or activate the asynch list schedule until
1067 		 * there is a valid QH.
1068 		 */
1069 		ehcip->ehci_head_of_async_sched_list = NULL;
1070 
1071 		if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1072 		    (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1073 			/*
1074 			 * The driver is unable to reliably stop the asynch
1075 			 * list schedule on VIA VT6202 controllers, so we
1076 			 * always keep a dummy QH on the list.
1077 			 */
1078 			ehci_qh_t *dummy_async_qh =
1079 			    ehci_alloc_qh(ehcip, NULL, NULL);
1080 
1081 			Set_QH(dummy_async_qh->qh_link_ptr,
1082 			    ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1083 			    EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1084 
1085 			/* Set this QH to be the "head" of the circular list */
1086 			Set_QH(dummy_async_qh->qh_ctrl,
1087 			    Get_QH(dummy_async_qh->qh_ctrl) |
1088 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1089 
1090 			Set_QH(dummy_async_qh->qh_next_qtd,
1091 			    EHCI_QH_NEXT_QTD_PTR_VALID);
1092 			Set_QH(dummy_async_qh->qh_alt_next_qtd,
1093 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1094 
1095 			ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1096 			ehcip->ehci_open_async_count++;
1097 		}
1098 	}
1099 
1100 	return (DDI_SUCCESS);
1101 }
1102 
1103 
1104 /*
1105  * ehci_init_workaround
1106  *
1107  * some workarounds during initializing ehci
1108  */
1109 int
1110 ehci_init_workaround(ehci_state_t	*ehcip)
1111 {
1112 	/*
1113 	 * Acer Labs Inc. M5273 EHCI controller does not send
1114 	 * interrupts unless the Root hub ports are routed to the EHCI
1115 	 * host controller; so route the ports now, before we test for
1116 	 * the presence of SOFs interrupts.
1117 	 */
1118 	if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1119 		/* Route all Root hub ports to EHCI host controller */
1120 		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1121 	}
1122 
1123 	/*
1124 	 * VIA chips have some issues and may not work reliably.
1125 	 * Revisions >= 0x80 are part of a southbridge and appear
1126 	 * to be reliable with the workaround.
1127 	 * For revisions < 0x80, if we	were bound using class
1128 	 * complain, else proceed. This will allow the user to
1129 	 * bind ehci specifically to this chip and not have the
1130 	 * warnings
1131 	 */
1132 	if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1133 
1134 		if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1135 
1136 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1137 			    "ehci_init_workaround: Applying VIA workarounds "
1138 			    "for the 6212 chip.");
1139 
1140 		} else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1141 		    "pciclass,0c0320") == 0) {
1142 
1143 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1144 			    "Due to recently discovered incompatibilities");
1145 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1146 			    "with this USB controller, USB2.x transfer");
1147 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1148 			    "support has been disabled. This device will");
1149 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1150 			    "continue to function as a USB1.x controller.");
1151 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1152 			    "If you are interested in enabling USB2.x");
1153 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1154 			    "support please, refer to the ehci(7D) man page.");
1155 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1156 			    "Please also refer to www.sun.com/io for");
1157 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1158 			    "Solaris Ready products and to");
1159 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1160 			    "www.sun.com/bigadmin/hcl for additional");
1161 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1162 			    "compatible USB products.");
1163 
1164 			return (DDI_FAILURE);
1165 
1166 			} else if (ehci_vt62x2_workaround) {
1167 
1168 			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1169 			    "Applying VIA workarounds");
1170 		}
1171 	}
1172 
1173 	return (DDI_SUCCESS);
1174 }
1175 
1176 
1177 /*
1178  * ehci_init_check_status
1179  *
1180  * Check if EHCI host controller is running
1181  */
1182 int
1183 ehci_init_check_status(ehci_state_t	*ehcip)
1184 {
1185 	clock_t			sof_time_wait;
1186 
1187 	/*
1188 	 * Get the number of clock ticks to wait.
1189 	 * This is based on the maximum time it takes for a frame list rollover
1190 	 * and maximum time wait for SOFs to begin.
1191 	 */
1192 	sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1193 	    EHCI_SOF_TIMEWAIT);
1194 
1195 	/* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1196 	ehcip->ehci_flags |= EHCI_CV_INTR;
1197 
1198 	/* We need to add a delay to allow the chip time to start running */
1199 	(void) cv_timedwait(&ehcip->ehci_async_schedule_advance_cv,
1200 	    &ehcip->ehci_int_mutex, ddi_get_lbolt() + sof_time_wait);
1201 
1202 	/*
1203 	 * Check EHCI host controller is running, otherwise return failure.
1204 	 */
1205 	if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1206 	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1207 
1208 		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1209 		    "No SOF interrupts have been received, this USB EHCI host"
1210 		    "controller is unusable");
1211 
1212 		/*
1213 		 * Route all Root hub ports to Classic host
1214 		 * controller, in case this is an unusable ALI M5273
1215 		 * EHCI controller.
1216 		 */
1217 		if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1218 			Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1219 		}
1220 
1221 		return (DDI_FAILURE);
1222 	}
1223 
1224 	return (DDI_SUCCESS);
1225 }
1226 
1227 
1228 /*
1229  * ehci_init_ctlr:
1230  *
1231  * Initialize the Host Controller (HC).
1232  */
1233 int
1234 ehci_init_ctlr(ehci_state_t	*ehcip,
1235 		int		init_type)
1236 {
1237 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1238 
1239 	if (init_type == EHCI_NORMAL_INITIALIZATION) {
1240 
1241 		if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1242 
1243 			return (DDI_FAILURE);
1244 		}
1245 	}
1246 
1247 	/*
1248 	 * Check for Asynchronous schedule park capability feature. If this
1249 	 * feature is supported, then, program ehci command register with
1250 	 * appropriate values..
1251 	 */
1252 	if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1253 
1254 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1255 		    "ehci_init_ctlr: Async park mode is supported");
1256 
1257 		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1258 		    (EHCI_CMD_ASYNC_PARK_ENABLE |
1259 		    EHCI_CMD_ASYNC_PARK_COUNT_3)));
1260 	}
1261 
1262 	/*
1263 	 * Check for programmable periodic frame list feature. If this
1264 	 * feature is supported, then, program ehci command register with
1265 	 * 1024 frame list value.
1266 	 */
1267 	if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1268 
1269 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1270 		    "ehci_init_ctlr: Variable programmable periodic "
1271 		    "frame list is supported");
1272 
1273 		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1274 		    EHCI_CMD_FRAME_1024_SIZE));
1275 	}
1276 
1277 	/*
1278 	 * Currently EHCI driver doesn't support 64 bit addressing.
1279 	 *
1280 	 * If we are using 64 bit addressing capability, then, program
1281 	 * ehci_ctrl_segment register with 4 Gigabyte segment where all
1282 	 * of the interface data structures are allocated.
1283 	 */
1284 	if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1285 
1286 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1287 		    "ehci_init_ctlr: EHCI driver doesn't support "
1288 		    "64 bit addressing");
1289 	}
1290 
1291 	/* 64 bit addressing is not support */
1292 	Set_OpReg(ehci_ctrl_segment, 0x00000000);
1293 
1294 	/* Turn on/off the schedulers */
1295 	ehci_toggle_scheduler(ehcip);
1296 
1297 	/* Set host controller soft state to operational */
1298 	ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1299 
1300 	/*
1301 	 * Set the Periodic Frame List Base Address register with the
1302 	 * starting physical address of the Periodic Frame List.
1303 	 */
1304 	Set_OpReg(ehci_periodic_list_base,
1305 	    (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1306 	    EHCI_PERIODIC_LIST_BASE));
1307 
1308 	/*
1309 	 * Set ehci_interrupt to enable all interrupts except Root
1310 	 * Hub Status change interrupt.
1311 	 */
1312 	Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1313 	    EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1314 	    EHCI_INTR_USB);
1315 
1316 	/*
1317 	 * Set the desired interrupt threshold and turn on EHCI host controller.
1318 	 */
1319 	Set_OpReg(ehci_command,
1320 	    ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
1321 	    (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
1322 
1323 	ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1324 
1325 	if (init_type == EHCI_NORMAL_INITIALIZATION) {
1326 
1327 		if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1328 
1329 			/* Set host controller soft state to error */
1330 			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1331 
1332 			return (DDI_FAILURE);
1333 		}
1334 
1335 		if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1336 
1337 			/* Set host controller soft state to error */
1338 			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1339 
1340 			return (DDI_FAILURE);
1341 		}
1342 
1343 		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1344 		    "ehci_init_ctlr: SOF's have started");
1345 	}
1346 
1347 	/* Route all Root hub ports to EHCI host controller */
1348 	Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1349 
1350 	return (DDI_SUCCESS);
1351 }
1352 
1353 /*
1354  * ehci_take_control:
1355  *
1356  * Handshake to take EHCI control from BIOS if necessary.  Its only valid for
1357  * x86 machines, because sparc doesn't have a BIOS.
1358  * On x86 machine, the take control process includes
1359  *    o get the base address of the extended capability list
1360  *    o find out the capability for handoff synchronization in the list.
1361  *    o check if BIOS has owned the host controller.
1362  *    o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
1363  *    o wait for a constant time and check if BIOS has relinquished control.
1364  */
1365 /* ARGSUSED */
1366 static int
1367 ehci_take_control(ehci_state_t *ehcip)
1368 {
1369 #if defined(__x86)
1370 	uint32_t		extended_cap;
1371 	uint32_t		extended_cap_offset;
1372 	uint32_t		extended_cap_id;
1373 	uint_t			retry;
1374 
1375 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1376 	    "ehci_take_control:");
1377 
1378 	/*
1379 	 * According EHCI Spec 2.2.4, get EECP base address from HCCPARAMS
1380 	 * register.
1381 	 */
1382 	extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1383 	    EHCI_HCC_EECP_SHIFT;
1384 
1385 	/*
1386 	 * According EHCI Spec 2.2.4, if the extended capability offset is
1387 	 * less than 40h then its not valid.  This means we don't need to
1388 	 * worry about BIOS handoff.
1389 	 */
1390 	if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1391 
1392 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1393 		    "ehci_take_control: Hardware doesn't support legacy.");
1394 
1395 		goto success;
1396 	}
1397 
1398 	/*
1399 	 * According EHCI Spec 2.1.7, A zero offset indicates the
1400 	 * end of the extended capability list.
1401 	 */
1402 	while (extended_cap_offset) {
1403 
1404 		/* Get the extended capability value. */
1405 		extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1406 		    extended_cap_offset);
1407 
1408 		/* Get the capability ID */
1409 		extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1410 		    EHCI_EX_CAP_ID_SHIFT;
1411 
1412 		/* Check if the card support legacy */
1413 		if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1414 			break;
1415 		}
1416 
1417 		/* Get the offset of the next capability */
1418 		extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1419 		    EHCI_EX_CAP_NEXT_PTR_SHIFT;
1420 	}
1421 
1422 	/*
1423 	 * Unable to find legacy support in hardware's extended capability list.
1424 	 * This means we don't need to worry about BIOS handoff.
1425 	 */
1426 	if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1427 
1428 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1429 		    "ehci_take_control: Hardware doesn't support legacy");
1430 
1431 		goto success;
1432 	}
1433 
1434 	/* Check if BIOS has owned it. */
1435 	if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1436 
1437 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1438 		    "ehci_take_control: BIOS does not own EHCI");
1439 
1440 		goto success;
1441 	}
1442 
1443 	/*
1444 	 * According EHCI Spec 5.1, The OS driver initiates an ownership
1445 	 * request by setting the OS Owned semaphore to a one. The OS
1446 	 * waits for the BIOS Owned bit to go to a zero before attempting
1447 	 * to use the EHCI controller. The time that OS must wait for BIOS
1448 	 * to respond to the request for ownership is beyond the scope of
1449 	 * this specification.
1450 	 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
1451 	 * for BIOS to release the ownership.
1452 	 */
1453 	extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1454 	pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1455 	    extended_cap);
1456 
1457 	for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1458 
1459 		/* wait a special interval */
1460 #ifndef __lock_lint
1461 		delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1462 #endif
1463 		/* Check to see if the BIOS has released the ownership */
1464 		extended_cap = pci_config_get32(
1465 		    ehcip->ehci_config_handle, extended_cap_offset);
1466 
1467 		if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1468 
1469 			USB_DPRINTF_L3(PRINT_MASK_ATTA,
1470 			    ehcip->ehci_log_hdl,
1471 			    "ehci_take_control: BIOS has released "
1472 			    "the ownership. retry = %d", retry);
1473 
1474 			goto success;
1475 		}
1476 
1477 	}
1478 
1479 	USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1480 	    "ehci_take_control: take control from BIOS failed.");
1481 
1482 	return (USB_FAILURE);
1483 
1484 success:
1485 
1486 #endif	/* __x86 */
1487 	return (USB_SUCCESS);
1488 }
1489 
1490 
1491 /*
1492  * ehci_init_periodic_frame_list_table :
1493  *
1494  * Allocate the system memory and initialize Host Controller
1495  * Periodic Frame List table area. The starting of the Periodic
1496  * Frame List Table area must be 4096 byte aligned.
1497  */
1498 static int
1499 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1500 {
1501 	ddi_device_acc_attr_t	dev_attr;
1502 	size_t			real_length;
1503 	uint_t			ccount;
1504 	int			result;
1505 
1506 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1507 
1508 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1509 	    "ehci_init_periodic_frame_lst_table:");
1510 
1511 	/* The host controller will be little endian */
1512 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1513 	dev_attr.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
1514 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1515 
1516 	/* Force the required 4K restrictive alignment */
1517 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1518 
1519 	/* Create space for the Periodic Frame List */
1520 	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1521 	    DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1522 
1523 		goto failure;
1524 	}
1525 
1526 	if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1527 	    sizeof (ehci_periodic_frame_list_t),
1528 	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1529 	    0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1530 	    &real_length, &ehcip->ehci_pflt_mem_handle)) {
1531 
1532 		goto failure;
1533 	}
1534 
1535 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1536 	    "ehci_init_periodic_frame_lst_table: "
1537 	    "Real length %lu", real_length);
1538 
1539 	/* Map the whole Periodic Frame List into the I/O address space */
1540 	result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1541 	    NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1542 	    real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1543 	    DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1544 
1545 	if (result == DDI_DMA_MAPPED) {
1546 		/* The cookie count should be 1 */
1547 		if (ccount != 1) {
1548 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1549 			    "ehci_init_periodic_frame_lst_table: "
1550 			    "More than 1 cookie");
1551 
1552 			goto failure;
1553 		}
1554 	} else {
1555 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1556 
1557 		goto failure;
1558 	}
1559 
1560 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1561 	    "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1562 	    (void *)ehcip->ehci_periodic_frame_list_tablep,
1563 	    ehcip->ehci_pflt_cookie.dmac_address);
1564 
1565 	/*
1566 	 * DMA addresses for Periodic Frame List are bound.
1567 	 */
1568 	ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1569 
1570 	bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1571 
1572 	/* Initialize the Periodic Frame List */
1573 	ehci_build_interrupt_lattice(ehcip);
1574 
1575 	/* Reset Byte Alignment to Default */
1576 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1577 
1578 	return (DDI_SUCCESS);
1579 failure:
1580 	/* Byte alignment */
1581 	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1582 
1583 	return (DDI_FAILURE);
1584 }
1585 
1586 
1587 /*
1588  * ehci_build_interrupt_lattice:
1589  *
1590  * Construct the interrupt lattice tree using static Endpoint Descriptors
1591  * (QH). This interrupt lattice tree will have total of 32 interrupt  QH
1592  * lists and the Host Controller (HC) processes one interrupt QH list in
1593  * every frame. The Host Controller traverses the periodic schedule by
1594  * constructing an array offset reference from the Periodic List Base Address
1595  * register and bits 12 to 3 of Frame Index register. It fetches the element
1596  * and begins traversing the graph of linked schedule data structures.
1597  */
1598 static void
1599 ehci_build_interrupt_lattice(ehci_state_t	*ehcip)
1600 {
1601 	ehci_qh_t	*list_array = ehcip->ehci_qh_pool_addr;
1602 	ushort_t	ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1603 	ehci_periodic_frame_list_t *periodic_frame_list =
1604 	    ehcip->ehci_periodic_frame_list_tablep;
1605 	ushort_t	*temp, num_of_nodes;
1606 	uintptr_t	addr;
1607 	int		i, j, k;
1608 
1609 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1610 	    "ehci_build_interrupt_lattice:");
1611 
1612 	/*
1613 	 * Reserve the first 63 Endpoint Descriptor (QH) structures
1614 	 * in the pool as static endpoints & these are required for
1615 	 * constructing interrupt lattice tree.
1616 	 */
1617 	for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1618 		Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1619 		Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1620 		Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1621 		Set_QH(list_array[i].qh_alt_next_qtd,
1622 		    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1623 	}
1624 
1625 	/*
1626 	 * Make sure that last Endpoint on the periodic frame list terminates
1627 	 * periodic schedule.
1628 	 */
1629 	Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1630 
1631 	/* Build the interrupt lattice tree */
1632 	for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1633 		/*
1634 		 * The next  pointer in the host controller  endpoint
1635 		 * descriptor must contain an iommu address. Calculate
1636 		 * the offset into the cpu address and add this to the
1637 		 * starting iommu address.
1638 		 */
1639 		addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1640 
1641 		Set_QH(list_array[2*i + 1].qh_link_ptr,
1642 		    addr | EHCI_QH_LINK_REF_QH);
1643 		Set_QH(list_array[2*i + 2].qh_link_ptr,
1644 		    addr | EHCI_QH_LINK_REF_QH);
1645 	}
1646 
1647 	/* Build the tree bottom */
1648 	temp = (unsigned short *)
1649 	    kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1650 
1651 	num_of_nodes = 1;
1652 
1653 	/*
1654 	 * Initialize the values which are used for setting up head pointers
1655 	 * for the 32ms scheduling lists which starts from the Periodic Frame
1656 	 * List.
1657 	 */
1658 	for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1659 		for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1660 			ehci_index[j++] = temp[k];
1661 			ehci_index[j]	= temp[k] + ehci_pow_2(i);
1662 		}
1663 
1664 		num_of_nodes *= 2;
1665 		for (k = 0; k < num_of_nodes; k++)
1666 			temp[k] = ehci_index[k];
1667 	}
1668 
1669 	kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1670 
1671 	/*
1672 	 * Initialize the interrupt list in the Periodic Frame List Table
1673 	 * so that it points to the bottom of the tree.
1674 	 */
1675 	for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1676 		addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1677 		    (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1678 
1679 		ASSERT(addr);
1680 
1681 		for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1682 			Set_PFLT(periodic_frame_list->
1683 			    ehci_periodic_frame_list_table[ehci_index[j++]],
1684 			    (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1685 		}
1686 	}
1687 }
1688 
1689 
1690 /*
1691  * ehci_alloc_hcdi_ops:
1692  *
1693  * The HCDI interfaces or entry points are the software interfaces used by
1694  * the Universal Serial Bus Driver  (USBA) to  access the services of the
1695  * Host Controller Driver (HCD).  During HCD initialization, inform  USBA
1696  * about all available HCDI interfaces or entry points.
1697  */
1698 usba_hcdi_ops_t *
1699 ehci_alloc_hcdi_ops(ehci_state_t	*ehcip)
1700 {
1701 	usba_hcdi_ops_t			*usba_hcdi_ops;
1702 
1703 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1704 	    "ehci_alloc_hcdi_ops:");
1705 
1706 	usba_hcdi_ops = usba_alloc_hcdi_ops();
1707 
1708 	usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1709 
1710 	usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1711 	usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1712 	usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1713 
1714 	usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1715 
1716 	usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1717 	usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1718 	usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1719 	usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1720 
1721 	usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1722 	    ehci_hcdi_bulk_transfer_size;
1723 
1724 	usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1725 	    ehci_hcdi_pipe_stop_intr_polling;
1726 	usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1727 	    ehci_hcdi_pipe_stop_isoc_polling;
1728 
1729 	usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1730 	    ehci_hcdi_get_current_frame_number;
1731 	usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1732 	    ehci_hcdi_get_max_isoc_pkts;
1733 
1734 	usba_hcdi_ops->usba_hcdi_console_input_init =
1735 	    ehci_hcdi_polled_input_init;
1736 	usba_hcdi_ops->usba_hcdi_console_input_enter =
1737 	    ehci_hcdi_polled_input_enter;
1738 	usba_hcdi_ops->usba_hcdi_console_read =
1739 	    ehci_hcdi_polled_read;
1740 	usba_hcdi_ops->usba_hcdi_console_input_exit =
1741 	    ehci_hcdi_polled_input_exit;
1742 	usba_hcdi_ops->usba_hcdi_console_input_fini =
1743 	    ehci_hcdi_polled_input_fini;
1744 	return (usba_hcdi_ops);
1745 }
1746 
1747 
1748 /*
1749  * Host Controller Driver (HCD) deinitialization functions
1750  */
1751 
1752 /*
1753  * ehci_cleanup:
1754  *
1755  * Cleanup on attach failure or detach
1756  */
1757 int
1758 ehci_cleanup(ehci_state_t	*ehcip)
1759 {
1760 	ehci_trans_wrapper_t	*tw;
1761 	ehci_pipe_private_t	*pp;
1762 	ehci_qtd_t		*qtd;
1763 	int			i, ctrl, rval;
1764 	int			flags = ehcip->ehci_flags;
1765 
1766 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1767 
1768 	if (flags & EHCI_RHREG) {
1769 		/* Unload the root hub driver */
1770 		if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1771 
1772 			return (DDI_FAILURE);
1773 		}
1774 	}
1775 
1776 	if (flags & EHCI_USBAREG) {
1777 		/* Unregister this HCD instance with USBA */
1778 		usba_hcdi_unregister(ehcip->ehci_dip);
1779 	}
1780 
1781 	if (flags & EHCI_INTR) {
1782 
1783 		mutex_enter(&ehcip->ehci_int_mutex);
1784 
1785 		/* Disable all EHCI QH list processing */
1786 		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1787 		    ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1788 		    EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1789 
1790 		/* Disable all EHCI interrupts */
1791 		Set_OpReg(ehci_interrupt, 0);
1792 
1793 		/* wait for the next SOF */
1794 		(void) ehci_wait_for_sof(ehcip);
1795 
1796 		/* Route all Root hub ports to Classic host controller */
1797 		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1798 
1799 		/* Stop the EHCI host controller */
1800 		Set_OpReg(ehci_command,
1801 		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1802 
1803 		mutex_exit(&ehcip->ehci_int_mutex);
1804 
1805 		/* Wait for some time */
1806 		delay(drv_usectohz(EHCI_TIMEWAIT));
1807 
1808 		ehci_rem_intrs(ehcip);
1809 	}
1810 
1811 	/* Unmap the EHCI registers */
1812 	if (ehcip->ehci_caps_handle) {
1813 		ddi_regs_map_free(&ehcip->ehci_caps_handle);
1814 	}
1815 
1816 	if (ehcip->ehci_config_handle) {
1817 		pci_config_teardown(&ehcip->ehci_config_handle);
1818 	}
1819 
1820 	/* Free all the buffers */
1821 	if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1822 		for (i = 0; i < ehci_qtd_pool_size; i ++) {
1823 			qtd = &ehcip->ehci_qtd_pool_addr[i];
1824 			ctrl = Get_QTD(ehcip->
1825 			    ehci_qtd_pool_addr[i].qtd_state);
1826 
1827 			if ((ctrl != EHCI_QTD_FREE) &&
1828 			    (ctrl != EHCI_QTD_DUMMY) &&
1829 			    (qtd->qtd_trans_wrapper)) {
1830 
1831 				mutex_enter(&ehcip->ehci_int_mutex);
1832 
1833 				tw = (ehci_trans_wrapper_t *)
1834 				    EHCI_LOOKUP_ID((uint32_t)
1835 				    Get_QTD(qtd->qtd_trans_wrapper));
1836 
1837 				/* Obtain the pipe private structure */
1838 				pp = tw->tw_pipe_private;
1839 
1840 				/* Stop the transfer timer */
1841 				ehci_stop_xfer_timer(ehcip, tw,
1842 				    EHCI_REMOVE_XFER_ALWAYS);
1843 
1844 				ehci_deallocate_tw(ehcip, pp, tw);
1845 
1846 				mutex_exit(&ehcip->ehci_int_mutex);
1847 			}
1848 		}
1849 
1850 		/*
1851 		 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1852 		 * the handle for QTD pools.
1853 		 */
1854 		if ((ehcip->ehci_dma_addr_bind_flag &
1855 		    EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1856 
1857 			rval = ddi_dma_unbind_handle(
1858 			    ehcip->ehci_qtd_pool_dma_handle);
1859 
1860 			ASSERT(rval == DDI_SUCCESS);
1861 		}
1862 		ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1863 	}
1864 
1865 	/* Free the QTD pool */
1866 	if (ehcip->ehci_qtd_pool_dma_handle) {
1867 		ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1868 	}
1869 
1870 	if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1871 		/*
1872 		 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1873 		 * the handle for QH pools.
1874 		 */
1875 		if ((ehcip->ehci_dma_addr_bind_flag &
1876 		    EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1877 
1878 			rval = ddi_dma_unbind_handle(
1879 			    ehcip->ehci_qh_pool_dma_handle);
1880 
1881 			ASSERT(rval == DDI_SUCCESS);
1882 		}
1883 
1884 		ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1885 	}
1886 
1887 	/* Free the QH pool */
1888 	if (ehcip->ehci_qh_pool_dma_handle) {
1889 		ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1890 	}
1891 
1892 	/* Free the Periodic frame list table (PFLT) area */
1893 	if (ehcip->ehci_periodic_frame_list_tablep &&
1894 	    ehcip->ehci_pflt_mem_handle) {
1895 		/*
1896 		 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1897 		 * the handle for PFLT.
1898 		 */
1899 		if ((ehcip->ehci_dma_addr_bind_flag &
1900 		    EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1901 
1902 			rval = ddi_dma_unbind_handle(
1903 			    ehcip->ehci_pflt_dma_handle);
1904 
1905 			ASSERT(rval == DDI_SUCCESS);
1906 		}
1907 
1908 		ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1909 	}
1910 
1911 	(void) ehci_isoc_cleanup(ehcip);
1912 
1913 	if (ehcip->ehci_pflt_dma_handle) {
1914 		ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1915 	}
1916 
1917 	if (flags & EHCI_INTR) {
1918 		/* Destroy the mutex */
1919 		mutex_destroy(&ehcip->ehci_int_mutex);
1920 
1921 		/* Destroy the async schedule advance condition variable */
1922 		cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1923 	}
1924 
1925 	/* clean up kstat structs */
1926 	ehci_destroy_stats(ehcip);
1927 
1928 	/* Free ehci hcdi ops */
1929 	if (ehcip->ehci_hcdi_ops) {
1930 		usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1931 	}
1932 
1933 	if (flags & EHCI_ZALLOC) {
1934 
1935 		usb_free_log_hdl(ehcip->ehci_log_hdl);
1936 
1937 		/* Remove all properties that might have been created */
1938 		ddi_prop_remove_all(ehcip->ehci_dip);
1939 
1940 		/* Free the soft state */
1941 		ddi_soft_state_free(ehci_statep,
1942 		    ddi_get_instance(ehcip->ehci_dip));
1943 	}
1944 
1945 	return (DDI_SUCCESS);
1946 }
1947 
1948 
1949 /*
1950  * ehci_rem_intrs:
1951  *
1952  * Unregister FIXED or MSI interrupts
1953  */
1954 static void
1955 ehci_rem_intrs(ehci_state_t	*ehcip)
1956 {
1957 	int	i;
1958 
1959 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1960 	    "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1961 
1962 	/* Disable all interrupts */
1963 	if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1964 		(void) ddi_intr_block_disable(ehcip->ehci_htable,
1965 		    ehcip->ehci_intr_cnt);
1966 	} else {
1967 		for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1968 			(void) ddi_intr_disable(ehcip->ehci_htable[i]);
1969 		}
1970 	}
1971 
1972 	/* Call ddi_intr_remove_handler() */
1973 	for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1974 		(void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
1975 		(void) ddi_intr_free(ehcip->ehci_htable[i]);
1976 	}
1977 
1978 	kmem_free(ehcip->ehci_htable,
1979 	    ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
1980 }
1981 
1982 
1983 /*
1984  * ehci_cpr_suspend
1985  */
1986 int
1987 ehci_cpr_suspend(ehci_state_t	*ehcip)
1988 {
1989 	int	i;
1990 
1991 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1992 	    "ehci_cpr_suspend:");
1993 
1994 	/* Call into the root hub and suspend it */
1995 	if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
1996 
1997 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1998 		    "ehci_cpr_suspend: root hub fails to suspend");
1999 
2000 		return (DDI_FAILURE);
2001 	}
2002 
2003 	/* Only root hub's intr pipe should be open at this time */
2004 	mutex_enter(&ehcip->ehci_int_mutex);
2005 
2006 	ASSERT(ehcip->ehci_open_pipe_count == 0);
2007 
2008 	/* Just wait till all resources are reclaimed */
2009 	i = 0;
2010 	while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2011 		ehci_handle_endpoint_reclaimation(ehcip);
2012 		(void) ehci_wait_for_sof(ehcip);
2013 	}
2014 	ASSERT(ehcip->ehci_reclaim_list == NULL);
2015 
2016 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2017 	    "ehci_cpr_suspend: Disable HC QH list processing");
2018 
2019 	/* Disable all EHCI QH list processing */
2020 	Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2021 	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2022 
2023 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2024 	    "ehci_cpr_suspend: Disable HC interrupts");
2025 
2026 	/* Disable all EHCI interrupts */
2027 	Set_OpReg(ehci_interrupt, 0);
2028 
2029 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2030 	    "ehci_cpr_suspend: Wait for the next SOF");
2031 
2032 	/* Wait for the next SOF */
2033 	if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2034 
2035 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2036 		    "ehci_cpr_suspend: ehci host controller suspend failed");
2037 
2038 		mutex_exit(&ehcip->ehci_int_mutex);
2039 		return (DDI_FAILURE);
2040 	}
2041 
2042 	/*
2043 	 * Stop the ehci host controller if no usb keyboard
2044 	 * is connected or if ehci is forced off.
2045 	 */
2046 	if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2047 		Set_OpReg(ehci_command,
2048 		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2049 	}
2050 
2051 	/* Set host controller soft state to suspend */
2052 	ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2053 
2054 	mutex_exit(&ehcip->ehci_int_mutex);
2055 
2056 	return (DDI_SUCCESS);
2057 }
2058 
2059 
2060 /*
2061  * ehci_cpr_resume
2062  */
2063 int
2064 ehci_cpr_resume(ehci_state_t	*ehcip)
2065 {
2066 	mutex_enter(&ehcip->ehci_int_mutex);
2067 
2068 	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2069 	    "ehci_cpr_resume: Restart the controller");
2070 
2071 	/* Cleanup ehci specific information across cpr */
2072 	ehci_cpr_cleanup(ehcip);
2073 
2074 	/* Restart the controller */
2075 	if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2076 
2077 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2078 		    "ehci_cpr_resume: ehci host controller resume failed ");
2079 
2080 		mutex_exit(&ehcip->ehci_int_mutex);
2081 
2082 		return (DDI_FAILURE);
2083 	}
2084 
2085 	mutex_exit(&ehcip->ehci_int_mutex);
2086 
2087 	/* Now resume the root hub */
2088 	if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2089 
2090 		return (DDI_FAILURE);
2091 	}
2092 
2093 	return (DDI_SUCCESS);
2094 }
2095 
2096 
2097 /*
2098  * Bandwidth Allocation functions
2099  */
2100 
2101 /*
2102  * ehci_allocate_bandwidth:
2103  *
2104  * Figure out whether or not this interval may be supported. Return the index
2105  * into the  lattice if it can be supported.  Return allocation failure if it
2106  * cannot be supported.
2107  */
2108 int
2109 ehci_allocate_bandwidth(
2110 	ehci_state_t		*ehcip,
2111 	usba_pipe_handle_data_t	*ph,
2112 	uint_t			*pnode,
2113 	uchar_t			*smask,
2114 	uchar_t			*cmask)
2115 {
2116 	int			error = USB_SUCCESS;
2117 
2118 	/* This routine is protected by the ehci_int_mutex */
2119 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2120 
2121 	/* Reset the pnode before the bandwidth search */
2122 	*pnode = 0;
2123 
2124 	/* Allocate high speed bandwidth */
2125 	if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2126 	    ph, pnode, smask, cmask)) != USB_SUCCESS) {
2127 
2128 		return (error);
2129 	}
2130 
2131 	/*
2132 	 * For low/full speed usb devices, allocate classic TT bandwidth
2133 	 * in addition to high speed bandwidth.
2134 	 */
2135 	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2136 
2137 		/* Allocate classic TT bandwidth */
2138 		if ((error = ehci_allocate_classic_tt_bandwidth(
2139 		    ehcip, ph, *pnode)) != USB_SUCCESS) {
2140 
2141 			/* Deallocate high speed bandwidth */
2142 			ehci_deallocate_high_speed_bandwidth(
2143 			    ehcip, ph, *pnode, *smask, *cmask);
2144 		}
2145 	}
2146 
2147 	return (error);
2148 }
2149 
2150 
2151 /*
2152  * ehci_allocate_high_speed_bandwidth:
2153  *
2154  * Allocate high speed bandwidth for the low/full/high speed interrupt and
2155  * isochronous endpoints.
2156  */
2157 static int
2158 ehci_allocate_high_speed_bandwidth(
2159 	ehci_state_t		*ehcip,
2160 	usba_pipe_handle_data_t	*ph,
2161 	uint_t			*pnode,
2162 	uchar_t			*smask,
2163 	uchar_t			*cmask)
2164 {
2165 	uint_t			sbandwidth, cbandwidth;
2166 	int			interval;
2167 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2168 	usba_device_t		*child_ud;
2169 	usb_port_status_t	port_status;
2170 	int			error;
2171 
2172 	/* This routine is protected by the ehci_int_mutex */
2173 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2174 
2175 	/* Get child's usba device structure */
2176 	child_ud = ph->p_usba_device;
2177 
2178 	mutex_enter(&child_ud->usb_mutex);
2179 
2180 	/* Get the current usb device's port status */
2181 	port_status = ph->p_usba_device->usb_port_status;
2182 
2183 	mutex_exit(&child_ud->usb_mutex);
2184 
2185 	/*
2186 	 * Calculate the length in bytes of a transaction on this
2187 	 * periodic endpoint. Return failure if maximum packet is
2188 	 * zero.
2189 	 */
2190 	error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2191 	    port_status, &sbandwidth, &cbandwidth);
2192 	if (error != USB_SUCCESS) {
2193 
2194 		return (error);
2195 	}
2196 
2197 	/*
2198 	 * Adjust polling interval to be a power of 2.
2199 	 * If this interval can't be supported, return
2200 	 * allocation failure.
2201 	 */
2202 	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2203 	if (interval == USB_FAILURE) {
2204 
2205 		return (USB_FAILURE);
2206 	}
2207 
2208 	if (port_status == USBA_HIGH_SPEED_DEV) {
2209 		/* Allocate bandwidth for high speed devices */
2210 		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2211 		    USB_EP_ATTR_ISOCH) {
2212 			error = USB_SUCCESS;
2213 		} else {
2214 
2215 			error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2216 			    endpoint, sbandwidth, interval);
2217 		}
2218 
2219 		*cmask = 0x00;
2220 
2221 	} else {
2222 		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2223 		    USB_EP_ATTR_INTR) {
2224 
2225 			/* Allocate bandwidth for low speed interrupt */
2226 			error = ehci_find_bestfit_ls_intr_mask(ehcip,
2227 			    smask, cmask, pnode, sbandwidth, cbandwidth,
2228 			    interval);
2229 		} else {
2230 			if ((endpoint->bEndpointAddress &
2231 			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2232 
2233 				/* Allocate bandwidth for sitd in */
2234 				error = ehci_find_bestfit_sitd_in_mask(ehcip,
2235 				    smask, cmask, pnode, sbandwidth, cbandwidth,
2236 				    interval);
2237 			} else {
2238 
2239 				/* Allocate bandwidth for sitd out */
2240 				error = ehci_find_bestfit_sitd_out_mask(ehcip,
2241 				    smask, pnode, sbandwidth, interval);
2242 				*cmask = 0x00;
2243 			}
2244 		}
2245 	}
2246 
2247 	if (error != USB_SUCCESS) {
2248 		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2249 		    "ehci_allocate_high_speed_bandwidth: Reached maximum "
2250 		    "bandwidth value and cannot allocate bandwidth for a "
2251 		    "given high-speed periodic endpoint");
2252 
2253 		return (USB_NO_BANDWIDTH);
2254 	}
2255 
2256 	return (error);
2257 }
2258 
2259 
2260 /*
2261  * ehci_allocate_classic_tt_bandwidth:
2262  *
2263  * Allocate classic TT bandwidth for the low/full speed interrupt and
2264  * isochronous endpoints.
2265  */
2266 static int
2267 ehci_allocate_classic_tt_bandwidth(
2268 	ehci_state_t		*ehcip,
2269 	usba_pipe_handle_data_t	*ph,
2270 	uint_t			pnode)
2271 {
2272 	uint_t			bandwidth, min;
2273 	uint_t			height, leftmost, list;
2274 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2275 	usba_device_t		*child_ud, *parent_ud;
2276 	usb_port_status_t	port_status;
2277 	int			i, interval;
2278 
2279 	/* This routine is protected by the ehci_int_mutex */
2280 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2281 
2282 	/* Get child's usba device structure */
2283 	child_ud = ph->p_usba_device;
2284 
2285 	mutex_enter(&child_ud->usb_mutex);
2286 
2287 	/* Get the current usb device's port status */
2288 	port_status = child_ud->usb_port_status;
2289 
2290 	/* Get the parent high speed hub's usba device structure */
2291 	parent_ud = child_ud->usb_hs_hub_usba_dev;
2292 
2293 	mutex_exit(&child_ud->usb_mutex);
2294 
2295 	USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2296 	    "ehci_allocate_classic_tt_bandwidth: "
2297 	    "child_ud 0x%p parent_ud 0x%p",
2298 	    (void *)child_ud, (void *)parent_ud);
2299 
2300 	/*
2301 	 * Calculate the length in bytes of a transaction on this
2302 	 * periodic endpoint. Return failure if maximum packet is
2303 	 * zero.
2304 	 */
2305 	if (ehci_compute_classic_bandwidth(endpoint,
2306 	    port_status, &bandwidth) != USB_SUCCESS) {
2307 
2308 		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2309 		    "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2310 		    "with zero endpoint maximum packet size is not supported");
2311 
2312 		return (USB_NOT_SUPPORTED);
2313 	}
2314 
2315 	USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2316 	    "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2317 
2318 	mutex_enter(&parent_ud->usb_mutex);
2319 
2320 	/*
2321 	 * If the length in bytes plus the allocated bandwidth exceeds
2322 	 * the maximum, return bandwidth allocation failure.
2323 	 */
2324 	if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2325 	    FS_PERIODIC_BANDWIDTH) {
2326 
2327 		mutex_exit(&parent_ud->usb_mutex);
2328 
2329 		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2330 		    "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2331 		    "bandwidth value and cannot allocate bandwidth for a "
2332 		    "given low/full speed periodic endpoint");
2333 
2334 		return (USB_NO_BANDWIDTH);
2335 	}
2336 
2337 	mutex_exit(&parent_ud->usb_mutex);
2338 
2339 	/* Adjust polling interval to be a power of 2 */
2340 	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2341 
2342 	/* Find the height in the tree */
2343 	height = ehci_lattice_height(interval);
2344 
2345 	/* Find the leftmost leaf in the subtree specified by the node. */
2346 	leftmost = ehci_leftmost_leaf(pnode, height);
2347 
2348 	mutex_enter(&parent_ud->usb_mutex);
2349 
2350 	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2351 		list = ehci_index[leftmost + i];
2352 
2353 		if ((parent_ud->usb_hs_hub_bandwidth[list] +
2354 		    bandwidth) > FS_PERIODIC_BANDWIDTH) {
2355 
2356 			mutex_exit(&parent_ud->usb_mutex);
2357 
2358 			USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2359 			    "ehci_allocate_classic_tt_bandwidth: Reached "
2360 			    "maximum bandwidth value and cannot allocate "
2361 			    "bandwidth for low/full periodic endpoint");
2362 
2363 			return (USB_NO_BANDWIDTH);
2364 		}
2365 	}
2366 
2367 	/*
2368 	 * All the leaves for this node must be updated with the bandwidth.
2369 	 */
2370 	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2371 		list = ehci_index[leftmost + i];
2372 		parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2373 	}
2374 
2375 	/* Find the leaf with the smallest allocated bandwidth */
2376 	min = parent_ud->usb_hs_hub_bandwidth[0];
2377 
2378 	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2379 		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2380 			min = parent_ud->usb_hs_hub_bandwidth[i];
2381 		}
2382 	}
2383 
2384 	/* Save the minimum for later use */
2385 	parent_ud->usb_hs_hub_min_bandwidth = min;
2386 
2387 	mutex_exit(&parent_ud->usb_mutex);
2388 
2389 	return (USB_SUCCESS);
2390 }
2391 
2392 
2393 /*
2394  * ehci_deallocate_bandwidth:
2395  *
2396  * Deallocate bandwidth for the given node in the lattice and the length
2397  * of transfer.
2398  */
2399 void
2400 ehci_deallocate_bandwidth(
2401 	ehci_state_t		*ehcip,
2402 	usba_pipe_handle_data_t	*ph,
2403 	uint_t			pnode,
2404 	uchar_t			smask,
2405 	uchar_t			cmask)
2406 {
2407 	/* This routine is protected by the ehci_int_mutex */
2408 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2409 
2410 	ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2411 
2412 	/*
2413 	 * For low/full speed usb devices, deallocate classic TT bandwidth
2414 	 * in addition to high speed bandwidth.
2415 	 */
2416 	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2417 
2418 		/* Deallocate classic TT bandwidth */
2419 		ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2420 	}
2421 }
2422 
2423 
2424 /*
2425  * ehci_deallocate_high_speed_bandwidth:
2426  *
2427  * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2428  */
2429 static void
2430 ehci_deallocate_high_speed_bandwidth(
2431 	ehci_state_t		*ehcip,
2432 	usba_pipe_handle_data_t	*ph,
2433 	uint_t			pnode,
2434 	uchar_t			smask,
2435 	uchar_t			cmask)
2436 {
2437 	uint_t			height, leftmost;
2438 	uint_t			list_count;
2439 	uint_t			sbandwidth, cbandwidth;
2440 	int			interval;
2441 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2442 	usba_device_t		*child_ud;
2443 	usb_port_status_t	port_status;
2444 
2445 	/* This routine is protected by the ehci_int_mutex */
2446 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2447 
2448 	/* Get child's usba device structure */
2449 	child_ud = ph->p_usba_device;
2450 
2451 	mutex_enter(&child_ud->usb_mutex);
2452 
2453 	/* Get the current usb device's port status */
2454 	port_status = ph->p_usba_device->usb_port_status;
2455 
2456 	mutex_exit(&child_ud->usb_mutex);
2457 
2458 	(void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2459 	    port_status, &sbandwidth, &cbandwidth);
2460 
2461 	/* Adjust polling interval to be a power of 2 */
2462 	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2463 
2464 	/* Find the height in the tree */
2465 	height = ehci_lattice_height(interval);
2466 
2467 	/*
2468 	 * Find the leftmost leaf in the subtree specified by the node
2469 	 */
2470 	leftmost = ehci_leftmost_leaf(pnode, height);
2471 
2472 	list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2473 
2474 	/* Delete the bandwidth from the appropriate lists */
2475 	if (port_status == USBA_HIGH_SPEED_DEV) {
2476 
2477 		ehci_update_bw_availability(ehcip, -sbandwidth,
2478 		    leftmost, list_count, smask);
2479 	} else {
2480 		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2481 		    USB_EP_ATTR_INTR) {
2482 
2483 			ehci_update_bw_availability(ehcip, -sbandwidth,
2484 			    leftmost, list_count, smask);
2485 			ehci_update_bw_availability(ehcip, -cbandwidth,
2486 			    leftmost, list_count, cmask);
2487 		} else {
2488 			if ((endpoint->bEndpointAddress &
2489 			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2490 
2491 				ehci_update_bw_availability(ehcip, -sbandwidth,
2492 				    leftmost, list_count, smask);
2493 				ehci_update_bw_availability(ehcip,
2494 				    -MAX_UFRAME_SITD_XFER, leftmost,
2495 				    list_count, cmask);
2496 			} else {
2497 
2498 				ehci_update_bw_availability(ehcip,
2499 				    -MAX_UFRAME_SITD_XFER, leftmost,
2500 				    list_count, smask);
2501 			}
2502 		}
2503 	}
2504 }
2505 
2506 /*
2507  * ehci_deallocate_classic_tt_bandwidth:
2508  *
2509  * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2510  */
2511 static void
2512 ehci_deallocate_classic_tt_bandwidth(
2513 	ehci_state_t		*ehcip,
2514 	usba_pipe_handle_data_t	*ph,
2515 	uint_t			pnode)
2516 {
2517 	uint_t			bandwidth, height, leftmost, list, min;
2518 	int			i, interval;
2519 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2520 	usba_device_t		*child_ud, *parent_ud;
2521 	usb_port_status_t	port_status;
2522 
2523 	/* This routine is protected by the ehci_int_mutex */
2524 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2525 
2526 	/* Get child's usba device structure */
2527 	child_ud = ph->p_usba_device;
2528 
2529 	mutex_enter(&child_ud->usb_mutex);
2530 
2531 	/* Get the current usb device's port status */
2532 	port_status = child_ud->usb_port_status;
2533 
2534 	/* Get the parent high speed hub's usba device structure */
2535 	parent_ud = child_ud->usb_hs_hub_usba_dev;
2536 
2537 	mutex_exit(&child_ud->usb_mutex);
2538 
2539 	/* Obtain the bandwidth */
2540 	(void) ehci_compute_classic_bandwidth(endpoint,
2541 	    port_status, &bandwidth);
2542 
2543 	/* Adjust polling interval to be a power of 2 */
2544 	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2545 
2546 	/* Find the height in the tree */
2547 	height = ehci_lattice_height(interval);
2548 
2549 	/* Find the leftmost leaf in the subtree specified by the node */
2550 	leftmost = ehci_leftmost_leaf(pnode, height);
2551 
2552 	mutex_enter(&parent_ud->usb_mutex);
2553 
2554 	/* Delete the bandwidth from the appropriate lists */
2555 	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2556 		list = ehci_index[leftmost + i];
2557 		parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2558 	}
2559 
2560 	/* Find the leaf with the smallest allocated bandwidth */
2561 	min = parent_ud->usb_hs_hub_bandwidth[0];
2562 
2563 	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2564 		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2565 			min = parent_ud->usb_hs_hub_bandwidth[i];
2566 		}
2567 	}
2568 
2569 	/* Save the minimum for later use */
2570 	parent_ud->usb_hs_hub_min_bandwidth = min;
2571 
2572 	mutex_exit(&parent_ud->usb_mutex);
2573 }
2574 
2575 
2576 /*
2577  * ehci_compute_high_speed_bandwidth:
2578  *
2579  * Given a periodic endpoint (interrupt or isochronous) determine the total
2580  * bandwidth for one transaction. The EHCI host controller traverses the
2581  * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2582  * services an endpoint, only a single transaction attempt is made. The  HC
2583  * moves to the next Endpoint Descriptor after the first transaction attempt
2584  * rather than finishing the entire Transfer Descriptor. Therefore, when  a
2585  * Transfer Descriptor is inserted into the lattice, we will only count the
2586  * number of bytes for one transaction.
2587  *
2588  * The following are the formulas used for calculating bandwidth in terms of
2589  * bytes and they are for a single USB high speed transaction.  The protocol
2590  * overheads will be different for each type of USB transfer & all these
2591  * formulas & protocol overheads are derived from section 5.11.3 of the
2592  * USB 2.0 Specification.
2593  *
2594  * High-Speed:
2595  *		Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2596  *
2597  * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2598  *
2599  *		Protocol overhead + Split transaction overhead +
2600  *			((MaxPktSz * 7)/6) + Host_Delay;
2601  */
2602 /* ARGSUSED */
2603 static int
2604 ehci_compute_high_speed_bandwidth(
2605 	ehci_state_t		*ehcip,
2606 	usb_ep_descr_t		*endpoint,
2607 	usb_port_status_t	port_status,
2608 	uint_t			*sbandwidth,
2609 	uint_t			*cbandwidth)
2610 {
2611 	ushort_t		maxpacketsize = endpoint->wMaxPacketSize;
2612 
2613 	/* Return failure if endpoint maximum packet is zero */
2614 	if (maxpacketsize == 0) {
2615 		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2616 		    "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2617 		    "with zero endpoint maximum packet size is not supported");
2618 
2619 		return (USB_NOT_SUPPORTED);
2620 	}
2621 
2622 	/* Add bit-stuffing overhead */
2623 	maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2624 
2625 	/* Add Host Controller specific delay to required bandwidth */
2626 	*sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2627 
2628 	/* Add xfer specific protocol overheads */
2629 	if ((endpoint->bmAttributes &
2630 	    USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2631 		/* High speed interrupt transaction */
2632 		*sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2633 	} else {
2634 		/* Isochronous transaction */
2635 		*sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2636 	}
2637 
2638 	/*
2639 	 * For low/full speed devices, add split transaction specific
2640 	 * overheads.
2641 	 */
2642 	if (port_status != USBA_HIGH_SPEED_DEV) {
2643 		/*
2644 		 * Add start and complete split transaction
2645 		 * token overheads.
2646 		 */
2647 		*cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2648 		*sbandwidth += START_SPLIT_OVERHEAD;
2649 
2650 		/* Add data overhead depending on data direction */
2651 		if ((endpoint->bEndpointAddress &
2652 		    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2653 			*cbandwidth += maxpacketsize;
2654 		} else {
2655 			if ((endpoint->bmAttributes &
2656 			    USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2657 				/* There are no complete splits for isoch out */
2658 				*cbandwidth = 0;
2659 			}
2660 			*sbandwidth += maxpacketsize;
2661 		}
2662 	} else {
2663 		uint_t		xactions;
2664 
2665 		/* Get the max transactions per microframe */
2666 		xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2667 		    USB_EP_MAX_XACTS_SHIFT) + 1;
2668 
2669 		/* High speed transaction */
2670 		*sbandwidth += maxpacketsize;
2671 
2672 		/* Calculate bandwidth per micro-frame */
2673 		*sbandwidth *= xactions;
2674 
2675 		*cbandwidth = 0;
2676 	}
2677 
2678 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2679 	    "ehci_allocate_high_speed_bandwidth: "
2680 	    "Start split bandwidth %d Complete split bandwidth %d",
2681 	    *sbandwidth, *cbandwidth);
2682 
2683 	return (USB_SUCCESS);
2684 }
2685 
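/*
 * Worked example (illustrative only; the overhead macros used by
 * ehci_compute_high_speed_bandwidth() are left symbolic): a high speed
 * interrupt endpoint with a wMaxPacketSize of 64 and one transaction per
 * microframe needs
 *
 *	sbandwidth = EHCI_HOST_CONTROLLER_DELAY +
 *	    HS_NON_ISOC_PROTO_OVERHEAD + ((64 * 7) / 6) bytes
 *
 * that is, the 64 byte payload inflated to 74 bytes by the 7/6
 * bit-stuffing factor plus the fixed overheads, while cbandwidth is 0
 * because no split transactions are involved.
 */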
2686 
2687 /*
2688  * ehci_compute_classic_bandwidth:
2689  *
2690  * Given a periodic endpoint (interrupt or isochronous) determine the total
2691  * bandwidth for one transaction. The EHCI host controller traverses the
2692  * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2693  * services an endpoint, only a single transaction attempt is made. The  HC
2694  * moves to the next Endpoint Descriptor after the first transaction attempt
2695  * rather than finishing the entire Transfer Descriptor. Therefore, when  a
2696  * Transfer Descriptor is inserted into the lattice, we will only count the
2697  * number of bytes for one transaction.
2698  *
2699  * The following are the formulas used for calculating bandwidth in terms of
2700  * bytes and they are for a single low/full speed (classic) transaction.  The
2701  * protocol overheads will be different for each type of USB transfer & all
2702  * these formulas & protocol overheads are derived from section 5.11.3 of the
2703  * USB 2.0 Specification.
2704  *
2705  * Low-Speed:
2706  *		Protocol overhead + Hub LS overhead +
2707  *		(Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2708  *
2709  * Full-Speed:
2710  *		Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2711  */
2712 /* ARGSUSED */
2713 static int
2714 ehci_compute_classic_bandwidth(
2715 	usb_ep_descr_t		*endpoint,
2716 	usb_port_status_t	port_status,
2717 	uint_t			*bandwidth)
2718 {
2719 	ushort_t		maxpacketsize = endpoint->wMaxPacketSize;
2720 
2721 	/*
2722 	 * If endpoint maximum packet is zero, then return immediately.
2723 	 */
2724 	if (maxpacketsize == 0) {
2725 
2726 		return (USB_NOT_SUPPORTED);
2727 	}
2728 
2729 	/* Add TT delay to required bandwidth */
2730 	*bandwidth = TT_DELAY;
2731 
2732 	/* Add bit-stuffing overhead */
2733 	maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2734 
2735 	switch (port_status) {
2736 	case USBA_LOW_SPEED_DEV:
2737 		/* Low speed interrupt transaction */
2738 		*bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2739 		    HUB_LOW_SPEED_PROTO_OVERHEAD +
2740 		    (LOW_SPEED_CLOCK * maxpacketsize));
2741 		break;
2742 	case USBA_FULL_SPEED_DEV:
2743 		/* Full speed transaction */
2744 		*bandwidth += maxpacketsize;
2745 
2746 		/* Add xfer specific protocol overheads */
2747 		if ((endpoint->bmAttributes &
2748 		    USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2749 			/* Full speed interrupt transaction */
2750 			*bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2751 		} else {
2752 			/* Isochronous and input transaction */
2753 			if ((endpoint->bEndpointAddress &
2754 			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2755 				*bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2756 			} else {
2757 				/* Isochronous and output transaction */
2758 				*bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2759 			}
2760 		}
2761 		break;
2762 	}
2763 
2764 	return (USB_SUCCESS);
2765 }
2766 
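/*
 * Worked example (illustrative only; TT_DELAY and the protocol overhead
 * macros are left symbolic): a full speed interrupt endpoint with a
 * wMaxPacketSize of 8 needs
 *
 *	bandwidth = TT_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + ((8 * 7) / 6)
 *		  = TT_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + 9 bytes
 *
 * of classic TT bandwidth per transaction.
 */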
2767 
2768 /*
2769  * ehci_adjust_polling_interval:
2770  *
2771  * Adjust the polling interval according to the usb device speed.
2772  */
2773 /* ARGSUSED */
2774 int
2775 ehci_adjust_polling_interval(
2776 	ehci_state_t		*ehcip,
2777 	usb_ep_descr_t		*endpoint,
2778 	usb_port_status_t	port_status)
2779 {
2780 	uint_t			interval;
2781 	int			i = 0;
2782 
2783 	/* Get the polling interval */
2784 	interval = endpoint->bInterval;
2785 
2786 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2787 	    "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2788 
2789 	/*
2790 	 * According to the USB 2.0 Specification, a high-speed endpoint's
2791 	 * polling intervals are specified in terms of 125us micro
2792 	 * frames, whereas full/low speed endpoints' polling intervals
2793 	 * are specified in milliseconds.
2794 	 *
2795 	 * High speed interrupt/isochronous endpoints can specify a
2796 	 * desired polling interval between 1 and 16 micro-frames,
2797 	 * whereas full/low speed endpoints can specify between 1 and 255
2798 	 * milliseconds.
2799 	 */
2800 	switch (port_status) {
2801 	case USBA_LOW_SPEED_DEV:
2802 		/*
2803 		 * Low speed  endpoints are limited to	specifying
2804 		 * only 8ms to 255ms in this driver. If a device
2805 		 * reports a polling interval that is less than 8ms,
2806 		 * it will use 8 ms instead.
2807 		 */
2808 		if (interval < LS_MIN_POLL_INTERVAL) {
2809 
2810 			USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2811 			    "Low speed endpoint's poll interval of %d ms "
2812 			    "is below threshold. Rounding up to %d ms",
2813 			    interval, LS_MIN_POLL_INTERVAL);
2814 
2815 			interval = LS_MIN_POLL_INTERVAL;
2816 		}
2817 
2818 		/*
2819 		 * Return an error if the polling interval is greater
2820 		 * than 255ms.
2821 		 */
2822 		if (interval > LS_MAX_POLL_INTERVAL) {
2823 
2824 			USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2825 			    "Low speed endpoint's poll interval is "
2826 			    "greater than %d ms", LS_MAX_POLL_INTERVAL);
2827 
2828 			return (USB_FAILURE);
2829 		}
2830 		break;
2831 
2832 	case USBA_FULL_SPEED_DEV:
2833 		/*
2834 		 * Return an error if the polling interval is less
2835 		 * than 1ms or greater than 255ms.
2836 		 */
2837 		if ((interval < FS_MIN_POLL_INTERVAL) ||
2838 		    (interval > FS_MAX_POLL_INTERVAL)) {
2839 
2840 			USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2841 			    "Full speed endpoint's poll interval must "
2842 			    "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2843 			    FS_MAX_POLL_INTERVAL);
2844 
2845 			return (USB_FAILURE);
2846 		}
2847 		break;
2848 	case USBA_HIGH_SPEED_DEV:
2849 		/*
2850 		 * Return an error if the polling interval is less than 1
2851 		 * or greater than 16. Convert this value to 125us
2852 		 * units using 2^(bInterval - 1). Refer to the usb 2.0
2853 		 * spec, page 51, for details.
2854 		 */
2855 		if ((interval < HS_MIN_POLL_INTERVAL) ||
2856 		    (interval > HS_MAX_POLL_INTERVAL)) {
2857 
2858 			USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2859 			    "High speed endpoint's poll interval "
2860 			    "must be between %d and %d units",
2861 			    HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2862 
2863 			return (USB_FAILURE);
2864 		}
2865 
2866 		/* Adjust high speed device polling interval */
2867 		interval =
2868 		    ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2869 
2870 		break;
2871 	}
2872 
2873 	/*
2874 	 * If polling interval is greater than 32ms,
2875 	 * clamp it to 32ms.
2876 	 */
2877 	if (interval > EHCI_NUM_INTR_QH_LISTS) {
2878 		interval = EHCI_NUM_INTR_QH_LISTS;
2879 	}
2880 
2881 	/*
2882 	 * Find the nearest power of 2 that's less
2883 	 * than or equal to the interval.
2884 	 */
2885 	while ((ehci_pow_2(i)) <= interval) {
2886 		i++;
2887 	}
2888 
2889 	return (ehci_pow_2((i - 1)));
2890 }
2891 
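/*
 * Worked example (illustrative only; assumes EHCI_NUM_INTR_QH_LISTS is
 * 32): a full speed interrupt endpoint reporting a bInterval of 10 ms
 * passes the range checks above, is not clamped by the 32ms limit, and
 * is rounded down to the nearest power of two, so
 * ehci_adjust_polling_interval() returns 8 (ms).
 */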
2892 
2893 /*
2894  * ehci_adjust_high_speed_polling_interval:
2895  */
2896 /* ARGSUSED */
2897 static int
2898 ehci_adjust_high_speed_polling_interval(
2899 	ehci_state_t		*ehcip,
2900 	usb_ep_descr_t		*endpoint)
2901 {
2902 	uint_t			interval;
2903 
2904 	/* Get the polling interval */
2905 	interval = ehci_pow_2(endpoint->bInterval - 1);
2906 
2907 	/*
2908 	 * Convert the polling interval from micro frames (125us units)
2909 	 * to milliseconds.
2910 	 */
2911 	if (interval <= EHCI_MAX_UFRAMES) {
2912 		interval = 1;
2913 	} else {
2914 		interval = interval/EHCI_MAX_UFRAMES;
2915 	}
2916 
2917 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2918 	    "ehci_adjust_high_speed_polling_interval: "
2919 	    "High speed adjusted interval 0x%x", interval);
2920 
2921 	return (interval);
2922 }
2923 
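/*
 * Worked example (illustrative only; assumes EHCI_MAX_UFRAMES is 8,
 * i.e. eight 125us microframes per 1ms frame): a high speed endpoint
 * with a bInterval of 6 polls every 2^(6 - 1) = 32 microframes, which
 * this routine converts to 32 / 8 = 4 ms.
 */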
2924 
2925 /*
2926  * ehci_lattice_height:
2927  *
2928  * Given the requested interval, find the height in the tree at which the
2929  * nodes for this interval fall.  The height is measured as the number of
2930  * nodes from the leaf to the level specified by the interval.  The root of
2931  * the tree is at height TREE_HEIGHT.
2932  */
2933 static uint_t
2934 ehci_lattice_height(uint_t interval)
2935 {
2936 	return (TREE_HEIGHT - (ehci_log_2(interval)));
2937 }
2938 
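/*
 * For example (illustrative only; assumes a 32-leaf lattice, i.e. a
 * TREE_HEIGHT of 5): an interval of 8 ms maps to a height of
 * 5 - log2(8) = 2, two node levels above the leaves.
 */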
2939 
2940 /*
2941  * ehci_lattice_parent:
2942  *
2943  * Given a node in the lattice, find the index of the parent node
2944  */
2945 static uint_t
2946 ehci_lattice_parent(uint_t node)
2947 {
2948 	if ((node % 2) == 0) {
2949 
2950 		return ((node/2) - 1);
2951 	} else {
2952 
2953 		return ((node + 1)/2 - 1);
2954 	}
2955 }
2956 
2957 
2958 /*
2959  * ehci_find_periodic_node:
2960  *
2961  * Based on the "real" array leaf node and interval, get the periodic node.
2962  */
2963 static uint_t
2964 ehci_find_periodic_node(uint_t leaf, int interval)
{
2965 	uint_t	lattice_leaf;
2966 	uint_t	height = ehci_lattice_height(interval);
2967 	uint_t	pnode;
2968 	int	i;
2969 
2970 	/* Get the leaf number in the lattice */
2971 	lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
2972 
2973 	/* Get the node in the lattice based on the height and leaf */
2974 	pnode = lattice_leaf;
2975 	for (i = 0; i < height; i++) {
2976 		pnode = ehci_lattice_parent(pnode);
2977 	}
2978 
2979 	return (pnode);
2980 }
2981 
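/*
 * Worked example (illustrative only; assumes EHCI_NUM_INTR_QH_LISTS is
 * 32 and TREE_HEIGHT is 5): for array leaf 2 and an interval of 4,
 * lattice_leaf = 2 + 32 - 1 = 33 and height = 5 - log2(4) = 3, so the
 * parent chain 33 -> 16 -> 7 -> 3 yields periodic node 3.
 */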
2982 
2983 /*
2984  * ehci_leftmost_leaf:
2985  *
2986  * Find the leftmost leaf in the subtree specified by the node. Height refers
2987  * to the number of nodes from the bottom of the tree to the node, including
2988  * the node.
2989  *
2990  * The formula for a zero based tree is:
2991  *     2^H * Node + 2^H - 1
2992  * The leaf of the tree is an array, convert the number for the array.
2993  *     Subtract the size of nodes not in the array
2994  *     2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
2995  *     2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
2996  *     2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
2997  *	   0
2998  *	 1   2
2999  *	0 1 2 3
3000  */
3001 static uint_t
3002 ehci_leftmost_leaf(
3003 	uint_t	node,
3004 	uint_t	height)
3005 {
3006 	return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3007 }
3008 
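/*
 * Worked example (illustrative only; assumes EHCI_NUM_INTR_QH_LISTS is
 * 32): for node 16 at height 1 the formula gives
 * 2^1 * (16 + 1) - 32 = 2, so the subtree under node 16 starts at array
 * leaf 2 (and, at that height, also covers array leaf 3).
 */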
3009 
3010 /*
3011  * ehci_pow_2:
3012  *
3013  * Compute 2 to the power of x
3014  */
3015 static uint_t
3016 ehci_pow_2(uint_t x)
3017 {
3018 	if (x == 0) {
3019 
3020 		return (1);
3021 	} else {
3022 
3023 		return (2 << (x - 1));
3024 	}
3025 }
3026 
3027 
3028 /*
3029  * ehci_log_2:
3030  *
3031  * Compute log base 2 of x
3032  */
3033 static uint_t
3034 ehci_log_2(uint_t x)
3035 {
3036 	int i = 0;
3037 
3038 	while (x != 1) {
3039 		x = x >> 1;
3040 		i++;
3041 	}
3042 
3043 	return (i);
3044 }
3045 
3046 
3047 /*
3048  * ehci_find_bestfit_hs_mask:
3049  *
3050  * Find the smask and cmask in the bandwidth allocation, and update the
3051  * bandwidth allocation.
3052  */
3053 static int
3054 ehci_find_bestfit_hs_mask(
3055 	ehci_state_t	*ehcip,
3056 	uchar_t		*smask,
3057 	uint_t		*pnode,
3058 	usb_ep_descr_t	*endpoint,
3059 	uint_t		bandwidth,
3060 	int		interval)
3061 {
3062 	int		i;
3063 	uint_t		elements, index;
3064 	int		array_leaf, best_array_leaf;
3065 	uint_t		node_bandwidth, best_node_bandwidth;
3066 	uint_t		leaf_count;
3067 	uchar_t		bw_mask;
3068 	uchar_t		best_smask;
3069 
3070 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3071 	    "ehci_find_bestfit_hs_mask: ");
3072 
3073 	/* Get all the valid smasks */
3074 	switch (ehci_pow_2(endpoint->bInterval - 1)) {
3075 	case EHCI_INTR_1US_POLL:
3076 		index = EHCI_1US_MASK_INDEX;
3077 		elements = EHCI_INTR_1US_POLL;
3078 		break;
3079 	case EHCI_INTR_2US_POLL:
3080 		index = EHCI_2US_MASK_INDEX;
3081 		elements = EHCI_INTR_2US_POLL;
3082 		break;
3083 	case EHCI_INTR_4US_POLL:
3084 		index = EHCI_4US_MASK_INDEX;
3085 		elements = EHCI_INTR_4US_POLL;
3086 		break;
3087 	case EHCI_INTR_XUS_POLL:
3088 	default:
3089 		index = EHCI_XUS_MASK_INDEX;
3090 		elements = EHCI_INTR_XUS_POLL;
3091 		break;
3092 	}
3093 
3094 	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3095 
3096 	/*
3097 	 * Because of the way the leaves are set up, we will automatically
3098 	 * hit the leftmost leaf of every possible node with this interval.
3099 	 */
3100 	best_smask = 0x00;
3101 	best_node_bandwidth = 0;
3102 	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3103 		/* Find the bandwidth mask */
3104 		node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3105 		    bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3106 
3107 		/*
3108 		 * If this node cannot support our requirements skip to the
3109 		 * next leaf.
3110 		 */
3111 		if (bw_mask == 0x00) {
3112 			continue;
3113 		}
3114 
3115 		/*
3116 		 * Now make sure our bandwidth requirements can be
3117 		 * satisfied with one of smasks in this node.
3118 		 */
3119 		*smask = 0x00;
3120 		for (i = index; i < (index + elements); i++) {
3121 			/* Check the start split mask value */
3122 			if (ehci_start_split_mask[i] & bw_mask) {
3123 				*smask = ehci_start_split_mask[i];
3124 				break;
3125 			}
3126 		}
3127 
3128 		/*
3129 		 * If an appropriate smask is found save the information if:
3130 		 * o best_smask has not been found yet.
3131 		 * - or -
3132 		 * o This is the node with the least amount of bandwidth
3133 		 */
3134 		if ((*smask != 0x00) &&
3135 		    ((best_smask == 0x00) ||
3136 		    (best_node_bandwidth > node_bandwidth))) {
3137 
3138 			best_node_bandwidth = node_bandwidth;
3139 			best_array_leaf = array_leaf;
3140 			best_smask = *smask;
3141 		}
3142 	}
3143 
3144 	 * If we find a node that can handle the bandwidth, populate the
3145 	 * If we find node that can handle the bandwidth populate the
3146 	 * appropriate variables and return success.
3147 	 */
3148 	if (best_smask) {
3149 		*smask = best_smask;
3150 		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3151 		    interval);
3152 		ehci_update_bw_availability(ehcip, bandwidth,
3153 		    ehci_index[best_array_leaf], leaf_count, best_smask);
3154 
3155 		return (USB_SUCCESS);
3156 	}
3157 
3158 	return (USB_FAILURE);
3159 }
3160 
3161 
3162 /*
3163  * ehci_find_bestfit_ls_intr_mask:
3164  *
3165  * Find the smask and cmask in the bandwidth allocation.
3166  */
3167 static int
3168 ehci_find_bestfit_ls_intr_mask(
3169 	ehci_state_t	*ehcip,
3170 	uchar_t		*smask,
3171 	uchar_t		*cmask,
3172 	uint_t		*pnode,
3173 	uint_t		sbandwidth,
3174 	uint_t		cbandwidth,
3175 	int		interval)
3176 {
3177 	int		i;
3178 	uint_t		elements, index;
3179 	int		array_leaf, best_array_leaf;
3180 	uint_t		node_sbandwidth, node_cbandwidth;
3181 	uint_t		best_node_bandwidth;
3182 	uint_t		leaf_count;
3183 	uchar_t		bw_smask, bw_cmask;
3184 	uchar_t		best_smask, best_cmask;
3185 
3186 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3187 	    "ehci_find_bestfit_ls_intr_mask: ");
3188 
3189 	/* For low and full speed devices */
3190 	index = EHCI_XUS_MASK_INDEX;
3191 	elements = EHCI_INTR_4MS_POLL;
3192 
3193 	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3194 
3195 	/*
3196 	 * Because of the way the leaves are set up, we will automatically
3197 	 * hit the leftmost leaf of every possible node with this interval.
3198 	 */
3199 	best_smask = 0x00;
3200 	best_node_bandwidth = 0;
3201 	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3202 		/* Find the bandwidth mask */
3203 		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3204 		    sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3205 		node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3206 		    cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3207 
3208 		/*
3209 		 * If this node cannot support our requirements skip to the
3210 		 * next leaf.
3211 		 */
3212 		if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3213 			continue;
3214 		}
3215 
3216 		/*
3217 		 * Now make sure our bandwidth requirements can be
3218 		 * satisfied with one of smasks in this node.
3219 		 */
3220 		*smask = 0x00;
3221 		*cmask = 0x00;
3222 		for (i = index; i < (index + elements); i++) {
3223 			/* Check the start split mask value */
3224 			if ((ehci_start_split_mask[i] & bw_smask) &&
3225 			    (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3226 				*smask = ehci_start_split_mask[i];
3227 				*cmask = ehci_intr_complete_split_mask[i];
3228 				break;
3229 			}
3230 		}
3231 
3232 		/*
3233 		 * If an appropriate smask is found save the information if:
3234 		 * o best_smask has not been found yet.
3235 		 * - or -
3236 		 * o This is the node with the least amount of bandwidth
3237 		 */
3238 		if ((*smask != 0x00) &&
3239 		    ((best_smask == 0x00) ||
3240 		    (best_node_bandwidth >
3241 		    (node_sbandwidth + node_cbandwidth)))) {
3242 			best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3243 			best_array_leaf = array_leaf;
3244 			best_smask = *smask;
3245 			best_cmask = *cmask;
3246 		}
3247 	}
3248 
3249 	/*
3250 	 * If we find a node that can handle the bandwidth, populate the
3251 	 * appropriate variables and return success.
3252 	 */
3253 	if (best_smask) {
3254 		*smask = best_smask;
3255 		*cmask = best_cmask;
3256 		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3257 		    interval);
3258 		ehci_update_bw_availability(ehcip, sbandwidth,
3259 		    ehci_index[best_array_leaf], leaf_count, best_smask);
3260 		ehci_update_bw_availability(ehcip, cbandwidth,
3261 		    ehci_index[best_array_leaf], leaf_count, best_cmask);
3262 
3263 		return (USB_SUCCESS);
3264 	}
3265 
3266 	return (USB_FAILURE);
3267 }
3268 
3269 
3270 /*
3271  * ehci_find_bestfit_sitd_in_mask:
3272  *
3273  * Find the smask and cmask in the bandwidth allocation.
3274  */
3275 static int
3276 ehci_find_bestfit_sitd_in_mask(
3277 	ehci_state_t	*ehcip,
3278 	uchar_t		*smask,
3279 	uchar_t		*cmask,
3280 	uint_t		*pnode,
3281 	uint_t		sbandwidth,
3282 	uint_t		cbandwidth,
3283 	int		interval)
3284 {
3285 	int		i, uFrames, found;
3286 	int		array_leaf, best_array_leaf;
3287 	uint_t		node_sbandwidth, node_cbandwidth;
3288 	uint_t		best_node_bandwidth;
3289 	uint_t		leaf_count;
3290 	uchar_t		bw_smask, bw_cmask;
3291 	uchar_t		best_smask, best_cmask;
3292 
3293 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3294 	    "ehci_find_bestfit_sitd_in_mask: ");
3295 
3296 	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3297 
3298 	/*
3299 	 * Because of the way the leaves are set up, we will automatically
3300 	 * hit the leftmost leaf of every possible node with this interval.
3301 	 * You may only send MAX_UFRAME_SITD_XFER raw bytes per uFrame.
3302 	 */
3303 	/*
3304 	 * Need to add an additional 2 uFrames, if the "L"ast
3305 	 * complete split is before uFrame 6.  See section
3306 	 * 11.8.4 in USB 2.0 Spec.  Currently we do not support
3307 	 * the "Back Ptr" which means we support an IN of at most
3308 	 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
3309 	 */
3310 	uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3311 	if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3312 		uFrames++;
3313 	}
3314 	if (uFrames > 6) {
3315 
3316 		return (USB_FAILURE);
3317 	}
3318 	*smask = 0x1;
3319 	*cmask = 0x00;
3320 	for (i = 0; i < uFrames; i++) {
3321 		*cmask = *cmask << 1;
3322 		*cmask |= 0x1;
3323 	}
3324 	/* cmask must start 2 uFrames after the smask */
3325 	*cmask = *cmask << 2;
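	/*
	 * For example (illustrative only; assumes MAX_UFRAME_SITD_XFER is
	 * 188 bytes): a cbandwidth of 300 gives uFrames = 300/188 + 2 = 3,
	 * plus one more for the remainder, so uFrames is 4.  The masks
	 * built above are then smask = 0x01 and cmask = 0x0f << 2 = 0x3c,
	 * before they are shifted across the microframes below.
	 */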
3326 
3327 	found = 0;
3328 	best_smask = 0x00;
3329 	best_node_bandwidth = 0;
3330 	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3331 		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3332 		    sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3333 		node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3334 		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3335 		    &bw_cmask);
3336 
3337 		/*
3338 		 * If this node cannot support our requirements skip to the
3339 		 * next leaf.
3340 		 */
3341 		if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3342 			continue;
3343 		}
3344 
3345 		for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3346 			if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3347 				found = 1;
3348 				break;
3349 			}
3350 			*smask = *smask << 1;
3351 			*cmask = *cmask << 1;
3352 		}
3353 
3354 		/*
3355 		 * If an appropriate smask is found save the information if:
3356 		 * o best_smask has not been found yet.
3357 		 * - or -
3358 		 * o This is the node with the least amount of bandwidth
3359 		 */
3360 		if (found &&
3361 		    ((best_smask == 0x00) ||
3362 		    (best_node_bandwidth >
3363 		    (node_sbandwidth + node_cbandwidth)))) {
3364 			best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3365 			best_array_leaf = array_leaf;
3366 			best_smask = *smask;
3367 			best_cmask = *cmask;
3368 		}
3369 	}
3370 
3371 	/*
3372 	 * If we find a node that can handle the bandwidth, populate the
3373 	 * appropriate variables and return success.
3374 	 */
3375 	if (best_smask) {
3376 		*smask = best_smask;
3377 		*cmask = best_cmask;
3378 		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3379 		    interval);
3380 		ehci_update_bw_availability(ehcip, sbandwidth,
3381 		    ehci_index[best_array_leaf], leaf_count, best_smask);
3382 		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3383 		    ehci_index[best_array_leaf], leaf_count, best_cmask);
3384 
3385 		return (USB_SUCCESS);
3386 	}
3387 
3388 	return (USB_FAILURE);
3389 }
3390 
3391 
3392 /*
3393  * ehci_find_bestfit_sitd_out_mask:
3394  *
3395  * Find the smask in the bandwidth allocation.
3396  */
3397 static int
3398 ehci_find_bestfit_sitd_out_mask(
3399 	ehci_state_t	*ehcip,
3400 	uchar_t		*smask,
3401 	uint_t		*pnode,
3402 	uint_t		sbandwidth,
3403 	int		interval)
3404 {
3405 	int		i, uFrames, found;
3406 	int		array_leaf, best_array_leaf;
3407 	uint_t		node_sbandwidth;
3408 	uint_t		best_node_bandwidth;
3409 	uint_t		leaf_count;
3410 	uchar_t		bw_smask;
3411 	uchar_t		best_smask;
3412 
3413 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3414 	    "ehci_find_bestfit_sitd_out_mask: ");
3415 
3416 	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3417 
3418 	/*
3419 	 * Because of the way the leaves are set up, we will automatically
3420 	 * hit the leftmost leaf of every possible node with this interval.
3421 	 * You may only send MAX_UFRAME_SITD_XFER raw bytes per uFrame.
3422 	 */
3423 	*smask = 0x00;
3424 	uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3425 	if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3426 		uFrames++;
3427 	}
3428 	for (i = 0; i < uFrames; i++) {
3429 		*smask = *smask << 1;
3430 		*smask |= 0x1;
3431 	}
3432 
3433 	found = 0;
3434 	best_smask = 0x00;
3435 	best_node_bandwidth = 0;
3436 	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3437 		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3438 		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3439 		    &bw_smask);
3440 
3441 		/*
3442 		 * If this node cannot support our requirements skip to the
3443 		 * next leaf.
3444 		 */
3445 		if (bw_smask == 0x00) {
3446 			continue;
3447 		}
3448 
3449 		/* You cannot have a start split on the 8th uFrame */
3450 		for (i = 0; (*smask & 0x80) == 0; i++) {
3451 			if (*smask & bw_smask) {
3452 				found = 1;
3453 				break;
3454 			}
3455 			*smask = *smask << 1;
3456 		}
3457 
3458 		/*
3459 		 * If an appropriate smask is found save the information if:
3460 		 * o best_smask has not been found yet.
3461 		 * - or -
3462 		 * o This is the node with the least amount of bandwidth
3463 		 */
3464 		if (found &&
3465 		    ((best_smask == 0x00) ||
3466 		    (best_node_bandwidth > node_sbandwidth))) {
3467 			best_node_bandwidth = node_sbandwidth;
3468 			best_array_leaf = array_leaf;
3469 			best_smask = *smask;
3470 		}
3471 	}
3472 
3473 	/*
3474 	 * If we find a node that can handle the bandwidth, populate the
3475 	 * appropriate variables and return success.
3476 	 */
3477 	if (best_smask) {
3478 		*smask = best_smask;
3479 		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3480 		    interval);
3481 		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3482 		    ehci_index[best_array_leaf], leaf_count, best_smask);
3483 
3484 		return (USB_SUCCESS);
3485 	}
3486 
3487 	return (USB_FAILURE);
3488 }
3489 
3490 
3491 /*
3492  * ehci_calculate_bw_availability_mask:
3493  *
3494  * Returns the "total bandwidth used" in this node.
3495  * Populates bw_mask with the uFrames that can support the bandwidth.
3496  *
3497  * If none of the uFrames can support this bandwidth, then bw_mask
3498  * will be set to 0x00 and the "total bandwidth used" will be invalid.
3499  */
3500 static uint_t
3501 ehci_calculate_bw_availability_mask(
3502 	ehci_state_t	*ehcip,
3503 	uint_t		bandwidth,
3504 	int		leaf,
3505 	int		leaf_count,
3506 	uchar_t		*bw_mask)
3507 {
3508 	int			i, j;
3509 	uchar_t			bw_uframe;
3510 	int			uframe_total;
3511 	ehci_frame_bandwidth_t	*fbp;
3512 	uint_t			total_bandwidth = 0;
3513 
3514 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3515 	    "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3516 	    leaf, leaf_count);
3517 
3518 	/* Start by saying all uFrames are available */
3519 	*bw_mask = 0xFF;
3520 
3521 	for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3522 		fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3523 
3524 		total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3525 
3526 		for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3527 			/*
3528 			 * If the uFrame in bw_mask is available check to see if
3529 			 * it can support the additional bandwidth.
3530 			 */
3531 			bw_uframe = (*bw_mask & (0x1 << j));
3532 			uframe_total =
3533 			    fbp->ehci_micro_frame_bandwidth[j] +
3534 			    bandwidth;
3535 			if ((bw_uframe) &&
3536 			    (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3537 				*bw_mask = *bw_mask & ~bw_uframe;
3538 			}
3539 		}
3540 	}
3541 
3542 	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3543 	    "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3544 	    *bw_mask);
3545 
3546 	return (total_bandwidth);
3547 }
3548 
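/*
 * For example (illustrative only): with a leaf_count of 2, this routine
 * sums ehci_allocated_frame_bandwidth over the two frames and, for every
 * microframe j where ehci_micro_frame_bandwidth[j] plus the requested
 * bandwidth would exceed HS_PERIODIC_BANDWIDTH in either frame, clears
 * bit j in *bw_mask.
 */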
3549 
3550 /*
3551  * ehci_update_bw_availability:
3552  *
3553  * The leftmost leaf needs to be in terms of array position and
3554  * not the actual lattice position.
3555  */
3556 static void
3557 ehci_update_bw_availability(
3558 	ehci_state_t	*ehcip,
3559 	int		bandwidth,
3560 	int		leftmost_leaf,
3561 	int		leaf_count,
3562 	uchar_t		mask)
3563 {
3564 	int			i, j;
3565 	ehci_frame_bandwidth_t	*fbp;
3566 	int			uFrame_bandwidth[8];
3567 
3568 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3569 	    "ehci_update_bw_availability: "
3570 	    "leaf %d count %d bandwidth 0x%x mask 0x%x",
3571 	    leftmost_leaf, leaf_count, bandwidth, mask);
3572 
3573 	ASSERT(leftmost_leaf < 32);
3574 	ASSERT(leftmost_leaf >= 0);
3575 
3576 	for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3577 		if (mask & 0x1) {
3578 			uFrame_bandwidth[j] = bandwidth;
3579 		} else {
3580 			uFrame_bandwidth[j] = 0;
3581 		}
3582 
3583 		mask = mask >> 1;
3584 	}
3585 
3586 	/* Update all the affected leaves with the bandwidth */
3587 	for (i = 0; i < leaf_count; i++) {
3588 		fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3589 
3590 		for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3591 			fbp->ehci_micro_frame_bandwidth[j] +=
3592 			    uFrame_bandwidth[j];
3593 			fbp->ehci_allocated_frame_bandwidth +=
3594 			    uFrame_bandwidth[j];
3595 		}
3596 	}
3597 }
3598 
3599 /*
3600  * Miscellaneous functions
3601  */
3602 
3603 /*
3604  * ehci_obtain_state:
3605  *
3606  * NOTE: This function is also called from POLLED MODE.
3607  */
3608 ehci_state_t *
3609 ehci_obtain_state(dev_info_t	*dip)
3610 {
3611 	int			instance = ddi_get_instance(dip);
3612 
3613 	ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3614 
3615 	ASSERT(state != NULL);
3616 
3617 	return (state);
3618 }
3619 
3620 
3621 /*
3622  * ehci_state_is_operational:
3623  *
3624  * Check the host controller state and return the proper value.
3625  */
3626 int
3627 ehci_state_is_operational(ehci_state_t	*ehcip)
3628 {
3629 	int	val;
3630 
3631 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3632 
3633 	switch (ehcip->ehci_hc_soft_state) {
3634 	case EHCI_CTLR_INIT_STATE:
3635 	case EHCI_CTLR_SUSPEND_STATE:
3636 		val = USB_FAILURE;
3637 		break;
3638 	case EHCI_CTLR_OPERATIONAL_STATE:
3639 		val = USB_SUCCESS;
3640 		break;
3641 	case EHCI_CTLR_ERROR_STATE:
3642 		val = USB_HC_HARDWARE_ERROR;
3643 		break;
3644 	default:
3645 		val = USB_FAILURE;
3646 		break;
3647 	}
3648 
3649 	return (val);
3650 }
3651 
3652 
3653 /*
3654  * ehci_do_soft_reset
3655  *
3656  * Do a soft reset of the EHCI host controller.
3657  */
3658 int
3659 ehci_do_soft_reset(ehci_state_t	*ehcip)
3660 {
3661 	usb_frame_number_t	before_frame_number, after_frame_number;
3662 	ehci_regs_t		*ehci_save_regs;
3663 
3664 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3665 
3666 	/* Increment host controller error count */
3667 	ehcip->ehci_hc_error++;
3668 
3669 	USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3670 	    "ehci_do_soft_reset:"
3671 	    "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3672 
3673 	/*
3674 	 * Allocate space for saving current Host Controller
3675 	 * registers. Don't do any recovery if allocation
3676 	 * fails.
3677 	 */
3678 	ehci_save_regs = (ehci_regs_t *)
3679 	    kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3680 
3681 	if (ehci_save_regs == NULL) {
3682 		USB_DPRINTF_L2(PRINT_MASK_INTR,  ehcip->ehci_log_hdl,
3683 		    "ehci_do_soft_reset: kmem_zalloc failed");
3684 
3685 		return (USB_FAILURE);
3686 	}
3687 
3688 	/* Save current ehci registers */
3689 	ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3690 	ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3691 	ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3692 	ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3693 	ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3694 	ehci_save_regs->ehci_periodic_list_base =
3695 	    Get_OpReg(ehci_periodic_list_base);
3696 
3697 	USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3698 	    "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3699 
3700 	/* Disable all list processing and interrupts */
3701 	Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3702 	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3703 
3704 	/* Disable all EHCI interrupts */
3705 	Set_OpReg(ehci_interrupt, 0);
3706 
3707 	/* Wait for a few milliseconds */
3708 	drv_usecwait(EHCI_SOF_TIMEWAIT);
3709 
3710 	/* Do light soft reset of ehci host controller */
3711 	Set_OpReg(ehci_command,
3712 	    Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3713 
3714 	USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3715 	    "ehci_do_soft_reset: Reset in progress");
3716 
3717 	/* Wait for reset to complete */
3718 	drv_usecwait(EHCI_RESET_TIMEWAIT);
3719 
3720 	/*
3721 	 * Restore the previously saved EHCI register values
3722 	 * into the current EHCI registers.
3723 	 */
3724 	Set_OpReg(ehci_ctrl_segment, (uint32_t)
3725 	    ehci_save_regs->ehci_ctrl_segment);
3726 
3727 	Set_OpReg(ehci_periodic_list_base, (uint32_t)
3728 	    ehci_save_regs->ehci_periodic_list_base);
3729 
3730 	Set_OpReg(ehci_async_list_addr, (uint32_t)
3731 	    ehci_save_regs->ehci_async_list_addr);
3732 
3733 	/*
3734 	 * For some reason this register might get nulled out by
3735 	 * the Uli M1575 South Bridge. To work around the hardware
3736 	 * problem, check the value after the write and retry if the
3737 	 * last write fails.
3738 	 */
3739 	if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3740 	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3741 	    (ehci_save_regs->ehci_async_list_addr !=
3742 	    Get_OpReg(ehci_async_list_addr))) {
3743 		int retry = 0;
3744 
3745 		Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3746 		    ehci_save_regs->ehci_async_list_addr, retry);
3747 		if (retry >= EHCI_MAX_RETRY) {
3748 			USB_DPRINTF_L2(PRINT_MASK_ATTA,
3749 			    ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3750 			    " ASYNCLISTADDR write failed.");
3751 
3752 			return (USB_FAILURE);
3753 		}
3754 		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3755 		    "ehci_do_soft_reset: ASYNCLISTADDR "
3756 		    "write failed, retry=%d", retry);
3757 	}
3758 
3759 	Set_OpReg(ehci_config_flag, (uint32_t)
3760 	    ehci_save_regs->ehci_config_flag);
3761 
3762 	/* Enable both Asynchronous and Periodic Schedule if necessary */
3763 	ehci_toggle_scheduler(ehcip);
3764 
3765 	/*
3766 	 * Set ehci_interrupt to enable the USB, USB error, frame list
3767 	 * rollover and host system error interrupts.
3768 	 */
3769 	Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3770 	    EHCI_INTR_FRAME_LIST_ROLLOVER |
3771 	    EHCI_INTR_USB_ERROR |
3772 	    EHCI_INTR_USB);
3773 
3774 	/*
3775 	 * Deallocate the space that was allocated for saving
3776 	 * the HC registers.
3777 	 */
3778 	kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3779 
3780 	/*
3781 	 * Set the desired interrupt threshold, frame list size (if
3782 	 * applicable) and turn on the EHCI host controller.
3783 	 */
3784 	Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3785 	    ~EHCI_CMD_INTR_THRESHOLD) |
3786 	    (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3787 
3788 	/* Wait 10ms for EHCI to start sending SOF */
3789 	drv_usecwait(EHCI_RESET_TIMEWAIT);
3790 
3791 	/*
3792 	 * Get the current usb frame number before waiting for
3793 	 * a few milliseconds.
3794 	 */
3795 	before_frame_number = ehci_get_current_frame_number(ehcip);
3796 
3797 	/* Wait for a few milliseconds */
3798 	drv_usecwait(EHCI_SOF_TIMEWAIT);
3799 
3800 	/*
3801 	 * Get the current usb frame number after waiting for
3802 	 * a few milliseconds.
3803 	 */
3804 	after_frame_number = ehci_get_current_frame_number(ehcip);
3805 
3806 	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3807 	    "ehci_do_soft_reset: Before Frame Number 0x%llx "
3808 	    "After Frame Number 0x%llx",
3809 	    (unsigned long long)before_frame_number,
3810 	    (unsigned long long)after_frame_number);
3811 
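	/*
	 * If the frame counter has not advanced and the controller still
	 * reports itself halted, the light reset did not bring the
	 * schedule back to life; report the failure to the caller.
	 */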
3812 	if ((after_frame_number <= before_frame_number) &&
3813 	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3814 
3815 		USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3816 		    "ehci_do_soft_reset: Soft reset failed");
3817 
3818 		return (USB_FAILURE);
3819 	}
3820 
3821 	return (USB_SUCCESS);
3822 }
3823 
3824 
3825 /*
3826  * ehci_get_xfer_attrs:
3827  *
3828  * Get the attributes of a particular xfer.
3829  *
3830  * NOTE: This function is also called from POLLED MODE.
3831  */
3832 usb_req_attrs_t
3833 ehci_get_xfer_attrs(
3834 	ehci_state_t		*ehcip,
3835 	ehci_pipe_private_t	*pp,
3836 	ehci_trans_wrapper_t	*tw)
3837 {
3838 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
3839 	usb_req_attrs_t		attrs = USB_ATTRS_NONE;
3840 
3841 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3842 	    "ehci_get_xfer_attrs:");
3843 
3844 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3845 	case USB_EP_ATTR_CONTROL:
3846 		attrs = ((usb_ctrl_req_t *)
3847 		    tw->tw_curr_xfer_reqp)->ctrl_attributes;
3848 		break;
3849 	case USB_EP_ATTR_BULK:
3850 		attrs = ((usb_bulk_req_t *)
3851 		    tw->tw_curr_xfer_reqp)->bulk_attributes;
3852 		break;
3853 	case USB_EP_ATTR_INTR:
3854 		attrs = ((usb_intr_req_t *)
3855 		    tw->tw_curr_xfer_reqp)->intr_attributes;
3856 		break;
3857 	}
3858 
3859 	return (attrs);
3860 }
3861 
3862 
3863 /*
3864  * ehci_get_current_frame_number:
3865  *
3866  * Get the current software based usb frame number.
3867  */
3868 usb_frame_number_t
3869 ehci_get_current_frame_number(ehci_state_t *ehcip)
3870 {
3871 	usb_frame_number_t	usb_frame_number;
3872 	usb_frame_number_t	ehci_fno, micro_frame_number;
3873 
3874 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3875 
3876 	ehci_fno = ehcip->ehci_fno;
3877 	micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3878 
3879 	/*
3880 	 * Calculate current software based usb frame number.
3881 	 *
3882 	 * This code accounts for the fact that the frame number is
3883 	 * updated by the Host Controller before the ehci driver
3884 	 * gets a FrameListRollover interrupt that will adjust the
3885 	 * higher part of the frame number.
3886 	 *
3887 	 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3888 	 */
3889 	micro_frame_number = ((micro_frame_number & 0x1FFF) |
3890 	    ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3891 	    ehci_fno) & 0x2000);
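	/*
	 * For example, with ehci_fno = 0x0000 and a hardware frame index
	 * of 0x2005 (the controller has crossed the 0x2000 boundary but
	 * the rollover interrupt has not been serviced yet), the XOR term
	 * contributes 0x2000 and the result is 0x2005.  Once ehci_fno has
	 * been updated to 0x2000, the XOR term contributes 0 and the
	 * result is unchanged.
	 */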
3892 
3893 	/*
3894 	 * A micro frame is equivalent to 125 usec. Eight
3895 	 * micro frames are equivalent to one millisecond,
3896 	 * or one usb frame.
3897 	 */
3898 	usb_frame_number = micro_frame_number >>
3899 	    EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
3900 
3901 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3902 	    "ehci_get_current_frame_number: "
3903 	    "Current usb uframe number = 0x%llx "
3904 	    "Current usb frame number  = 0x%llx",
3905 	    (unsigned long long)micro_frame_number,
3906 	    (unsigned long long)usb_frame_number);
3907 
3908 	return (usb_frame_number);
3909 }
3910 
3911 
3912 /*
3913  * ehci_cpr_cleanup:
3914  *
3915  * Clean up ehci state and other ehci-specific information across
3916  * Checkpoint Resume (CPR).
3917  */
3918 static	void
3919 ehci_cpr_cleanup(ehci_state_t *ehcip)
3920 {
3921 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3922 
3923 	/* Reset software part of usb frame number */
3924 	ehcip->ehci_fno = 0;
3925 }
3926 
3927 
3928 /*
3929  * ehci_wait_for_sof:
3930  *
3931  * Wait for a couple of SOF interrupts
3932  */
3933 int
3934 ehci_wait_for_sof(ehci_state_t	*ehcip)
3935 {
3936 	usb_frame_number_t	before_frame_number, after_frame_number;
3937 	int			error = USB_SUCCESS;
3938 
3939 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3940 	    ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3941 
3942 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3943 
3944 	error = ehci_state_is_operational(ehcip);
3945 
3946 	if (error != USB_SUCCESS) {
3947 
3948 		return (error);
3949 	}
3950 
3951 	/* Get the current usb frame number before waiting for two SOFs */
3952 	before_frame_number = ehci_get_current_frame_number(ehcip);
3953 
3954 	mutex_exit(&ehcip->ehci_int_mutex);
3955 
3956 	/* Wait for a few milliseconds */
3957 	delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3958 
3959 	mutex_enter(&ehcip->ehci_int_mutex);
3960 
3961 	/* Get the current usb frame number after waking up */
3962 	after_frame_number = ehci_get_current_frame_number(ehcip);
3963 
3964 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3965 	    "ehci_wait_for_sof: framenumber: before 0x%llx "
3966 	    "after 0x%llx",
3967 	    (unsigned long long)before_frame_number,
3968 	    (unsigned long long)after_frame_number);
3969 
3970 	/* If the usb frame number has not advanced, attempt a soft reset */
3971 	if (after_frame_number <= before_frame_number) {
3972 
3973 		if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
3974 
3975 			USB_DPRINTF_L0(PRINT_MASK_LISTS,
3976 			    ehcip->ehci_log_hdl, "No SOF interrupts");
3977 
3978 			/* Set host controller soft state to error */
3979 			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
3980 
3981 			return (USB_FAILURE);
3982 		}
3983 
3984 	}
3985 
3986 	return (USB_SUCCESS);
3987 }
3988 
3989 
3990 /*
3991  * ehci_toggle_scheduler:
3992  *
3993  * Turn the schedulers on or off based on the pipe open counts.
3994  */
3995 void
3996 ehci_toggle_scheduler(ehci_state_t *ehcip)
{
3997 	uint_t	temp_reg, cmd_reg;
3998 
3999 	cmd_reg = Get_OpReg(ehci_command);
4000 	temp_reg = cmd_reg;
4001 
4002 	/*
4003 	 * Enable/Disable asynchronous scheduler, and
4004 	 * turn on/off async list door bell
4005 	 */
4006 	if (ehcip->ehci_open_async_count) {
4007 		if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4008 			/*
4009 			 * For some reason this address might get nulled out by
4010 			 * the ehci chip. Set it here just in case it is null.
4011 			 */
4012 			Set_OpReg(ehci_async_list_addr,
4013 			    ehci_qh_cpu_to_iommu(ehcip,
4014 				ehcip->ehci_head_of_async_sched_list));
4015 
4016 			/*
4017 			 * For some reason this register might get nulled out by
4018 			 * the Uli M1575 Southbridge. To work around the HW
4019 			 * problem, check the value after the write and retry
4020 			 * if the last write fails.
4021 			 *
4022 			 * If the ASYNCLISTADDR remains "stuck" after
4023 			 * EHCI_MAX_RETRY retries, then the M1575 is broken
4024 			 * and is stuck in an inconsistent state and is about
4025 			 * to crash the machine with a trn_oor panic when it
4026 			 * does a DMA read from 0x0.  It is better to panic
4027 			 * now rather than wait for the trn_oor crash; this
4028 			 * way Customer Service will have a clean signature
4029 			 * that indicts the M1575 chip rather than a
4030 			 * mysterious and hard-to-diagnose trn_oor panic.
4031 			 */
4032 			if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4033 			    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4034 			    (ehci_qh_cpu_to_iommu(ehcip,
4035 			    ehcip->ehci_head_of_async_sched_list) !=
4036 			    Get_OpReg(ehci_async_list_addr))) {
4037 				int retry = 0;
4038 
4039 				Set_OpRegRetry(ehci_async_list_addr,
4040 				    ehci_qh_cpu_to_iommu(ehcip,
4041 				    ehcip->ehci_head_of_async_sched_list),
4042 				    retry);
4043 				if (retry >= EHCI_MAX_RETRY)
4044 					cmn_err(CE_PANIC,
4045 					    "ehci_toggle_scheduler: "
4046 					    "ASYNCLISTADDR write failed.");
4047 
4048 				USB_DPRINTF_L2(PRINT_MASK_ATTA,
4049 				    ehcip->ehci_log_hdl,
4050 				    "ehci_toggle_scheduler: ASYNCLISTADDR "
4051 					"write failed, retry=%d", retry);
4052 			}
4053 		}
4054 		cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4055 	} else {
4056 		cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4057 	}
4058 
4059 	if (ehcip->ehci_open_periodic_count) {
4060 		if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4061 			/*
4062 			 * For some reason this address gets nulled out by
4063 			 * the ehci chip. Set it here just in case it is null.
4064 			 */
4065 			Set_OpReg(ehci_periodic_list_base,
4066 			    (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4067 				0xFFFFF000));
4068 		}
4069 		cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4070 	} else {
4071 		cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4072 	}
4073 
4074 	/* Avoid the register write when nothing has changed */
4075 	if (temp_reg != cmd_reg) {
4076 		Set_OpReg(ehci_command, cmd_reg);
4077 	}
4078 }
4079 
4080 /*
4081  * ehci print functions
4082  */
4083 
4084 /*
4085  * ehci_print_caps:
4086  */
4087 void
4088 ehci_print_caps(ehci_state_t	*ehcip)
4089 {
4090 	uint_t			i;
4091 
4092 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4093 	    "\n\tUSB 2.0 Host Controller Characteristics\n");
4094 
4095 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4096 	    "Caps Length: 0x%x Version: 0x%x\n",
4097 	    Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4098 
4099 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4100 	    "Structural Parameters\n");
4101 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4102 	    "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4103 	    EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4104 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4105 	    "No of Classic host controllers: 0x%x",
4106 	    (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4107 	    >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4108 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4109 	    "No of ports per Classic host controller: 0x%x",
4110 	    (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4111 	    >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4112 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4113 	    "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4114 	    EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4115 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4116 	    "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4117 	    EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4118 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4119 	    "No of root hub ports: 0x%x\n",
4120 	    Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4121 
4122 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4123 	    "Capability Parameters\n");
4124 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4125 	    "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4126 	    EHCI_HCC_EECP) ? "Yes" : "No");
4127 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4128 	    "Isoch schedule threshold: 0x%x",
4129 	    Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4130 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4131 	    "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4132 	    EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4133 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4134 	    "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4135 	    EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4136 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4137 	    "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4138 	    EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4139 
4140 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4141 	    "Classic Port Route Description");
4142 
4143 	for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4144 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4145 		    "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4146 	}
4147 }
4148 
4149 
4150 /*
4151  * ehci_print_regs:
4152  */
4153 void
4154 ehci_print_regs(ehci_state_t	*ehcip)
4155 {
4156 	uint_t			i;
4157 
4158 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4159 	    "\n\tEHCI%d Operational Registers\n",
4160 	    ddi_get_instance(ehcip->ehci_dip));
4161 
4162 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4163 	    "Command: 0x%x Status: 0x%x",
4164 	    Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4165 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4166 	    "Interrupt: 0x%x Frame Index: 0x%x",
4167 	    Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4168 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4169 	    "Control Segment: 0x%x Periodic List Base: 0x%x",
4170 	    Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4171 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4172 	    "Async List Addr: 0x%x Config Flag: 0x%x",
4173 	    Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4174 
4175 	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4176 	    "Root Hub Port Status");
4177 
4178 	for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4179 		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4180 		    "\tPort Status 0x%x: 0x%x ", i,
4181 		    Get_OpReg(ehci_rh_port_status[i]));
4182 	}
4183 }
4184 
4185 
4186 /*
4187  * ehci_print_qh:
4188  */
4189 void
4190 ehci_print_qh(
4191 	ehci_state_t	*ehcip,
4192 	ehci_qh_t	*qh)
4193 {
4194 	uint_t		i;
4195 
4196 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4197 	    "ehci_print_qh: qh = 0x%p", (void *)qh);
4198 
4199 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4200 	    "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4201 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4202 	    "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4203 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4204 	    "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4205 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4206 	    "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4207 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4208 	    "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4209 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4210 	    "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4211 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4212 	    "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4213 
4214 	for (i = 0; i < 5; i++) {
4215 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4216 		    "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4217 	}
4218 
4219 	for (i = 0; i < 5; i++) {
4220 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4221 		    "\tqh_buf_high[%d]: 0x%x ",
4222 		    i, Get_QH(qh->qh_buf_high[i]));
4223 	}
4224 
4225 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4226 	    "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4227 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4228 	    "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4229 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4230 	    "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4231 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4232 	    "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4233 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4234 	    "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4235 }
4236 
4237 
4238 /*
4239  * ehci_print_qtd:
4240  */
4241 void
4242 ehci_print_qtd(
4243 	ehci_state_t	*ehcip,
4244 	ehci_qtd_t	*qtd)
4245 {
4246 	uint_t		i;
4247 
4248 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4249 	    "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4250 
4251 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4252 	    "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4253 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4254 	    "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4255 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4256 	    "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4257 
4258 	for (i = 0; i < 5; i++) {
4259 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4260 		    "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4261 	}
4262 
4263 	for (i = 0; i < 5; i++) {
4264 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4265 		    "\tqtd_buf_high[%d]: 0x%x ",
4266 		    i, Get_QTD(qtd->qtd_buf_high[i]));
4267 	}
4268 
4269 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4270 	    "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4271 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4272 	    "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4273 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4274 	    "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4275 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4276 	    "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4277 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4278 	    "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4279 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4280 	    "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4281 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4282 	    "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4283 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4284 	    "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4285 }
4286 
4287 /*
4288  * ehci kstat functions
4289  */
4290 
4291 /*
4292  * ehci_create_stats:
4293  *
4294  * Allocate and initialize the ehci kstat structures
4295  */
4296 void
4297 ehci_create_stats(ehci_state_t	*ehcip)
4298 {
4299 	char			kstatname[KSTAT_STRLEN];
4300 	const char		*dname = ddi_driver_name(ehcip->ehci_dip);
4301 	char			*usbtypes[USB_N_COUNT_KSTATS] =
4302 	    {"ctrl", "isoch", "bulk", "intr"};
4303 	uint_t			instance = ehcip->ehci_instance;
4304 	ehci_intrs_stats_t	*isp;
4305 	int			i;
4306 
4307 	if (EHCI_INTRS_STATS(ehcip) == NULL) {
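		/*
		 * The interrupt kstat is named "<driver><instance>,intrs",
		 * e.g. "ehci0,intrs" for instance 0.
		 */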
4308 		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4309 		    dname, instance);
4310 		EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4311 		    kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4312 		    sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4313 		    KSTAT_FLAG_PERSISTENT);
4314 
4315 		if (EHCI_INTRS_STATS(ehcip)) {
4316 			isp = EHCI_INTRS_STATS_DATA(ehcip);
4317 			kstat_named_init(&isp->ehci_sts_total,
4318 			    "Interrupts Total", KSTAT_DATA_UINT64);
4319 			kstat_named_init(&isp->ehci_sts_not_claimed,
4320 			    "Not Claimed", KSTAT_DATA_UINT64);
4321 			kstat_named_init(&isp->ehci_sts_async_sched_status,
4322 			    "Async schedule status", KSTAT_DATA_UINT64);
4323 			kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4324 			    "Periodic sched status", KSTAT_DATA_UINT64);
4325 			kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4326 			    "Empty async schedule", KSTAT_DATA_UINT64);
4327 			kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4328 			    "Host controller Halted", KSTAT_DATA_UINT64);
4329 			kstat_named_init(&isp->ehci_sts_async_advance_intr,
4330 			    "Intr on async advance", KSTAT_DATA_UINT64);
4331 			kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4332 			    "Host system error", KSTAT_DATA_UINT64);
4333 			kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4334 			    "Frame list rollover", KSTAT_DATA_UINT64);
4335 			kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4336 			    "Port change detect", KSTAT_DATA_UINT64);
4337 			kstat_named_init(&isp->ehci_sts_usb_error_intr,
4338 			    "USB error interrupt", KSTAT_DATA_UINT64);
4339 			kstat_named_init(&isp->ehci_sts_usb_intr,
4340 			    "USB interrupt", KSTAT_DATA_UINT64);
4341 
4342 			EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4343 			EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4344 			kstat_install(EHCI_INTRS_STATS(ehcip));
4345 		}
4346 	}
4347 
4348 	if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4349 		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4350 		    dname, instance);
4351 		EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4352 		    kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4353 		    KSTAT_FLAG_PERSISTENT);
4354 
4355 		if (EHCI_TOTAL_STATS(ehcip)) {
4356 			kstat_install(EHCI_TOTAL_STATS(ehcip));
4357 		}
4358 	}
4359 
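	/* One byte-count kstat per transfer type, e.g. "ehci0,ctrl" */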
4360 	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4361 		if (ehcip->ehci_count_stats[i] == NULL) {
4362 			(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4363 			    dname, instance, usbtypes[i]);
4364 			ehcip->ehci_count_stats[i] = kstat_create("usba",
4365 			    instance, kstatname, "usb_byte_count",
4366 			    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4367 
4368 			if (ehcip->ehci_count_stats[i]) {
4369 				kstat_install(ehcip->ehci_count_stats[i]);
4370 			}
4371 		}
4372 	}
4373 }
4374 
4375 
4376 /*
4377  * ehci_destroy_stats:
4378  *
4379  * Clean up ehci kstat structures
4380  */
4381 void
4382 ehci_destroy_stats(ehci_state_t	*ehcip)
4383 {
4384 	int	i;
4385 
4386 	if (EHCI_INTRS_STATS(ehcip)) {
4387 		kstat_delete(EHCI_INTRS_STATS(ehcip));
4388 		EHCI_INTRS_STATS(ehcip) = NULL;
4389 	}
4390 
4391 	if (EHCI_TOTAL_STATS(ehcip)) {
4392 		kstat_delete(EHCI_TOTAL_STATS(ehcip));
4393 		EHCI_TOTAL_STATS(ehcip) = NULL;
4394 	}
4395 
4396 	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4397 		if (ehcip->ehci_count_stats[i]) {
4398 			kstat_delete(ehcip->ehci_count_stats[i]);
4399 			ehcip->ehci_count_stats[i] = NULL;
4400 		}
4401 	}
4402 }
4403 
4404 
4405 /*
4406  * ehci_do_intrs_stats:
4407  *
4408  * ehci status information
4409  * Update the ehci interrupt statistics for the given status bit.
4410 void
4411 ehci_do_intrs_stats(
4412 	ehci_state_t	*ehcip,
4413 	int		val)
4414 {
4415 	if (EHCI_INTRS_STATS(ehcip)) {
4416 		EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4417 		switch (val) {
4418 		case EHCI_STS_ASYNC_SCHED_STATUS:
4419 			EHCI_INTRS_STATS_DATA(ehcip)->
4420 			    ehci_sts_async_sched_status.value.ui64++;
4421 			break;
4422 		case EHCI_STS_PERIODIC_SCHED_STATUS:
4423 			EHCI_INTRS_STATS_DATA(ehcip)->
4424 			    ehci_sts_periodic_sched_status.value.ui64++;
4425 			break;
4426 		case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4427 			EHCI_INTRS_STATS_DATA(ehcip)->
4428 			    ehci_sts_empty_async_schedule.value.ui64++;
4429 			break;
4430 		case EHCI_STS_HOST_CTRL_HALTED:
4431 			EHCI_INTRS_STATS_DATA(ehcip)->
4432 			    ehci_sts_host_ctrl_halted.value.ui64++;
4433 			break;
4434 		case EHCI_STS_ASYNC_ADVANCE_INTR:
4435 			EHCI_INTRS_STATS_DATA(ehcip)->
4436 			    ehci_sts_async_advance_intr.value.ui64++;
4437 			break;
4438 		case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4439 			EHCI_INTRS_STATS_DATA(ehcip)->
4440 			    ehci_sts_host_system_error_intr.value.ui64++;
4441 			break;
4442 		case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4443 			EHCI_INTRS_STATS_DATA(ehcip)->
4444 			    ehci_sts_frm_list_rollover_intr.value.ui64++;
4445 			break;
4446 		case EHCI_STS_RH_PORT_CHANGE_INTR:
4447 			EHCI_INTRS_STATS_DATA(ehcip)->
4448 			    ehci_sts_rh_port_change_intr.value.ui64++;
4449 			break;
4450 		case EHCI_STS_USB_ERROR_INTR:
4451 			EHCI_INTRS_STATS_DATA(ehcip)->
4452 			    ehci_sts_usb_error_intr.value.ui64++;
4453 			break;
4454 		case EHCI_STS_USB_INTR:
4455 			EHCI_INTRS_STATS_DATA(ehcip)->
4456 			    ehci_sts_usb_intr.value.ui64++;
4457 			break;
4458 		default:
4459 			EHCI_INTRS_STATS_DATA(ehcip)->
4460 			    ehci_sts_not_claimed.value.ui64++;
4461 			break;
4462 		}
4463 	}
4464 }
4465 
4466 
4467 /*
4468  * ehci_do_byte_stats:
4469  *
4470  * Update the ehci byte count statistics for a data transfer.
4471  */
4472 void
4473 ehci_do_byte_stats(
4474 	ehci_state_t	*ehcip,
4475 	size_t		len,
4476 	uint8_t		attr,
4477 	uint8_t		addr)
4478 {
4479 	uint8_t 	type = attr & USB_EP_ATTR_MASK;
4480 	uint8_t 	dir = addr & USB_EP_DIR_MASK;
4481 
4482 	if (dir == USB_EP_DIR_IN) {
4483 		EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4484 		EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4485 		switch (type) {
4486 			case USB_EP_ATTR_CONTROL:
4487 				EHCI_CTRL_STATS(ehcip)->reads++;
4488 				EHCI_CTRL_STATS(ehcip)->nread += len;
4489 				break;
4490 			case USB_EP_ATTR_BULK:
4491 				EHCI_BULK_STATS(ehcip)->reads++;
4492 				EHCI_BULK_STATS(ehcip)->nread += len;
4493 				break;
4494 			case USB_EP_ATTR_INTR:
4495 				EHCI_INTR_STATS(ehcip)->reads++;
4496 				EHCI_INTR_STATS(ehcip)->nread += len;
4497 				break;
4498 			case USB_EP_ATTR_ISOCH:
4499 				EHCI_ISOC_STATS(ehcip)->reads++;
4500 				EHCI_ISOC_STATS(ehcip)->nread += len;
4501 				break;
4502 		}
4503 	} else if (dir == USB_EP_DIR_OUT) {
4504 		EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4505 		EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4506 		switch (type) {
4507 			case USB_EP_ATTR_CONTROL:
4508 				EHCI_CTRL_STATS(ehcip)->writes++;
4509 				EHCI_CTRL_STATS(ehcip)->nwritten += len;
4510 				break;
4511 			case USB_EP_ATTR_BULK:
4512 				EHCI_BULK_STATS(ehcip)->writes++;
4513 				EHCI_BULK_STATS(ehcip)->nwritten += len;
4514 				break;
4515 			case USB_EP_ATTR_INTR:
4516 				EHCI_INTR_STATS(ehcip)->writes++;
4517 				EHCI_INTR_STATS(ehcip)->nwritten += len;
4518 				break;
4519 			case USB_EP_ATTR_ISOCH:
4520 				EHCI_ISOC_STATS(ehcip)->writes++;
4521 				EHCI_ISOC_STATS(ehcip)->nwritten += len;
4522 				break;
4523 		}
4524 	}
4525 }
4526