1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2018, Joyent, Inc.
25 */
26
27 /*
28 * EHCI Host Controller Driver (EHCI)
29 *
30 * The EHCI driver is a software driver which interfaces to the Universal
31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32 * the Host Controller is defined by the EHCI Host Controller Interface.
33 *
34 * This module contains the main EHCI driver code which handles all USB
35 * transfers, bandwidth allocations and other general functionalities.
36 */
37
38 #include <sys/usb/hcd/ehci/ehcid.h>
39 #include <sys/usb/hcd/ehci/ehci_isoch.h>
40 #include <sys/usb/hcd/ehci/ehci_xfer.h>
41
42 /*
43 * EHCI MSI tunable:
44 *
45  * By default MSI is enabled on all supported platforms, except for the
46  * EHCI controller of the ULI1575 southbridge.
47 */
48 boolean_t ehci_enable_msi = B_TRUE;
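/*
 * Illustrative note (not part of the original source): as a global driver
 * variable, this tunable could typically be overridden without rebuilding
 * the module, e.g. with a line such as
 *
 *	set ehci:ehci_enable_msi = 0
 *
 * in /etc/system (assuming the stock /etc/system syntax for module globals).
 */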
49
50 /* Pointer to the state structure */
51 extern void *ehci_statep;
52
53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
54
55 extern uint_t ehci_vt62x2_workaround;
56 extern int force_ehci_off;
57
58 /* Adjustable variables for the size of the pools */
59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
61
62 /*
63  * Initialize the values that define the order in which the 32ms interrupt
64  * QH lists are executed by the host controller in the lattice tree.
65 */
66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
67 {0x00, 0x10, 0x08, 0x18,
68 0x04, 0x14, 0x0c, 0x1c,
69 0x02, 0x12, 0x0a, 0x1a,
70 0x06, 0x16, 0x0e, 0x1e,
71 0x01, 0x11, 0x09, 0x19,
72 0x05, 0x15, 0x0d, 0x1d,
73 0x03, 0x13, 0x0b, 0x1b,
74 0x07, 0x17, 0x0f, 0x1f};
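/*
 * Illustrative note (not part of the original source): each entry above is
 * simply the 5-bit reversal of its index, which spreads the 32 interrupt QH
 * lists as evenly as possible over consecutive frames.  A sketch of how one
 * entry could be recomputed:
 *
 *	uchar_t rev = 0;
 *	for (int b = 0; b < 5; b++)
 *		if (i & (1 << b))
 *			rev |= 1 << (4 - b);
 *
 * e.g. i = 1 (00001b) yields 0x10 (10000b), matching ehci_index[1].
 */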
75
76 /*
77 * Initialize the values which are used to calculate start split mask
78 * for the low/full/high speed interrupt and isochronous endpoints.
79 */
80 static uint_t ehci_start_split_mask[15] = {
81 /*
82	 * For high/full/low speed USB devices. For high speed
83	 * devices with a polling interval greater than or equal
84	 * to 8 microframes (125us each).
85 */
86 0x01, /* 00000001 */
87 0x02, /* 00000010 */
88 0x04, /* 00000100 */
89 0x08, /* 00001000 */
90 0x10, /* 00010000 */
91 0x20, /* 00100000 */
92 0x40, /* 01000000 */
93 0x80, /* 10000000 */
94
95	/* Only for high speed devices with a polling interval of 4 microframes */
96 0x11, /* 00010001 */
97 0x22, /* 00100010 */
98 0x44, /* 01000100 */
99 0x88, /* 10001000 */
100
101	/* Only for high speed devices with a polling interval of 2 microframes */
102 0x55, /* 01010101 */
103 0xaa, /* 10101010 */
104
105	/* Only for high speed devices with a polling interval of 1 microframe */
106 0xff /* 11111111 */
107 };
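/*
 * Illustrative note (not part of the original source): for a high speed
 * endpoint with a polling interval of 2^n microframes, a start split mask
 * with 8 / 2^n evenly spaced bits is chosen from the groups above, roughly:
 *
 *	interval >= 8 uframes -> one of 0x01..0x80 (one transaction/frame)
 *	interval == 4 uframes -> one of 0x11..0x88 (two transactions/frame)
 *	interval == 2 uframes -> 0x55 or 0xaa      (four transactions/frame)
 *	interval == 1 uframe  -> 0xff              (every microframe)
 */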
108
109 /*
110 * Initialize the values which are used to calculate complete split mask
111 * for the low/full speed interrupt and isochronous endpoints.
112 */
113 static uint_t ehci_intr_complete_split_mask[7] = {
114 /* Only full/low speed devices */
115 0x1c, /* 00011100 */
116 0x38, /* 00111000 */
117 0x70, /* 01110000 */
118 0xe0, /* 11100000 */
119 0x00, /* Need FSTN feature */
120 0x00, /* Need FSTN feature */
121 0x00 /* Need FSTN feature */
122 };
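/*
 * Illustrative note (not part of the original source): each usable entry
 * above is the corresponding start split shifted into the 2nd, 3rd and 4th
 * following microframes, roughly
 *
 *	cmask = (smask << 2) | (smask << 3) | (smask << 4);
 *
 * Start splits late enough in the frame would need complete splits that
 * wrap into the next frame, which requires the FSTN feature.
 */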
123
124
125 /*
126 * EHCI Internal Function Prototypes
127 */
128
129 /* Host Controller Driver (HCD) initialization functions */
130 void ehci_set_dma_attributes(ehci_state_t *ehcip);
131 int ehci_allocate_pools(ehci_state_t *ehcip);
132 void ehci_decode_ddi_dma_addr_bind_handle_result(
133 ehci_state_t *ehcip,
134 int result);
135 int ehci_map_regs(ehci_state_t *ehcip);
136 int ehci_register_intrs_and_init_mutex(
137 ehci_state_t *ehcip);
138 static int ehci_add_intrs(ehci_state_t *ehcip,
139 int intr_type);
140 int ehci_init_ctlr(ehci_state_t *ehcip,
141 int init_type);
142 static int ehci_take_control(ehci_state_t *ehcip);
143 static int ehci_init_periodic_frame_lst_table(
144 ehci_state_t *ehcip);
145 static void ehci_build_interrupt_lattice(
146 ehci_state_t *ehcip);
147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
148
149 /* Host Controller Driver (HCD) deinitialization functions */
150 int ehci_cleanup(ehci_state_t *ehcip);
151 static void ehci_rem_intrs(ehci_state_t *ehcip);
152 int ehci_cpr_suspend(ehci_state_t *ehcip);
153 int ehci_cpr_resume(ehci_state_t *ehcip);
154
155 /* Bandwidth Allocation functions */
156 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
157 usba_pipe_handle_data_t *ph,
158 uint_t *pnode,
159 uchar_t *smask,
160 uchar_t *cmask);
161 static int ehci_allocate_high_speed_bandwidth(
162 ehci_state_t *ehcip,
163 usba_pipe_handle_data_t *ph,
164 uint_t *hnode,
165 uchar_t *smask,
166 uchar_t *cmask);
167 static int ehci_allocate_classic_tt_bandwidth(
168 ehci_state_t *ehcip,
169 usba_pipe_handle_data_t *ph,
170 uint_t pnode);
171 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
172 usba_pipe_handle_data_t *ph,
173 uint_t pnode,
174 uchar_t smask,
175 uchar_t cmask);
176 static void ehci_deallocate_high_speed_bandwidth(
177 ehci_state_t *ehcip,
178 usba_pipe_handle_data_t *ph,
179 uint_t hnode,
180 uchar_t smask,
181 uchar_t cmask);
182 static void ehci_deallocate_classic_tt_bandwidth(
183 ehci_state_t *ehcip,
184 usba_pipe_handle_data_t *ph,
185 uint_t pnode);
186 static int ehci_compute_high_speed_bandwidth(
187 ehci_state_t *ehcip,
188 usb_ep_descr_t *endpoint,
189 usb_port_status_t port_status,
190 uint_t *sbandwidth,
191 uint_t *cbandwidth);
192 static int ehci_compute_classic_bandwidth(
193 usb_ep_descr_t *endpoint,
194 usb_port_status_t port_status,
195 uint_t *bandwidth);
196 int ehci_adjust_polling_interval(
197 ehci_state_t *ehcip,
198 usb_ep_descr_t *endpoint,
199 usb_port_status_t port_status);
200 static int ehci_adjust_high_speed_polling_interval(
201 ehci_state_t *ehcip,
202 usb_ep_descr_t *endpoint);
203 static uint_t ehci_lattice_height(uint_t interval);
204 static uint_t ehci_lattice_parent(uint_t node);
205 static uint_t ehci_find_periodic_node(
206 uint_t leaf,
207 int interval);
208 static uint_t ehci_leftmost_leaf(uint_t node,
209 uint_t height);
210 static uint_t ehci_pow_2(uint_t x);
211 static uint_t ehci_log_2(uint_t x);
212 static int ehci_find_bestfit_hs_mask(
213 ehci_state_t *ehcip,
214 uchar_t *smask,
215 uint_t *pnode,
216 usb_ep_descr_t *endpoint,
217 uint_t bandwidth,
218 int interval);
219 static int ehci_find_bestfit_ls_intr_mask(
220 ehci_state_t *ehcip,
221 uchar_t *smask,
222 uchar_t *cmask,
223 uint_t *pnode,
224 uint_t sbandwidth,
225 uint_t cbandwidth,
226 int interval);
227 static int ehci_find_bestfit_sitd_in_mask(
228 ehci_state_t *ehcip,
229 uchar_t *smask,
230 uchar_t *cmask,
231 uint_t *pnode,
232 uint_t sbandwidth,
233 uint_t cbandwidth,
234 int interval);
235 static int ehci_find_bestfit_sitd_out_mask(
236 ehci_state_t *ehcip,
237 uchar_t *smask,
238 uint_t *pnode,
239 uint_t sbandwidth,
240 int interval);
241 static uint_t ehci_calculate_bw_availability_mask(
242 ehci_state_t *ehcip,
243 uint_t bandwidth,
244 int leaf,
245 int leaf_count,
246 uchar_t *bw_mask);
247 static void ehci_update_bw_availability(
248 ehci_state_t *ehcip,
249 int bandwidth,
250 int leftmost_leaf,
251 int leaf_count,
252 uchar_t mask);
253
254 /* Miscellaneous functions */
255 ehci_state_t *ehci_obtain_state(
256 dev_info_t *dip);
257 int ehci_state_is_operational(
258 ehci_state_t *ehcip);
259 int ehci_do_soft_reset(
260 ehci_state_t *ehcip);
261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
262 ehci_pipe_private_t *pp,
263 ehci_trans_wrapper_t *tw);
264 usb_frame_number_t ehci_get_current_frame_number(
265 ehci_state_t *ehcip);
266 static void ehci_cpr_cleanup(
267 ehci_state_t *ehcip);
268 int ehci_wait_for_sof(
269 ehci_state_t *ehcip);
270 void ehci_toggle_scheduler(
271 ehci_state_t *ehcip);
272 void ehci_print_caps(ehci_state_t *ehcip);
273 void ehci_print_regs(ehci_state_t *ehcip);
274 void ehci_print_qh(ehci_state_t *ehcip,
275 ehci_qh_t *qh);
276 void ehci_print_qtd(ehci_state_t *ehcip,
277 ehci_qtd_t *qtd);
278 void ehci_create_stats(ehci_state_t *ehcip);
279 void ehci_destroy_stats(ehci_state_t *ehcip);
280 void ehci_do_intrs_stats(ehci_state_t *ehcip,
281 int val);
282 void ehci_do_byte_stats(ehci_state_t *ehcip,
283 size_t len,
284 uint8_t attr,
285 uint8_t addr);
286
287 /*
288  * Check whether this EHCI controller can support PM (power management).
289 */
290 int
291 ehci_hcdi_pm_support(dev_info_t *dip)
292 {
293 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
294 ddi_get_instance(dip));
295
296 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
297 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
298
299 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
300 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
301
302 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
303
304 return (USB_SUCCESS);
305 }
306
307 return (USB_FAILURE);
308 }
309
310 void
311 ehci_dma_attr_workaround(ehci_state_t *ehcip)
312 {
313 /*
314	 * Some NVIDIA chips cannot handle a QH DMA address above 2GB.
315	 * Bit 31 of the DMA address may be dropped, which can cause a
316	 * system crash or other unpredictable results. So force the DMA
317	 * addresses to be allocated below 2GB to make ehci work.
318 */
319 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
320 switch (ehcip->ehci_device_id) {
321 case PCI_DEVICE_NVIDIA_CK804:
322 case PCI_DEVICE_NVIDIA_MCP04:
323 USB_DPRINTF_L2(PRINT_MASK_ATTA,
324 ehcip->ehci_log_hdl,
325 "ehci_dma_attr_workaround: NVIDIA dma "
326 "workaround enabled, force dma address "
327 "to be allocated below 2G");
328 ehcip->ehci_dma_attr.dma_attr_addr_hi =
329 0x7fffffffull;
330 break;
331 default:
332 break;
333
334 }
335 }
336 }
337
338 /*
339 * Host Controller Driver (HCD) initialization functions
340 */
341
342 /*
343 * ehci_set_dma_attributes:
344 *
345 * Set the limits in the DMA attributes structure. Most of the values used
346 * in the DMA limit structures are the default values as specified by the
347 * Writing PCI device drivers document.
348 */
349 void
350 ehci_set_dma_attributes(ehci_state_t *ehcip)
351 {
352 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
353 "ehci_set_dma_attributes:");
354
355 /* Initialize the DMA attributes */
356 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
357 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
358 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
359
360 /* 32 bit addressing */
361 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
362
363 /* Byte alignment */
364 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
365
366 /*
367	 * Since the PCI specification requires byte alignment, the
368	 * burst size field should be set to 1 for PCI devices.
369 */
370 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
371
372 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
373 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
374 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
375 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
376 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
377 ehcip->ehci_dma_attr.dma_attr_flags = 0;
378 ehci_dma_attr_workaround(ehcip);
379 }
380
381
382 /*
383 * ehci_allocate_pools:
384 *
385 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
386 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
387 * to a 16 byte boundary.
388 */
389 int
390 ehci_allocate_pools(ehci_state_t *ehcip)
391 {
392 ddi_device_acc_attr_t dev_attr;
393 size_t real_length;
394 int result;
395 uint_t ccount;
396 int i;
397
398 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
399 "ehci_allocate_pools:");
400
401 /* The host controller will be little endian */
402 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
403 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
404 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
405
406 /* Byte alignment */
407 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
408
409 /* Allocate the QTD pool DMA handle */
410 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
411 DDI_DMA_SLEEP, 0,
412 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
413
414 goto failure;
415 }
416
417 /* Allocate the memory for the QTD pool */
418 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
419 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
420 &dev_attr,
421 DDI_DMA_CONSISTENT,
422 DDI_DMA_SLEEP,
423 0,
424 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
425 &real_length,
426 &ehcip->ehci_qtd_pool_mem_handle)) {
427
428 goto failure;
429 }
430
431 /* Map the QTD pool into the I/O address space */
432 result = ddi_dma_addr_bind_handle(
433 ehcip->ehci_qtd_pool_dma_handle,
434 NULL,
435 (caddr_t)ehcip->ehci_qtd_pool_addr,
436 real_length,
437 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
438 DDI_DMA_SLEEP,
439 NULL,
440 &ehcip->ehci_qtd_pool_cookie,
441 &ccount);
442
443 bzero((void *)ehcip->ehci_qtd_pool_addr,
444 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
445
446 /* Process the result */
447 if (result == DDI_DMA_MAPPED) {
448 /* The cookie count should be 1 */
449 if (ccount != 1) {
450 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
451 "ehci_allocate_pools: More than 1 cookie");
452
453 goto failure;
454 }
455 } else {
456 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
457 "ehci_allocate_pools: Result = %d", result);
458
459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
460
461 goto failure;
462 }
463
464 /*
465 * DMA addresses for QTD pools are bound
466 */
467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
468
469 /* Initialize the QTD pool */
470 for (i = 0; i < ehci_qtd_pool_size; i ++) {
471 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
472 qtd_state, EHCI_QTD_FREE);
473 }
474
475	/* Allocate the QH pool DMA handle */
476 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
477 &ehcip->ehci_dma_attr,
478 DDI_DMA_SLEEP,
479 0,
480 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
481 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
482 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
483
484 goto failure;
485 }
486
487 /* Allocate the memory for the QH pool */
488 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
489 ehci_qh_pool_size * sizeof (ehci_qh_t),
490 &dev_attr,
491 DDI_DMA_CONSISTENT,
492 DDI_DMA_SLEEP,
493 0,
494 (caddr_t *)&ehcip->ehci_qh_pool_addr,
495 &real_length,
496 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
498 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
499
500 goto failure;
501 }
502
503 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
504 NULL,
505 (caddr_t)ehcip->ehci_qh_pool_addr,
506 real_length,
507 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
508 DDI_DMA_SLEEP,
509 NULL,
510 &ehcip->ehci_qh_pool_cookie,
511 &ccount);
512
513 bzero((void *)ehcip->ehci_qh_pool_addr,
514 ehci_qh_pool_size * sizeof (ehci_qh_t));
515
516 /* Process the result */
517 if (result == DDI_DMA_MAPPED) {
518 /* The cookie count should be 1 */
519 if (ccount != 1) {
520 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
521 "ehci_allocate_pools: More than 1 cookie");
522
523 goto failure;
524 }
525 } else {
526 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
527
528 goto failure;
529 }
530
531 /*
532 * DMA addresses for QH pools are bound
533 */
534 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
535
536 /* Initialize the QH pool */
537 for (i = 0; i < ehci_qh_pool_size; i ++) {
538 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
539 }
540
541 /* Byte alignment */
542 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
543
544 return (DDI_SUCCESS);
545
546 failure:
547 /* Byte alignment */
548 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
549
550 return (DDI_FAILURE);
551 }
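/*
 * Illustrative summary (not part of the original source) of the DDI DMA
 * pattern used above for each pool; teardown in ehci_cleanup() runs the
 * same steps in reverse:
 *
 *	ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, 0, &dma_handle);
 *	ddi_dma_mem_alloc(dma_handle, size, &dev_attr, DDI_DMA_CONSISTENT,
 *	    DDI_DMA_SLEEP, 0, &kaddr, &real_length, &mem_handle);
 *	ddi_dma_addr_bind_handle(dma_handle, NULL, kaddr, real_length,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);		-- expect exactly one cookie
 */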
552
553
554 /*
555 * ehci_decode_ddi_dma_addr_bind_handle_result:
556 *
557 * Process the return values of ddi_dma_addr_bind_handle()
558 */
559 void
560 ehci_decode_ddi_dma_addr_bind_handle_result(
561 ehci_state_t *ehcip,
562 int result)
563 {
564 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
565 "ehci_decode_ddi_dma_addr_bind_handle_result:");
566
567 switch (result) {
568 case DDI_DMA_PARTIAL_MAP:
569 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
570 "Partial transfers not allowed");
571 break;
572 case DDI_DMA_INUSE:
573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
574 "Handle is in use");
575 break;
576 case DDI_DMA_NORESOURCES:
577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
578 "No resources");
579 break;
580 case DDI_DMA_NOMAPPING:
581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
582 "No mapping");
583 break;
584 case DDI_DMA_TOOBIG:
585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
586 "Object is too big");
587 break;
588 default:
589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
590 "Unknown dma error");
591 }
592 }
593
594
595 /*
596 * ehci_map_regs:
597 *
598 * The Host Controller (HC) contains a set of on-chip operational registers
599  * which should be mapped into a non-cacheable portion of the system
600 * addressable space.
601 */
602 int
603 ehci_map_regs(ehci_state_t *ehcip)
604 {
605 ddi_device_acc_attr_t attr;
606 uint16_t cmd_reg;
607 uint_t length;
608
609 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
610
611 /* Check to make sure we have memory access */
612 if (pci_config_setup(ehcip->ehci_dip,
613 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
614
615 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
616 "ehci_map_regs: Config error");
617
618 return (DDI_FAILURE);
619 }
620
621 /* Make sure Memory Access Enable is set */
622 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
623
624 if (!(cmd_reg & PCI_COMM_MAE)) {
625
626 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
627 "ehci_map_regs: Memory base address access disabled");
628
629 return (DDI_FAILURE);
630 }
631
632 /* The host controller will be little endian */
633 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
634 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
635 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
636
637 /* Map in EHCI Capability registers */
638 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
639 (caddr_t *)&ehcip->ehci_capsp, 0,
640 sizeof (ehci_caps_t), &attr,
641 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
642
643 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
644 "ehci_map_regs: Map setup error");
645
646 return (DDI_FAILURE);
647 }
648
649 length = ddi_get8(ehcip->ehci_caps_handle,
650 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
651
652 /* Free the original mapping */
653 ddi_regs_map_free(&ehcip->ehci_caps_handle);
654
655 /* Re-map in EHCI Capability and Operational registers */
656 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
657 (caddr_t *)&ehcip->ehci_capsp, 0,
658 length + sizeof (ehci_regs_t), &attr,
659 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
660
661 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
662 "ehci_map_regs: Map setup error");
663
664 return (DDI_FAILURE);
665 }
666
667 /* Get the pointer to EHCI Operational Register */
668 ehcip->ehci_regsp = (ehci_regs_t *)
669 ((uintptr_t)ehcip->ehci_capsp + length);
670
671 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
672 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
673 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
674
675 return (DDI_SUCCESS);
676 }
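/*
 * Illustrative note (not part of the original source): the operational
 * registers start CAPLENGTH bytes after the capability registers, which is
 * why the code above maps the capability block once just to read CAPLENGTH
 * and then remaps a window covering both, ending with
 *
 *	regsp = (ehci_regs_t *)((uintptr_t)capsp + caplength);
 */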
677
678 /*
679 * The following simulated polling is for debugging purposes only.
680 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
681 */
682 static int
683 ehci_is_polled(dev_info_t *dip)
684 {
685 int ret;
686 char *propval;
687
688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
689 "usb-polling", &propval) != DDI_SUCCESS)
690
691 return (0);
692
693 ret = (strcmp(propval, "true") == 0);
694 ddi_prop_free(propval);
695
696 return (ret);
697 }
698
699 static void
700 ehci_poll_intr(void *arg)
701 {
702 /* poll every msec */
703 for (;;) {
704 (void) ehci_intr(arg, NULL);
705 delay(drv_usectohz(1000));
706 }
707 }
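/*
 * Illustrative example (not part of the original source): polled mode can
 * be requested with a property line such as
 *
 *	usb-polling="true";
 *
 * in ehci.conf (driver.conf(5) string-property syntax assumed), or via the
 * equivalent usb-polling=true boot argument mentioned above.
 */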
708
709 /*
710 * ehci_register_intrs_and_init_mutex:
711 *
712  * Register interrupts and initialize the mutex and condition variables.
713 */
714 int
715 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
716 {
717 int intr_types;
718
719 #if defined(__x86)
720 uint8_t iline;
721 #endif
722
723 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
724 "ehci_register_intrs_and_init_mutex:");
725
726 /*
727 * There is a known MSI hardware bug with the EHCI controller
728	 * of the ULI1575 southbridge. Hence MSI is disabled for this chip.
729 */
730 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
731 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
732 ehcip->ehci_msi_enabled = B_FALSE;
733 } else {
734 /* Set the MSI enable flag from the global EHCI MSI tunable */
735 ehcip->ehci_msi_enabled = ehci_enable_msi;
736 }
737
738 /* launch polling thread instead of enabling pci interrupt */
739 if (ehci_is_polled(ehcip->ehci_dip)) {
740 extern pri_t maxclsyspri;
741
742 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
743 "ehci_register_intrs_and_init_mutex: "
744 "running in simulated polled mode");
745
746 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
747 TS_RUN, maxclsyspri);
748
749 goto skip_intr;
750 }
751
752 #if defined(__x86)
753 /*
754 * Make sure that the interrupt pin is connected to the
755 * interrupt controller on x86. Interrupt line 255 means
756 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
757	 * If we returned failure when the interrupt line equals 255, then
758	 * high speed devices would be routed to the companion host controllers.
759 * However, it is not necessary to return failure here, and
760 * o/uhci codes don't check the interrupt line either.
761 * But it's good to log a message here for debug purposes.
762 */
763 iline = pci_config_get8(ehcip->ehci_config_handle,
764 PCI_CONF_ILINE);
765
766 if (iline == 255) {
767 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
768 "ehci_register_intrs_and_init_mutex: "
769 "interrupt line value out of range (%d)",
770 iline);
771 }
772 #endif /* __x86 */
773
774 /* Get supported interrupt types */
775 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
776 &intr_types) != DDI_SUCCESS) {
777 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
778 "ehci_register_intrs_and_init_mutex: "
779 "ddi_intr_get_supported_types failed");
780
781 return (DDI_FAILURE);
782 }
783
784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
785 "ehci_register_intrs_and_init_mutex: "
786 "supported interrupt types 0x%x", intr_types);
787
788 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
789 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
790 != DDI_SUCCESS) {
791 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
792 "ehci_register_intrs_and_init_mutex: MSI "
793 "registration failed, trying FIXED interrupt \n");
794 } else {
795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
796 "ehci_register_intrs_and_init_mutex: "
797 "Using MSI interrupt type\n");
798
799 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
800 ehcip->ehci_flags |= EHCI_INTR;
801 }
802 }
803
804 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
805 (intr_types & DDI_INTR_TYPE_FIXED)) {
806 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
807 != DDI_SUCCESS) {
808 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
809 "ehci_register_intrs_and_init_mutex: "
810 "FIXED interrupt registration failed\n");
811
812 return (DDI_FAILURE);
813 }
814
815 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
816 "ehci_register_intrs_and_init_mutex: "
817 "Using FIXED interrupt type\n");
818
819 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
820 ehcip->ehci_flags |= EHCI_INTR;
821 }
822
823 skip_intr:
824	/* Create the condition variable for advance on async schedule */
825 cv_init(&ehcip->ehci_async_schedule_advance_cv,
826 NULL, CV_DRIVER, NULL);
827
828 return (DDI_SUCCESS);
829 }
830
831
832 /*
833 * ehci_add_intrs:
834 *
835 * Register FIXED or MSI interrupts.
836 */
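/*
 * Illustrative outline (not part of the original source) of the standard
 * DDI interrupt sequence implemented below:
 *
 *	ddi_intr_get_nintrs() / ddi_intr_get_navail()
 *	ddi_intr_alloc()
 *	ddi_intr_get_pri() -> mutex_init(..., DDI_INTR_PRI(pri))
 *	ddi_intr_add_handler()
 *	ddi_intr_get_cap()
 *	ddi_intr_block_enable() or ddi_intr_enable()
 */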
837 static int
838 ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
839 {
840 int actual, avail, intr_size, count = 0;
841 int i, flag, ret;
842
843 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 "ehci_add_intrs: interrupt type 0x%x", intr_type);
845
846 /* Get number of interrupts */
847 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 if ((ret != DDI_SUCCESS) || (count == 0)) {
849 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 "ret: %d, count: %d", ret, count);
852
853 return (DDI_FAILURE);
854 }
855
856 /* Get number of available interrupts */
857 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 "ret: %d, count: %d", ret, count);
862
863 return (DDI_FAILURE);
864 }
865
866 if (avail < count) {
867 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868 "ehci_add_intrs: ehci_add_intrs: nintrs () "
869 "returned %d, navail returned %d\n", count, avail);
870 }
871
872 /* Allocate an array of interrupt handles */
873 intr_size = count * sizeof (ddi_intr_handle_t);
874 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875
876 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878
879 /* call ddi_intr_alloc() */
880 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 intr_type, 0, count, &actual, flag);
882
883 if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886
887 kmem_free(ehcip->ehci_htable, intr_size);
888
889 return (DDI_FAILURE);
890 }
891
892 if (actual < count) {
893 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 "ehci_add_intrs: Requested: %d, Received: %d\n",
895 count, actual);
896
897 for (i = 0; i < actual; i++)
898 (void) ddi_intr_free(ehcip->ehci_htable[i]);
899
900 kmem_free(ehcip->ehci_htable, intr_size);
901
902 return (DDI_FAILURE);
903 }
904
905 ehcip->ehci_intr_cnt = actual;
906
907 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911
912 for (i = 0; i < actual; i++)
913 (void) ddi_intr_free(ehcip->ehci_htable[i]);
914
915 kmem_free(ehcip->ehci_htable, intr_size);
916
917 return (DDI_FAILURE);
918 }
919
920 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 ehcip->ehci_intr_pri);
923
924 /* Test for high level mutex */
925 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 "ehci_add_intrs: Hi level interrupt not supported");
928
929 for (i = 0; i < actual; i++)
930 (void) ddi_intr_free(ehcip->ehci_htable[i]);
931
932 kmem_free(ehcip->ehci_htable, intr_size);
933
934 return (DDI_FAILURE);
935 }
936
937 /* Initialize the mutex */
938 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 DDI_INTR_PRI(ehcip->ehci_intr_pri));
940
941 /* Call ddi_intr_add_handler() */
942 for (i = 0; i < actual; i++) {
943 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 ehci_intr, (caddr_t)ehcip,
945 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 "ehci_add_intrs:ddi_intr_add_handler() "
948 "failed %d", ret);
949
950 for (i = 0; i < actual; i++)
951 (void) ddi_intr_free(ehcip->ehci_htable[i]);
952
953 mutex_destroy(&ehcip->ehci_int_mutex);
954 kmem_free(ehcip->ehci_htable, intr_size);
955
956 return (DDI_FAILURE);
957 }
958 }
959
960 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964
965 for (i = 0; i < actual; i++) {
966 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 (void) ddi_intr_free(ehcip->ehci_htable[i]);
968 }
969
970 mutex_destroy(&ehcip->ehci_int_mutex);
971 kmem_free(ehcip->ehci_htable, intr_size);
972
973 return (DDI_FAILURE);
974 }
975
976 /* Enable all interrupts */
977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 /* Call ddi_intr_block_enable() for MSI interrupts */
979 (void) ddi_intr_block_enable(ehcip->ehci_htable,
980 ehcip->ehci_intr_cnt);
981 } else {
982 /* Call ddi_intr_enable for MSI or FIXED interrupts */
983 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 }
986
987 return (DDI_SUCCESS);
988 }
989
990
991 /*
992 * ehci_init_hardware
993 *
994  * Take control from the BIOS, reset the EHCI host controller, check the version, etc.
995 */
996 int
997 ehci_init_hardware(ehci_state_t *ehcip)
998 {
999 int revision;
1000 uint16_t cmd_reg;
1001 int abort_on_BIOS_take_over_failure;
1002
1003 /* Take control from the BIOS */
1004 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1005
1006 /* read .conf file properties */
1007 abort_on_BIOS_take_over_failure =
1008 ddi_prop_get_int(DDI_DEV_T_ANY,
1009 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1010 "abort-on-BIOS-take-over-failure", 0);
1011
1012 if (abort_on_BIOS_take_over_failure) {
1013
1014 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1015 "Unable to take control from BIOS.");
1016
1017 return (DDI_FAILURE);
1018 }
1019
1020 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1021 "Unable to take control from BIOS. Failure is ignored.");
1022 }
1023
1024	/* Set Memory Access Enable and Bus Master Enable */
1025 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1026 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1027 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1028
1029 /* Reset the EHCI host controller */
1030 Set_OpReg(ehci_command,
1031 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1032
1033 /* Wait 10ms for reset to complete */
1034 drv_usecwait(EHCI_RESET_TIMEWAIT);
1035
1036 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1037
1038 /* Verify the version number */
1039 revision = Get_16Cap(ehci_version);
1040
1041 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1042 "ehci_init_hardware: Revision 0x%x", revision);
1043
1044 /*
1045 * EHCI driver supports EHCI host controllers compliant to
1046 * 0.95 and higher revisions of EHCI specifications.
1047 */
1048 if (revision < EHCI_REVISION_0_95) {
1049
1050 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1051 "Revision 0x%x is not supported", revision);
1052
1053 return (DDI_FAILURE);
1054 }
1055
1056 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1057
1058 /* Initialize the Frame list base address area */
1059 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1060
1061 return (DDI_FAILURE);
1062 }
1063
1064 /*
1065 * For performance reasons, do not insert anything into the
1066 * asynchronous list or activate the asynch list schedule until
1067 * there is a valid QH.
1068 */
1069 ehcip->ehci_head_of_async_sched_list = NULL;
1070
1071 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1072 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1073 /*
1074 * The driver is unable to reliably stop the asynch
1075 * list schedule on VIA VT6202 controllers, so we
1076 * always keep a dummy QH on the list.
1077 */
1078 ehci_qh_t *dummy_async_qh =
1079 ehci_alloc_qh(ehcip, NULL,
1080 EHCI_INTERRUPT_MODE_FLAG);
1081
1082 Set_QH(dummy_async_qh->qh_link_ptr,
1083 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1084 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1085
1086 /* Set this QH to be the "head" of the circular list */
1087 Set_QH(dummy_async_qh->qh_ctrl,
1088 Get_QH(dummy_async_qh->qh_ctrl) |
1089 EHCI_QH_CTRL_RECLAIM_HEAD);
1090
1091 Set_QH(dummy_async_qh->qh_next_qtd,
1092 EHCI_QH_NEXT_QTD_PTR_VALID);
1093 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1094 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1095
1096 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1097 ehcip->ehci_open_async_count++;
1098 ehcip->ehci_async_req_count++;
1099 }
1100 }
1101
1102 return (DDI_SUCCESS);
1103 }
1104
1105
1106 /*
1107 * ehci_init_workaround
1108 *
1109  * Apply some workarounds while initializing the EHCI controller.
1110 */
1111 int
1112 ehci_init_workaround(ehci_state_t *ehcip)
1113 {
1114 /*
1115 * Acer Labs Inc. M5273 EHCI controller does not send
1116 * interrupts unless the Root hub ports are routed to the EHCI
1117 * host controller; so route the ports now, before we test for
1118 * the presence of SOFs interrupts.
1119 */
1120 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1121 /* Route all Root hub ports to EHCI host controller */
1122 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1123 }
1124
1125 /*
1126 * VIA chips have some issues and may not work reliably.
1127 * Revisions >= 0x80 are part of a southbridge and appear
1128 * to be reliable with the workaround.
1129	 * For revisions < 0x80, complain if we were bound using the
1130	 * PCI class code, else proceed. This allows the user to
1131	 * bind ehci specifically to this chip and not get the
1132	 * warnings.
1133 */
1134 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1135
1136 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1137
1138 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1139 "ehci_init_workaround: Applying VIA workarounds "
1140 "for the 6212 chip.");
1141
1142 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1143 "pciclass,0c0320") == 0) {
1144
1145 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1146 "Due to recently discovered incompatibilities");
1147 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1148 "with this USB controller, USB2.x transfer");
1149 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1150 "support has been disabled. This device will");
1151 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1152 "continue to function as a USB1.x controller.");
1153 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1154 "If you are interested in enabling USB2.x");
1155 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1156 "support please, refer to the ehci(4D) man page.");
1157 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1158 "Please also refer to www.sun.com/io for");
1159 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1160 "Solaris Ready products and to");
1161 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1162 "www.sun.com/bigadmin/hcl for additional");
1163 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1164 "compatible USB products.");
1165
1166 return (DDI_FAILURE);
1167
1168 } else if (ehci_vt62x2_workaround) {
1169
1170 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1171 "Applying VIA workarounds");
1172 }
1173 }
1174
1175 return (DDI_SUCCESS);
1176 }
1177
1178
1179 /*
1180 * ehci_init_check_status
1181 *
1182 * Check if EHCI host controller is running
1183 */
1184 int
1185 ehci_init_check_status(ehci_state_t *ehcip)
1186 {
1187 clock_t sof_time_wait;
1188
1189 /*
1190 * Get the number of clock ticks to wait.
1191 * This is based on the maximum time it takes for a frame list rollover
1192	 * and the maximum time to wait for SOFs to begin.
1193 */
1194 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1195 EHCI_SOF_TIMEWAIT);
1196
1197 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1198 ehcip->ehci_flags |= EHCI_CV_INTR;
1199
1200 /* We need to add a delay to allow the chip time to start running */
1201 (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
1202 &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
1203
1204 /*
1205	 * Check whether the EHCI host controller is running; otherwise return failure.
1206 */
1207 if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1208 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1209
1210 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1211 "No SOF interrupts have been received, this USB EHCI host"
1212 "controller is unusable");
1213
1214 /*
1215 * Route all Root hub ports to Classic host
1216 * controller, in case this is an unusable ALI M5273
1217 * EHCI controller.
1218 */
1219 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1220 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1221 }
1222
1223 return (DDI_FAILURE);
1224 }
1225
1226 return (DDI_SUCCESS);
1227 }
1228
1229
1230 /*
1231 * ehci_init_ctlr:
1232 *
1233 * Initialize the Host Controller (HC).
1234 */
1235 int
1236 ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
1237 {
1238 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1239
1240 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1241
1242 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1243
1244 return (DDI_FAILURE);
1245 }
1246 }
1247
1248 /*
1249	 * Check for the Asynchronous schedule park capability feature. If this
1250	 * feature is supported, then program the ehci command register with the
1251	 * appropriate values.
1252 */
1253 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1254
1255 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1256 "ehci_init_ctlr: Async park mode is supported");
1257
1258 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1259 (EHCI_CMD_ASYNC_PARK_ENABLE |
1260 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1261 }
1262
1263 /*
1264	 * Check for the programmable periodic frame list feature. If this
1265	 * feature is supported, then program the ehci command register with a
1266	 * 1024-entry frame list size.
1267 */
1268 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1269
1270 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1271 "ehci_init_ctlr: Variable programmable periodic "
1272 "frame list is supported");
1273
1274 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1275 EHCI_CMD_FRAME_1024_SIZE));
1276 }
1277
1278 /*
1279 * Currently EHCI driver doesn't support 64 bit addressing.
1280 *
1281 * If the controller is 64-bit address capable, then program
1282 * ehci_ctrl_segment register with 4 Gigabyte segment where all
1283 * of the interface data structures are allocated.
1284 */
1285 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1286
1287 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1288 "ehci_init_ctlr: EHCI driver doesn't support "
1289 "64 bit addressing");
1290
1291 /* 64 bit addressing is not supported */
1292 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1293 }
1294
1295 /* Turn on/off the schedulers */
1296 ehci_toggle_scheduler(ehcip);
1297
1298 /* Set host controller soft state to operational */
1299 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1300
1301 /*
1302 * Set the Periodic Frame List Base Address register with the
1303 * starting physical address of the Periodic Frame List.
1304 */
1305 Set_OpReg(ehci_periodic_list_base,
1306 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1307 EHCI_PERIODIC_LIST_BASE));
1308
1309 /*
1310 * Set ehci_interrupt to enable all interrupts except Root
1311 * Hub Status change interrupt.
1312 */
1313 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1314 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1315 EHCI_INTR_USB);
1316
1317 /*
1318 * Set the desired interrupt threshold and turn on EHCI host controller.
1319 */
1320 uint32_t cmd_reg = Get_OpReg(ehci_command);
1321
1322 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1323 "%s: cmd_reg: %x\n", __func__, cmd_reg);
1324
1325 cmd_reg &= ~EHCI_CMD_INTR_THRESHOLD;
1326 cmd_reg |= EHCI_CMD_01_INTR;
1327 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
1328
1329 Set_OpReg(ehci_command, cmd_reg | EHCI_CMD_HOST_CTRL_RUN);
1330
1331 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1332
1333 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1334
1335 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1336
1337 /* Set host controller soft state to error */
1338 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1339
1340 return (DDI_FAILURE);
1341 }
1342
1343 if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1344
1345 /* Set host controller soft state to error */
1346 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1347
1348 return (DDI_FAILURE);
1349 }
1350
1351 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1352 "ehci_init_ctlr: SOF's have started");
1353 }
1354
1355 /* Route all Root hub ports to EHCI host controller */
1356 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1357
1358 return (DDI_SUCCESS);
1359 }
1360
1361 /*
1362 * ehci_take_control:
1363 *
1364  * Handshake to take EHCI control from the BIOS if necessary. This is only
1365  * valid for x86 machines, because SPARC doesn't have a BIOS.
1366  * On x86 machines, the take-control process includes:
1367 * o get the base address of the extended capability list
1368 * o find out the capability for handoff synchronization in the list.
1369 * o check if BIOS has owned the host controller.
1370 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
1371 * o wait for a constant time and check if BIOS has relinquished control.
1372 */
1373 /* ARGSUSED */
1374 static int
1375 ehci_take_control(ehci_state_t *ehcip)
1376 {
1377 #if defined(__x86)
1378 uint32_t extended_cap;
1379 uint32_t extended_cap_offset;
1380 uint32_t extended_cap_id;
1381 uint_t retry;
1382
1383 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1384 "ehci_take_control:");
1385
1386 /*
1387	 * According to EHCI Spec 2.2.4, get the EECP base address from the HCCPARAMS
1388 * register.
1389 */
1390 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1391 EHCI_HCC_EECP_SHIFT;
1392
1393 /*
1394	 * According to EHCI Spec 2.2.4, if the extended capability offset is
1395	 * less than 40h then it's not valid. This means we don't need to
1396 * worry about BIOS handoff.
1397 */
1398 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1399
1400 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1401 "ehci_take_control: Hardware doesn't support legacy.");
1402
1403 goto success;
1404 }
1405
1406 /*
1407	 * According to EHCI Spec 2.1.7, a zero offset indicates the
1408 * end of the extended capability list.
1409 */
1410 while (extended_cap_offset) {
1411
1412 /* Get the extended capability value. */
1413 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1414 extended_cap_offset);
1415
1416 /*
1417 * It's possible that we'll receive an invalid PCI read here due
1418		 * to something going wrong in the platform firmware. This has
1419 * been observed in the wild depending on the version of ACPI in
1420 * use. If this happens, we'll assume that the capability does
1421 * not exist and that we do not need to take control from the
1422 * BIOS.
1423 */
1424 if (extended_cap == PCI_EINVAL32) {
1425 extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
1426 break;
1427 }
1428
1429 /* Get the capability ID */
1430 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1431 EHCI_EX_CAP_ID_SHIFT;
1432
1433		/* Check if the card supports legacy */
1434 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1435 break;
1436 }
1437
1438 /* Get the offset of the next capability */
1439 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1440 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1441
1442 }
1443
1444 /*
1445 * Unable to find legacy support in hardware's extended capability list.
1446 * This means we don't need to worry about BIOS handoff.
1447 */
1448 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1449
1450 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1451 "ehci_take_control: Hardware doesn't support legacy");
1452
1453 goto success;
1454 }
1455
1456	/* Check if the BIOS currently owns the controller. */
1457 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1458
1459 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1460 "ehci_take_control: BIOS does not own EHCI");
1461
1462 goto success;
1463 }
1464
1465 /*
1466	 * According to EHCI Spec 5.1, the OS driver initiates an ownership
1467 * request by setting the OS Owned semaphore to a one. The OS
1468 * waits for the BIOS Owned bit to go to a zero before attempting
1469 * to use the EHCI controller. The time that OS must wait for BIOS
1470 * to respond to the request for ownership is beyond the scope of
1471 * this specification.
1472 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
1473 * for BIOS to release the ownership.
1474 */
1475 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1476 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1477 extended_cap);
1478
1479 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1480
1481 /* wait a special interval */
1482 #ifndef __lock_lint
1483 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1484 #endif
1485 /* Check to see if the BIOS has released the ownership */
1486 extended_cap = pci_config_get32(
1487 ehcip->ehci_config_handle, extended_cap_offset);
1488
1489 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1490
1491 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1492 ehcip->ehci_log_hdl,
1493 "ehci_take_control: BIOS has released "
1494 "the ownership. retry = %d", retry);
1495
1496 goto success;
1497 }
1498
1499 }
1500
1501 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1502 "ehci_take_control: take control from BIOS failed.");
1503
1504 return (USB_FAILURE);
1505
1506 success:
1507
1508 #endif /* __x86 */
1509 return (USB_SUCCESS);
1510 }
1511
1512
1513 /*
1514  * ehci_init_periodic_frame_lst_table:
1515 *
1516 * Allocate the system memory and initialize Host Controller
1517  * Periodic Frame List table area. The start of the Periodic
1518  * Frame List Table area must be 4096-byte aligned.
1519 */
1520 static int
1521 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1522 {
1523 ddi_device_acc_attr_t dev_attr;
1524 size_t real_length;
1525 uint_t ccount;
1526 int result;
1527
1528 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1529
1530 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1531 "ehci_init_periodic_frame_lst_table:");
1532
1533 /* The host controller will be little endian */
1534 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1535 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1536 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1537
1538 /* Force the required 4K restrictive alignment */
1539 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1540
1541 /* Create space for the Periodic Frame List */
1542 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1543 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1544
1545 goto failure;
1546 }
1547
1548 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1549 sizeof (ehci_periodic_frame_list_t),
1550 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1551 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1552 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1553
1554 goto failure;
1555 }
1556
1557 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1558 "ehci_init_periodic_frame_lst_table: "
1559 "Real length %lu", real_length);
1560
1561 /* Map the whole Periodic Frame List into the I/O address space */
1562 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1563 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1564 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1565 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1566
1567 if (result == DDI_DMA_MAPPED) {
1568 /* The cookie count should be 1 */
1569 if (ccount != 1) {
1570 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1571 "ehci_init_periodic_frame_lst_table: "
1572 "More than 1 cookie");
1573
1574 goto failure;
1575 }
1576 } else {
1577 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1578
1579 goto failure;
1580 }
1581
1582 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1583 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1584 (void *)ehcip->ehci_periodic_frame_list_tablep,
1585 ehcip->ehci_pflt_cookie.dmac_address);
1586
1587 /*
1588 * DMA addresses for Periodic Frame List are bound.
1589 */
1590 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1591
1592 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1593
1594 /* Initialize the Periodic Frame List */
1595 ehci_build_interrupt_lattice(ehcip);
1596
1597 /* Reset Byte Alignment to Default */
1598 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1599
1600 return (DDI_SUCCESS);
1601 failure:
1602 /* Byte alignment */
1603 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1604
1605 return (DDI_FAILURE);
1606 }
1607
1608
1609 /*
1610 * ehci_build_interrupt_lattice:
1611 *
1612 * Construct the interrupt lattice tree using static Endpoint Descriptors
1613 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
1614 * lists and the Host Controller (HC) processes one interrupt QH list in
1615 * every frame. The Host Controller traverses the periodic schedule by
1616 * constructing an array offset reference from the Periodic List Base Address
1617 * register and bits 12 to 3 of Frame Index register. It fetches the element
1618 * and begins traversing the graph of linked schedule data structures.
1619 */
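/*
 * Illustrative example (not part of the original source): per the EHCI
 * spec, the frame list entry fetched in a given frame is roughly
 *
 *	entry = *(PERIODICLISTBASE + (((FRINDEX >> 3) & 0x3ff) << 2))
 *
 * i.e. bits 12:3 of FRINDEX select one of the 1024 four-byte frame list
 * entries, each of which points at the bottom of the lattice built here.
 */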
1620 static void
1621 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1622 {
1623 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1624 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1625 ehci_periodic_frame_list_t *periodic_frame_list =
1626 ehcip->ehci_periodic_frame_list_tablep;
1627 ushort_t *temp, num_of_nodes;
1628 uintptr_t addr;
1629 int i, j, k;
1630
1631 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1632 "ehci_build_interrupt_lattice:");
1633
1634 /*
1635 * Reserve the first 63 Endpoint Descriptor (QH) structures
1636	 * in the pool as static endpoints; these are required for
1637	 * constructing the interrupt lattice tree.
1638 */
1639 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1640 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1641 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1642 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1643 Set_QH(list_array[i].qh_alt_next_qtd,
1644 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1645 }
1646
1647 /*
1648	 * Make sure that the last Endpoint on the periodic frame list terminates
1649	 * the periodic schedule.
1650 */
1651 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1652
1653 /* Build the interrupt lattice tree */
1654 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1655 /*
1656 * The next pointer in the host controller endpoint
1657 * descriptor must contain an iommu address. Calculate
1658 * the offset into the cpu address and add this to the
1659 * starting iommu address.
1660 */
1661 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1662
1663 Set_QH(list_array[2*i + 1].qh_link_ptr,
1664 addr | EHCI_QH_LINK_REF_QH);
1665 Set_QH(list_array[2*i + 2].qh_link_ptr,
1666 addr | EHCI_QH_LINK_REF_QH);
1667 }
1668
1669 /* Build the tree bottom */
1670 temp = (unsigned short *)
1671 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1672
1673 num_of_nodes = 1;
1674
1675 /*
1676 * Initialize the values which are used for setting up head pointers
1677	 * for the 32ms scheduling lists, which start from the Periodic Frame
1678 * List.
1679 */
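	/*
	 * Illustrative example (not part of the original source): for an
	 * 8-entry case the loop below leaves temp[] holding 0, 4, 2, 6,
	 * 1, 5, 3, 7 -- the bit-reversed order -- so consecutive frame
	 * list entries point at interrupt lists that are spread as evenly
	 * as possible in time.
	 */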
1680 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1681 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1682 ehci_index[j++] = temp[k];
1683 ehci_index[j] = temp[k] + ehci_pow_2(i);
1684 }
1685
1686 num_of_nodes *= 2;
1687 for (k = 0; k < num_of_nodes; k++)
1688 temp[k] = ehci_index[k];
1689 }
1690
1691 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1692
1693 /*
1694 * Initialize the interrupt list in the Periodic Frame List Table
1695 * so that it points to the bottom of the tree.
1696 */
1697 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1698 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1699 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1700
1701 ASSERT(addr);
1702
1703 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1704 Set_PFLT(periodic_frame_list->
1705 ehci_periodic_frame_list_table[ehci_index[j++]],
1706 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1707 }
1708 }
1709 }
1710
1711
1712 /*
1713 * ehci_alloc_hcdi_ops:
1714 *
1715 * The HCDI interfaces or entry points are the software interfaces used by
1716 * the Universal Serial Bus Driver (USBA) to access the services of the
1717 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1718 * about all available HCDI interfaces or entry points.
1719 */
1720 usba_hcdi_ops_t *
1721 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1722 {
1723 usba_hcdi_ops_t *usba_hcdi_ops;
1724
1725 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1726 "ehci_alloc_hcdi_ops:");
1727
1728 usba_hcdi_ops = usba_alloc_hcdi_ops();
1729
1730 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1731
1732 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1733 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1734 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1735
1736 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1737 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1738 ehci_hcdi_pipe_reset_data_toggle;
1739
1740 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1741 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1742 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1743 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1744
1745 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1746 ehci_hcdi_bulk_transfer_size;
1747
1748 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1749 ehci_hcdi_pipe_stop_intr_polling;
1750 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1751 ehci_hcdi_pipe_stop_isoc_polling;
1752
1753 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1754 ehci_hcdi_get_current_frame_number;
1755 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1756 ehci_hcdi_get_max_isoc_pkts;
1757
1758 usba_hcdi_ops->usba_hcdi_console_input_init =
1759 ehci_hcdi_polled_input_init;
1760 usba_hcdi_ops->usba_hcdi_console_input_enter =
1761 ehci_hcdi_polled_input_enter;
1762 usba_hcdi_ops->usba_hcdi_console_read =
1763 ehci_hcdi_polled_read;
1764 usba_hcdi_ops->usba_hcdi_console_input_exit =
1765 ehci_hcdi_polled_input_exit;
1766 usba_hcdi_ops->usba_hcdi_console_input_fini =
1767 ehci_hcdi_polled_input_fini;
1768
1769 usba_hcdi_ops->usba_hcdi_console_output_init =
1770 ehci_hcdi_polled_output_init;
1771 usba_hcdi_ops->usba_hcdi_console_output_enter =
1772 ehci_hcdi_polled_output_enter;
1773 usba_hcdi_ops->usba_hcdi_console_write =
1774 ehci_hcdi_polled_write;
1775 usba_hcdi_ops->usba_hcdi_console_output_exit =
1776 ehci_hcdi_polled_output_exit;
1777 usba_hcdi_ops->usba_hcdi_console_output_fini =
1778 ehci_hcdi_polled_output_fini;
1779 return (usba_hcdi_ops);
1780 }
1781
1782
1783 /*
1784 * Host Controller Driver (HCD) deinitialization functions
1785 */
1786
1787 /*
1788 * ehci_cleanup:
1789 *
1790 * Cleanup on attach failure or detach
1791 */
1792 int
1793 ehci_cleanup(ehci_state_t *ehcip)
1794 {
1795 ehci_trans_wrapper_t *tw;
1796 ehci_pipe_private_t *pp;
1797 ehci_qtd_t *qtd;
1798 int i, ctrl, rval;
1799 int flags = ehcip->ehci_flags;
1800
1801 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1802
1803 if (flags & EHCI_RHREG) {
1804 /* Unload the root hub driver */
1805 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1806
1807 return (DDI_FAILURE);
1808 }
1809 }
1810
1811 if (flags & EHCI_USBAREG) {
1812 /* Unregister this HCD instance with USBA */
1813 usba_hcdi_unregister(ehcip->ehci_dip);
1814 }
1815
1816 if (flags & EHCI_INTR) {
1817
1818 mutex_enter(&ehcip->ehci_int_mutex);
1819
1820 /* Disable all EHCI QH list processing */
1821 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1822 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1823 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1824
1825 /* Disable all EHCI interrupts */
1826 Set_OpReg(ehci_interrupt, 0);
1827
1828 /* wait for the next SOF */
1829 (void) ehci_wait_for_sof(ehcip);
1830
1831 /* Route all Root hub ports to Classic host controller */
1832 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1833
1834 /* Stop the EHCI host controller */
1835 Set_OpReg(ehci_command,
1836 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1837
1838 mutex_exit(&ehcip->ehci_int_mutex);
1839
1840 /* Wait for some time */
1841 delay(drv_usectohz(EHCI_TIMEWAIT));
1842
1843 ehci_rem_intrs(ehcip);
1844 }
1845
1846 /* Unmap the EHCI registers */
1847 if (ehcip->ehci_caps_handle) {
1848 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1849 }
1850
1851 if (ehcip->ehci_config_handle) {
1852 pci_config_teardown(&ehcip->ehci_config_handle);
1853 }
1854
1855 /* Free all the buffers */
1856 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1857 for (i = 0; i < ehci_qtd_pool_size; i ++) {
1858 qtd = &ehcip->ehci_qtd_pool_addr[i];
1859 ctrl = Get_QTD(ehcip->
1860 ehci_qtd_pool_addr[i].qtd_state);
1861
1862 if ((ctrl != EHCI_QTD_FREE) &&
1863 (ctrl != EHCI_QTD_DUMMY) &&
1864 (qtd->qtd_trans_wrapper)) {
1865
1866 mutex_enter(&ehcip->ehci_int_mutex);
1867
1868 tw = (ehci_trans_wrapper_t *)
1869 EHCI_LOOKUP_ID((uint32_t)
1870 Get_QTD(qtd->qtd_trans_wrapper));
1871
1872 /* Obtain the pipe private structure */
1873 pp = tw->tw_pipe_private;
1874
1875 /* Stop the transfer timer */
1876 ehci_stop_xfer_timer(ehcip, tw,
1877 EHCI_REMOVE_XFER_ALWAYS);
1878
1879 ehci_deallocate_tw(ehcip, pp, tw);
1880
1881 mutex_exit(&ehcip->ehci_int_mutex);
1882 }
1883 }
1884
1885 /*
1886 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1887 * the handle for QTD pools.
1888 */
1889 if ((ehcip->ehci_dma_addr_bind_flag &
1890 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1891
1892 rval = ddi_dma_unbind_handle(
1893 ehcip->ehci_qtd_pool_dma_handle);
1894
1895 ASSERT(rval == DDI_SUCCESS);
1896 }
1897 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1898 }
1899
1900 /* Free the QTD pool */
1901 if (ehcip->ehci_qtd_pool_dma_handle) {
1902 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1903 }
1904
1905 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1906 /*
1907 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1908 * the handle for QH pools.
1909 */
1910 if ((ehcip->ehci_dma_addr_bind_flag &
1911 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1912
1913 rval = ddi_dma_unbind_handle(
1914 ehcip->ehci_qh_pool_dma_handle);
1915
1916 ASSERT(rval == DDI_SUCCESS);
1917 }
1918
1919 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1920 }
1921
1922 /* Free the QH pool */
1923 if (ehcip->ehci_qh_pool_dma_handle) {
1924 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1925 }
1926
1927 /* Free the Periodic frame list table (PFLT) area */
1928 if (ehcip->ehci_periodic_frame_list_tablep &&
1929 ehcip->ehci_pflt_mem_handle) {
1930 /*
1931 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1932 * the handle for PFLT.
1933 */
1934 if ((ehcip->ehci_dma_addr_bind_flag &
1935 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1936
1937 rval = ddi_dma_unbind_handle(
1938 ehcip->ehci_pflt_dma_handle);
1939
1940 ASSERT(rval == DDI_SUCCESS);
1941 }
1942
1943 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1944 }
1945
1946 (void) ehci_isoc_cleanup(ehcip);
1947
1948 if (ehcip->ehci_pflt_dma_handle) {
1949 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1950 }
1951
1952 if (flags & EHCI_INTR) {
1953 /* Destroy the mutex */
1954 mutex_destroy(&ehcip->ehci_int_mutex);
1955
1956 /* Destroy the async schedule advance condition variable */
1957 cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1958 }
1959
1960 /* clean up kstat structs */
1961 ehci_destroy_stats(ehcip);
1962
1963 /* Free ehci hcdi ops */
1964 if (ehcip->ehci_hcdi_ops) {
1965 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1966 }
1967
1968 if (flags & EHCI_ZALLOC) {
1969
1970 usb_free_log_hdl(ehcip->ehci_log_hdl);
1971
1972 /* Remove all properties that might have been created */
1973 ddi_prop_remove_all(ehcip->ehci_dip);
1974
1975 /* Free the soft state */
1976 ddi_soft_state_free(ehci_statep,
1977 ddi_get_instance(ehcip->ehci_dip));
1978 }
1979
1980 return (DDI_SUCCESS);
1981 }
1982
1983
1984 /*
1985 * ehci_rem_intrs:
1986 *
1987 * Unregister FIXED or MSI interrupts
1988 */
1989 static void
1990 ehci_rem_intrs(ehci_state_t *ehcip)
1991 {
1992 int i;
1993
1994 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1995 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1996
1997 /* Disable all interrupts */
1998 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1999 (void) ddi_intr_block_disable(ehcip->ehci_htable,
2000 ehcip->ehci_intr_cnt);
2001 } else {
2002 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2003 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
2004 }
2005 }
2006
2007 /* Call ddi_intr_remove_handler() */
2008 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2009 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
2010 (void) ddi_intr_free(ehcip->ehci_htable[i]);
2011 }
2012
2013 kmem_free(ehcip->ehci_htable,
2014 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
2015 }
2016
2017
2018 /*
2019 * ehci_cpr_suspend
2020 */
2021 int
2022 ehci_cpr_suspend(ehci_state_t *ehcip)
2023 {
2024 int i;
2025
2026 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2027 "ehci_cpr_suspend:");
2028
2029 /* Call into the root hub and suspend it */
2030 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2031
2032 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2033 "ehci_cpr_suspend: root hub fails to suspend");
2034
2035 return (DDI_FAILURE);
2036 }
2037
2038 /* Only root hub's intr pipe should be open at this time */
2039 mutex_enter(&ehcip->ehci_int_mutex);
2040
2041 ASSERT(ehcip->ehci_open_pipe_count == 0);
2042
2043 /* Just wait till all resources are reclaimed */
2044 i = 0;
2045 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2046 ehci_handle_endpoint_reclaimation(ehcip);
2047 (void) ehci_wait_for_sof(ehcip);
2048 }
2049 ASSERT(ehcip->ehci_reclaim_list == NULL);
2050
2051 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2052 "ehci_cpr_suspend: Disable HC QH list processing");
2053
2054 /* Disable all EHCI QH list processing */
2055 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2056 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2057
2058 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2059 "ehci_cpr_suspend: Disable HC interrupts");
2060
2061 /* Disable all EHCI interrupts */
2062 Set_OpReg(ehci_interrupt, 0);
2063
2064 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2065 "ehci_cpr_suspend: Wait for the next SOF");
2066
2067 /* Wait for the next SOF */
2068 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2069
2070 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2071 "ehci_cpr_suspend: ehci host controller suspend failed");
2072
2073 mutex_exit(&ehcip->ehci_int_mutex);
2074 return (DDI_FAILURE);
2075 }
2076
2077 /*
2078 * Stop the ehci host controller if no usb keyboard
2079 * is connected or if force_ehci_off is set.
2080 */
2081 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2082 Set_OpReg(ehci_command,
2083 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2084
2085 }
2086
2087 /* Set host controller soft state to suspend */
2088 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2089
2090 mutex_exit(&ehcip->ehci_int_mutex);
2091
2092 return (DDI_SUCCESS);
2093 }
2094
2095
2096 /*
2097 * ehci_cpr_resume
2098 */
2099 int
2100 ehci_cpr_resume(ehci_state_t *ehcip)
2101 {
2102 mutex_enter(&ehcip->ehci_int_mutex);
2103
2104 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2105 "ehci_cpr_resume: Restart the controller");
2106
2107 /* Cleanup ehci specific information across cpr */
2108 ehci_cpr_cleanup(ehcip);
2109
2110 /* Restart the controller */
2111 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2112
2113 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2114 "ehci_cpr_resume: ehci host controller resume failed ");
2115
2116 mutex_exit(&ehcip->ehci_int_mutex);
2117
2118 return (DDI_FAILURE);
2119 }
2120
2121 mutex_exit(&ehcip->ehci_int_mutex);
2122
2123 /* Now resume the root hub */
2124 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2125
2126 return (DDI_FAILURE);
2127 }
2128
2129 return (DDI_SUCCESS);
2130 }
2131
2132
2133 /*
2134 * Bandwidth Allocation functions
2135 */
2136
2137 /*
2138 * ehci_allocate_bandwidth:
2139 *
2140 * Figure out whether or not this interval may be supported. Return the index
2141 * into the lattice if it can be supported. Return allocation failure if it
2142 * can not be supported.
2143 */
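/*
 * An illustrative call sequence (a rough sketch, not a quote of the actual
 * pipe-open code; the local variable names and where the results are saved
 * are hypothetical):
 *
 *	int	error;
 *	uint_t	pnode;
 *	uchar_t	smask, cmask;
 *
 *	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
 *	if ((error = ehci_allocate_bandwidth(ehcip, ph, &pnode,
 *	    &smask, &cmask)) != USB_SUCCESS) {
 *		return (error);
 *	}
 *	... save pnode, smask and cmask so that the matching
 *	... ehci_deallocate_bandwidth() call can be made with the
 *	... same values at pipe close time.
 */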
2144 int
2145 ehci_allocate_bandwidth(
2146 ehci_state_t *ehcip,
2147 usba_pipe_handle_data_t *ph,
2148 uint_t *pnode,
2149 uchar_t *smask,
2150 uchar_t *cmask)
2151 {
2152 int error = USB_SUCCESS;
2153
2154 /* This routine is protected by the ehci_int_mutex */
2155 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2156
2157 /* Initialize the pnode before searching the lattice */
2158 *pnode = 0;
2159
2160 /* Allocate high speed bandwidth */
2161 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2162 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2163
2164 return (error);
2165 }
2166
2167 /*
2168 * For low/full speed usb devices, allocate classic TT bandwidth
2169 * in addition to the high speed bandwidth.
2170 */
2171 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2172
2173 /* Allocate classic TT bandwidth */
2174 if ((error = ehci_allocate_classic_tt_bandwidth(
2175 ehcip, ph, *pnode)) != USB_SUCCESS) {
2176
2177 /* Deallocate high speed bandwidth */
2178 ehci_deallocate_high_speed_bandwidth(
2179 ehcip, ph, *pnode, *smask, *cmask);
2180 }
2181 }
2182
2183 return (error);
2184 }
2185
2186
2187 /*
2188 * ehci_allocate_high_speed_bandwidth:
2189 *
2190 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2191 * isochronous endpoints.
2192 */
2193 static int
2194 ehci_allocate_high_speed_bandwidth(
2195 ehci_state_t *ehcip,
2196 usba_pipe_handle_data_t *ph,
2197 uint_t *pnode,
2198 uchar_t *smask,
2199 uchar_t *cmask)
2200 {
2201 uint_t sbandwidth, cbandwidth;
2202 int interval;
2203 usb_ep_descr_t *endpoint = &ph->p_ep;
2204 usba_device_t *child_ud;
2205 usb_port_status_t port_status;
2206 int error;
2207
2208 /* This routine is protected by the ehci_int_mutex */
2209 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2210
2211 /* Get child's usba device structure */
2212 child_ud = ph->p_usba_device;
2213
2214 mutex_enter(&child_ud->usb_mutex);
2215
2216 /* Get the current usb device's port status */
2217 port_status = ph->p_usba_device->usb_port_status;
2218
2219 mutex_exit(&child_ud->usb_mutex);
2220
2221 /*
2222 * Calculate the length in bytes of a transaction on this
2223 * periodic endpoint. Return failure if maximum packet is
2224 * zero.
2225 */
2226 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2227 port_status, &sbandwidth, &cbandwidth);
2228 if (error != USB_SUCCESS) {
2229
2230 return (error);
2231 }
2232
2233 /*
2234 * Adjust polling interval to be a power of 2.
2235 * If this interval can't be supported, return
2236 * allocation failure.
2237 */
2238 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2239 if (interval == USB_FAILURE) {
2240
2241 return (USB_FAILURE);
2242 }
2243
2244 if (port_status == USBA_HIGH_SPEED_DEV) {
2245 /* Allocate bandwidth for high speed devices */
2246 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2247 USB_EP_ATTR_ISOCH) {
2248 error = USB_SUCCESS;
2249 } else {
2250
2251 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2252 endpoint, sbandwidth, interval);
2253 }
2254
2255 *cmask = 0x00;
2256
2257 } else {
2258 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2259 USB_EP_ATTR_INTR) {
2260
2261 /* Allocate bandwidth for low speed interrupt */
2262 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2263 smask, cmask, pnode, sbandwidth, cbandwidth,
2264 interval);
2265 } else {
2266 if ((endpoint->bEndpointAddress &
2267 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2268
2269 /* Allocate bandwidth for sitd in */
2270 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2271 smask, cmask, pnode, sbandwidth, cbandwidth,
2272 interval);
2273 } else {
2274
2275 /* Allocate bandwidth for sitd out */
2276 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2277 smask, pnode, sbandwidth, interval);
2278 *cmask = 0x00;
2279 }
2280 }
2281 }
2282
2283 if (error != USB_SUCCESS) {
2284 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2285 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2286 "bandwidth value and cannot allocate bandwidth for a "
2287 "given high-speed periodic endpoint");
2288
2289 return (USB_NO_BANDWIDTH);
2290 }
2291
2292 return (error);
2293 }
2294
2295
2296 /*
2297 * ehci_allocate_classic_tt_bandwidth:
2298 *
2299 * Allocate classic TT bandwidth for the low/full speed interrupt and
2300 * isochronous endpoints.
2301 */
2302 static int
2303 ehci_allocate_classic_tt_bandwidth(
2304 ehci_state_t *ehcip,
2305 usba_pipe_handle_data_t *ph,
2306 uint_t pnode)
2307 {
2308 uint_t bandwidth, min;
2309 uint_t height, leftmost, list;
2310 usb_ep_descr_t *endpoint = &ph->p_ep;
2311 usba_device_t *child_ud, *parent_ud;
2312 usb_port_status_t port_status;
2313 int i, interval;
2314
2315 /* This routine is protected by the ehci_int_mutex */
2316 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2317
2318 /* Get child's usba device structure */
2319 child_ud = ph->p_usba_device;
2320
2321 mutex_enter(&child_ud->usb_mutex);
2322
2323 /* Get the current usb device's port status */
2324 port_status = child_ud->usb_port_status;
2325
2326 /* Get the parent high speed hub's usba device structure */
2327 parent_ud = child_ud->usb_hs_hub_usba_dev;
2328
2329 mutex_exit(&child_ud->usb_mutex);
2330
2331 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2332 "ehci_allocate_classic_tt_bandwidth: "
2333 "child_ud 0x%p parent_ud 0x%p",
2334 (void *)child_ud, (void *)parent_ud);
2335
2336 /*
2337 * Calculate the length in bytes of a transaction on this
2338 * periodic endpoint. Return failure if maximum packet is
2339 * zero.
2340 */
2341 if (ehci_compute_classic_bandwidth(endpoint,
2342 port_status, &bandwidth) != USB_SUCCESS) {
2343
2344 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2345 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2346 "with zero endpoint maximum packet size is not supported");
2347
2348 return (USB_NOT_SUPPORTED);
2349 }
2350
2351 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2352 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2353
2354 mutex_enter(&parent_ud->usb_mutex);
2355
2356 /*
2357 * If the length in bytes plus the allocated bandwidth exceeds
2358 * the maximum, return bandwidth allocation failure.
2359 */
2360 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2361 FS_PERIODIC_BANDWIDTH) {
2362
2363 mutex_exit(&parent_ud->usb_mutex);
2364
2365 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2366 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2367 "bandwidth value and cannot allocate bandwidth for a "
2368 "given low/full speed periodic endpoint");
2369
2370 return (USB_NO_BANDWIDTH);
2371 }
2372
2373 mutex_exit(&parent_ud->usb_mutex);
2374
2375 /* Adjust polling interval to be a power of 2 */
2376 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2377
2378 /* Find the height in the tree */
2379 height = ehci_lattice_height(interval);
2380
2381 /* Find the leftmost leaf in the subtree specified by the node. */
2382 leftmost = ehci_leftmost_leaf(pnode, height);
2383
2384 mutex_enter(&parent_ud->usb_mutex);
2385
2386 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2387 list = ehci_index[leftmost + i];
2388
2389 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2390 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2391
2392 mutex_exit(&parent_ud->usb_mutex);
2393
2394 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2395 "ehci_allocate_classic_tt_bandwidth: Reached "
2396 "maximum bandwidth value and cannot allocate "
2397 "bandwidth for low/full periodic endpoint");
2398
2399 return (USB_NO_BANDWIDTH);
2400 }
2401 }
2402
2403 /*
2404 * All the leaves for this node must be updated with the bandwidth.
2405 */
2406 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2407 list = ehci_index[leftmost + i];
2408 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2409 }
2410
2411 /* Find the leaf with the smallest allocated bandwidth */
2412 min = parent_ud->usb_hs_hub_bandwidth[0];
2413
2414 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2415 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2416 min = parent_ud->usb_hs_hub_bandwidth[i];
2417 }
2418 }
2419
2420 /* Save the minimum for later use */
2421 parent_ud->usb_hs_hub_min_bandwidth = min;
2422
2423 mutex_exit(&parent_ud->usb_mutex);
2424
2425 return (USB_SUCCESS);
2426 }
2427
2428
2429 /*
2430 * ehci_deallocate_bandwidth:
2431 *
2432 * Deallocate bandwidth for the given node in the lattice and the length
2433 * of transfer.
2434 */
2435 void
2436 ehci_deallocate_bandwidth(
2437 ehci_state_t *ehcip,
2438 usba_pipe_handle_data_t *ph,
2439 uint_t pnode,
2440 uchar_t smask,
2441 uchar_t cmask)
2442 {
2443 /* This routine is protected by the ehci_int_mutex */
2444 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2445
2446 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2447
2448 /*
2449 * For low/full speed usb devices, deallocate classic TT bandwidth
2450 * in addition to the high speed bandwidth.
2451 */
2452 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2453
2454 /* Deallocate classic TT bandwidth */
2455 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2456 }
2457 }
2458
2459
2460 /*
2461 * ehci_deallocate_high_speed_bandwidth:
2462 *
2463 * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2464 */
2465 static void
2466 ehci_deallocate_high_speed_bandwidth(
2467 ehci_state_t *ehcip,
2468 usba_pipe_handle_data_t *ph,
2469 uint_t pnode,
2470 uchar_t smask,
2471 uchar_t cmask)
2472 {
2473 uint_t height, leftmost;
2474 uint_t list_count;
2475 uint_t sbandwidth, cbandwidth;
2476 int interval;
2477 usb_ep_descr_t *endpoint = &ph->p_ep;
2478 usba_device_t *child_ud;
2479 usb_port_status_t port_status;
2480
2481 /* This routine is protected by the ehci_int_mutex */
2482 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2483
2484 /* Get child's usba device structure */
2485 child_ud = ph->p_usba_device;
2486
2487 mutex_enter(&child_ud->usb_mutex);
2488
2489 /* Get the current usb device's port status */
2490 port_status = ph->p_usba_device->usb_port_status;
2491
2492 mutex_exit(&child_ud->usb_mutex);
2493
2494 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2495 port_status, &sbandwidth, &cbandwidth);
2496
2497 /* Adjust polling interval to be a power of 2 */
2498 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2499
2500 /* Find the height in the tree */
2501 height = ehci_lattice_height(interval);
2502
2503 /*
2504 * Find the leftmost leaf in the subtree specified by the node
2505 */
2506 leftmost = ehci_leftmost_leaf(pnode, height);
2507
2508 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2509
2510 /* Delete the bandwidth from the appropriate lists */
2511 if (port_status == USBA_HIGH_SPEED_DEV) {
2512
2513 ehci_update_bw_availability(ehcip, -sbandwidth,
2514 leftmost, list_count, smask);
2515 } else {
2516 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2517 USB_EP_ATTR_INTR) {
2518
2519 ehci_update_bw_availability(ehcip, -sbandwidth,
2520 leftmost, list_count, smask);
2521 ehci_update_bw_availability(ehcip, -cbandwidth,
2522 leftmost, list_count, cmask);
2523 } else {
2524 if ((endpoint->bEndpointAddress &
2525 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2526
2527 ehci_update_bw_availability(ehcip, -sbandwidth,
2528 leftmost, list_count, smask);
2529 ehci_update_bw_availability(ehcip,
2530 -MAX_UFRAME_SITD_XFER, leftmost,
2531 list_count, cmask);
2532 } else {
2533
2534 ehci_update_bw_availability(ehcip,
2535 -MAX_UFRAME_SITD_XFER, leftmost,
2536 list_count, smask);
2537 }
2538 }
2539 }
2540 }
2541
2542 /*
2543 * ehci_deallocate_classic_tt_bandwidth:
2544 *
2545 * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2546 */
2547 static void
2548 ehci_deallocate_classic_tt_bandwidth(
2549 ehci_state_t *ehcip,
2550 usba_pipe_handle_data_t *ph,
2551 uint_t pnode)
2552 {
2553 uint_t bandwidth, height, leftmost, list, min;
2554 int i, interval;
2555 usb_ep_descr_t *endpoint = &ph->p_ep;
2556 usba_device_t *child_ud, *parent_ud;
2557 usb_port_status_t port_status;
2558
2559 /* This routine is protected by the ehci_int_mutex */
2560 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2561
2562 /* Get child's usba device structure */
2563 child_ud = ph->p_usba_device;
2564
2565 mutex_enter(&child_ud->usb_mutex);
2566
2567 /* Get the current usb device's port status */
2568 port_status = child_ud->usb_port_status;
2569
2570 /* Get the parent high speed hub's usba device structure */
2571 parent_ud = child_ud->usb_hs_hub_usba_dev;
2572
2573 mutex_exit(&child_ud->usb_mutex);
2574
2575 /* Obtain the bandwidth */
2576 (void) ehci_compute_classic_bandwidth(endpoint,
2577 port_status, &bandwidth);
2578
2579 /* Adjust polling interval to be a power of 2 */
2580 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2581
2582 /* Find the height in the tree */
2583 height = ehci_lattice_height(interval);
2584
2585 /* Find the leftmost leaf in the subtree specified by the node */
2586 leftmost = ehci_leftmost_leaf(pnode, height);
2587
2588 mutex_enter(&parent_ud->usb_mutex);
2589
2590 /* Delete the bandwidth from the appropriate lists */
2591 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2592 list = ehci_index[leftmost + i];
2593 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2594 }
2595
2596 /* Find the leaf with the smallest allocated bandwidth */
2597 min = parent_ud->usb_hs_hub_bandwidth[0];
2598
2599 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2600 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2601 min = parent_ud->usb_hs_hub_bandwidth[i];
2602 }
2603 }
2604
2605 /* Save the minimum for later use */
2606 parent_ud->usb_hs_hub_min_bandwidth = min;
2607
2608 mutex_exit(&parent_ud->usb_mutex);
2609 }
2610
2611
2612 /*
2613 * ehci_compute_high_speed_bandwidth:
2614 *
2615 * Given a periodic endpoint (interrupt or isochronous) determine the total
2616 * bandwidth for one transaction. The EHCI host controller traverses the
2617 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2618 * services an endpoint, only a single transaction attempt is made. The HC
2619 * moves to the next Endpoint Descriptor after the first transaction attempt
2620 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2621 * Transfer Descriptor is inserted into the lattice, we will only count the
2622 * number of bytes for one transaction.
2623 *
2624 * The following formulas are used to calculate the bandwidth, in bytes,
2625 * consumed by a single USB high speed transaction. The protocol overhead
2626 * differs for each type of USB transfer; these formulas and protocol
2627 * overheads are derived from section 5.11.3 of the USB 2.0
2628 * Specification.
2629 *
2630 * High-Speed:
2631 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2632 *
2633 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2634 *
2635 * Protocol overhead + Split transaction overhead +
2636 * ((MaxPktSz * 7)/6) + Host_Delay;
2637 */
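/*
 * For example, a high speed interrupt endpoint with a wMaxPacketSize of
 * 64 bytes and one transaction per micro-frame needs a bit-stuffed
 * payload of (64 * 7) / 6 = 74 bytes, so the bandwidth charged to each
 * scheduled micro-frame is
 *
 *	HS_NON_ISOC_PROTO_OVERHEAD + EHCI_HOST_CONTROLLER_DELAY + 74
 *
 * bytes (see ehci_compute_high_speed_bandwidth() below).
 */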
2638 /* ARGSUSED */
2639 static int
2640 ehci_compute_high_speed_bandwidth(
2641 ehci_state_t *ehcip,
2642 usb_ep_descr_t *endpoint,
2643 usb_port_status_t port_status,
2644 uint_t *sbandwidth,
2645 uint_t *cbandwidth)
2646 {
2647 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2648
2649 /* Return failure if endpoint maximum packet is zero */
2650 if (maxpacketsize == 0) {
2651 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2652 "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2653 "with zero endpoint maximum packet size is not supported");
2654
2655 return (USB_NOT_SUPPORTED);
2656 }
2657
2658 /* Add bit-stuffing overhead */
2659 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2660
2661 /* Add Host Controller specific delay to required bandwidth */
2662 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2663
2664 /* Add xfer specific protocol overheads */
2665 if ((endpoint->bmAttributes &
2666 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2667 /* High speed interrupt transaction */
2668 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2669 } else {
2670 /* Isochronous transaction */
2671 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2672 }
2673
2674 /*
2675 * For low/full speed devices, add split transaction specific
2676 * overheads.
2677 */
2678 if (port_status != USBA_HIGH_SPEED_DEV) {
2679 /*
2680 * Add start and complete split transaction
2681 * tokens overheads.
2682 */
2683 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2684 *sbandwidth += START_SPLIT_OVERHEAD;
2685
2686 /* Add data overhead depending on data direction */
2687 if ((endpoint->bEndpointAddress &
2688 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2689 *cbandwidth += maxpacketsize;
2690 } else {
2691 if ((endpoint->bmAttributes &
2692 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2693 /* There are no complete splits for isochronous OUT */
2694 *cbandwidth = 0;
2695 }
2696 *sbandwidth += maxpacketsize;
2697 }
2698 } else {
2699 uint_t xactions;
2700
2701 /* Get the max transactions per microframe */
2702 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2703 USB_EP_MAX_XACTS_SHIFT) + 1;
2704
2705 /* High speed transaction */
2706 *sbandwidth += maxpacketsize;
2707
2708 /* Calculate bandwidth per micro-frame */
2709 *sbandwidth *= xactions;
2710
2711 *cbandwidth = 0;
2712 }
2713
2714 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2715 "ehci_allocate_high_speed_bandwidth: "
2716 "Start split bandwidth %d Complete split bandwidth %d",
2717 *sbandwidth, *cbandwidth);
2718
2719 return (USB_SUCCESS);
2720 }
2721
2722
2723 /*
2724 * ehci_compute_classic_bandwidth:
2725 *
2726 * Given a periodic endpoint (interrupt or isochronous) determine the total
2727 * bandwidth for one transaction. The EHCI host controller traverses the
2728 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2729 * services an endpoint, only a single transaction attempt is made. The HC
2730 * moves to the next Endpoint Descriptor after the first transaction attempt
2731 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2732 * Transfer Descriptor is inserted into the lattice, we will only count the
2733 * number of bytes for one transaction.
2734 *
2735 * The following formulas are used to calculate the bandwidth, in bytes,
2736 * consumed by a single USB low or full speed transaction on the classic
2737 * (TT) bus. The protocol overhead differs for each type of USB transfer;
2738 * these formulas and protocol overheads are derived from section 5.11.3
2739 * of the USB 2.0 Specification.
2740 *
2741 * Low-Speed:
2742 * Protocol overhead + Hub LS overhead +
2743 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2744 *
2745 * Full-Speed:
2746 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2747 */
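/*
 * For example, a full speed interrupt endpoint with a wMaxPacketSize of
 * 8 bytes costs TT_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + (8 * 7) / 6 =
 * TT_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + 9 bytes of classic (TT) bus
 * time per transaction; the same endpoint on a low speed device would
 * instead cost TT_DELAY + LOW_SPEED_PROTO_OVERHEAD +
 * HUB_LOW_SPEED_PROTO_OVERHEAD + (LOW_SPEED_CLOCK * 9) bytes.
 */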
2748 /* ARGSUSED */
2749 static int
2750 ehci_compute_classic_bandwidth(
2751 usb_ep_descr_t *endpoint,
2752 usb_port_status_t port_status,
2753 uint_t *bandwidth)
2754 {
2755 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2756
2757 /*
2758 * If endpoint maximum packet is zero, then return immediately.
2759 */
2760 if (maxpacketsize == 0) {
2761
2762 return (USB_NOT_SUPPORTED);
2763 }
2764
2765 /* Add TT delay to required bandwidth */
2766 *bandwidth = TT_DELAY;
2767
2768 /* Add bit-stuffing overhead */
2769 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2770
2771 switch (port_status) {
2772 case USBA_LOW_SPEED_DEV:
2773 /* Low speed interrupt transaction */
2774 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2775 HUB_LOW_SPEED_PROTO_OVERHEAD +
2776 (LOW_SPEED_CLOCK * maxpacketsize));
2777 break;
2778 case USBA_FULL_SPEED_DEV:
2779 /* Full speed transaction */
2780 *bandwidth += maxpacketsize;
2781
2782 /* Add xfer specific protocol overheads */
2783 if ((endpoint->bmAttributes &
2784 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2785 /* Full speed interrupt transaction */
2786 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2787 } else {
2788 /* Isochronous and input transaction */
2789 if ((endpoint->bEndpointAddress &
2790 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2791 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2792 } else {
2793 /* Isochronous and output transaction */
2794 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2795 }
2796 }
2797 break;
2798 }
2799
2800 return (USB_SUCCESS);
2801 }
2802
2803
2804 /*
2805 * ehci_adjust_polling_interval:
2806 *
2807 * Adjust the polling interval according to the usb device speed.
2808 */
2809 /* ARGSUSED */
2810 int
2811 ehci_adjust_polling_interval(
2812 ehci_state_t *ehcip,
2813 usb_ep_descr_t *endpoint,
2814 usb_port_status_t port_status)
2815 {
2816 uint_t interval;
2817 int i = 0;
2818
2819 /* Get the polling interval */
2820 interval = endpoint->bInterval;
2821
2822 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2823 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2824
2825 /*
2826 * According to the USB 2.0 Specification, a high-speed endpoint's
2827 * polling interval is specified in units of 125us micro-frames,
2828 * whereas a full/low speed endpoint's polling interval is
2829 * specified in milliseconds.
2830 *
2831 * A high speed interrupt/isochronous endpoint can specify a
2832 * desired polling interval between 1 and 16 in micro-frame
2833 * units, whereas full/low speed endpoints can specify between
2834 * 1 and 255 milliseconds.
2835 */
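/*
 * For example, a high speed endpoint reporting a bInterval of 4 polls
 * every 2^(4 - 1) = 8 micro-frames, which
 * ehci_adjust_high_speed_polling_interval() converts to a 1ms frame
 * interval; a full speed endpoint reporting a bInterval of 10ms is
 * rounded down to an 8ms lattice interval by the power-of-2
 * adjustment at the end of this routine.
 */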
2836 switch (port_status) {
2837 case USBA_LOW_SPEED_DEV:
2838 /*
2839 * Low speed endpoints are limited to specifying
2840 * only 8ms to 255ms in this driver. If a device
2841 * reports a polling interval that is less than 8ms,
2842 * it will use 8 ms instead.
2843 */
2844 if (interval < LS_MIN_POLL_INTERVAL) {
2845
2846 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2847 "Low speed endpoint's poll interval of %d ms "
2848 "is below threshold. Rounding up to %d ms",
2849 interval, LS_MIN_POLL_INTERVAL);
2850
2851 interval = LS_MIN_POLL_INTERVAL;
2852 }
2853
2854 /*
2855 * Return an error if the polling interval is greater
2856 * than 255ms.
2857 */
2858 if (interval > LS_MAX_POLL_INTERVAL) {
2859
2860 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2861 "Low speed endpoint's poll interval is "
2862 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2863
2864 return (USB_FAILURE);
2865 }
2866 break;
2867
2868 case USBA_FULL_SPEED_DEV:
2869 /*
2870 * Return an error if the polling interval is less
2871 * than 1ms or greater than 255ms.
2872 */
2873 if ((interval < FS_MIN_POLL_INTERVAL) ||
2874 (interval > FS_MAX_POLL_INTERVAL)) {
2875
2876 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2877 "Full speed endpoint's poll interval must "
2878 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2879 FS_MAX_POLL_INTERVAL);
2880
2881 return (USB_FAILURE);
2882 }
2883 break;
2884 case USBA_HIGH_SPEED_DEV:
2885 /*
2886 * Return an error if the polling interval is less than 1
2887 * or greater than 16. Convert this value to 125us
2888 * units using 2^(bInterval - 1); refer to the USB 2.0 spec,
2889 * page 51, for details.
2890 */
2891 if ((interval < HS_MIN_POLL_INTERVAL) ||
2892 (interval > HS_MAX_POLL_INTERVAL)) {
2893
2894 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2895 "High speed endpoint's poll interval "
2896 "must be between %d and %d units",
2897 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2898
2899 return (USB_FAILURE);
2900 }
2901
2902 /* Adjust high speed device polling interval */
2903 interval =
2904 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2905
2906 break;
2907 }
2908
2909 /*
2910 * If the polling interval is greater than 32ms,
2911 * clamp it to 32ms.
2912 */
2913 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2914 interval = EHCI_NUM_INTR_QH_LISTS;
2915 }
2916
2917 /*
2918 * Round the interval down to the
2919 * nearest power of 2.
2920 */
2921 while ((ehci_pow_2(i)) <= interval) {
2922 i++;
2923 }
2924
2925 return (ehci_pow_2((i - 1)));
2926 }
2927
2928
2929 /*
2930 * ehci_adjust_high_speed_polling_interval:
2931 */
2932 /* ARGSUSED */
2933 static int
2934 ehci_adjust_high_speed_polling_interval(
2935 ehci_state_t *ehcip,
2936 usb_ep_descr_t *endpoint)
2937 {
2938 uint_t interval;
2939
2940 /* Get the polling interval */
2941 interval = ehci_pow_2(endpoint->bInterval - 1);
2942
2943 /*
2944 * Convert the polling interval from micro-frames
2945 * (125us units) to milliseconds.
2946 */
2947 if (interval <= EHCI_MAX_UFRAMES) {
2948 interval = 1;
2949 } else {
2950 interval = interval/EHCI_MAX_UFRAMES;
2951 }
2952
2953 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2954 "ehci_adjust_high_speed_polling_interval: "
2955 "High speed adjusted interval 0x%x", interval);
2956
2957 return (interval);
2958 }
2959
2960
2961 /*
2962 * ehci_lattice_height:
2963 *
2964 * Given the requested polling interval, find the height in the tree at
2965 * which the nodes for this interval fall. The height is measured as the
2966 * number of nodes from the leaf up to the level specified by the interval.
2967 * The root of the tree is at height TREE_HEIGHT.
2968 */
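/*
 * For example, assuming a TREE_HEIGHT of 5 for the 32-list lattice (an
 * assumption here; the real value comes from the ehci headers), a 32ms
 * interval yields height 5 - 5 = 0 (a leaf), an 8ms interval yields
 * height 5 - 3 = 2, and a 1ms interval yields height 5 - 0 = 5 (the
 * root of the lattice).
 */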
2969 static uint_t
2970 ehci_lattice_height(uint_t interval)
2971 {
2972 return (TREE_HEIGHT - (ehci_log_2(interval)));
2973 }
2974
2975
2976 /*
2977 * ehci_lattice_parent:
2978 *
2979 * Given a node in the lattice, find the index of the parent node
2980 */
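/*
 * In this zero-based lattice a node k has children 2k + 1 and 2k + 2;
 * for example, nodes 3 and 4 both yield parent 1, and nodes 1 and 2
 * both yield parent 0.
 */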
2981 static uint_t
2982 ehci_lattice_parent(uint_t node)
2983 {
2984 if ((node % 2) == 0) {
2985
2986 return ((node/2) - 1);
2987 } else {
2988
2989 return ((node + 1)/2 - 1);
2990 }
2991 }
2992
2993
2994 /*
2995 * ehci_find_periodic_node:
2996 *
2997 * Based on the "real" array leaf node and interval, get the periodic node.
2998 */
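/*
 * For example, assuming a TREE_HEIGHT of 5 (so a 2ms interval gives a
 * height of 4): array leaf 16 becomes lattice leaf 16 + 32 - 1 = 47,
 * and walking four parents up (47 -> 23 -> 11 -> 5 -> 2) yields
 * periodic node 2.
 */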
2999 static uint_t
3000 ehci_find_periodic_node(uint_t leaf, int interval)
3001 {
3002 uint_t lattice_leaf;
3003 uint_t height = ehci_lattice_height(interval);
3004 uint_t pnode;
3005 int i;
3006
3007 /* Get the leaf number in the lattice */
3008 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
3009
3010 /* Get the node in the lattice based on the height and leaf */
3011 pnode = lattice_leaf;
3012 for (i = 0; i < height; i++) {
3013 pnode = ehci_lattice_parent(pnode);
3014 }
3015
3016 return (pnode);
3017 }
3018
3019
3020 /*
3021 * ehci_leftmost_leaf:
3022 *
3023 * Find the leftmost leaf in the subtree specified by the node. Height refers
3024 * to number of nodes from the bottom of the tree to the node, including the
3025 * node.
3026 *
3027 * The formula for a zero based tree is:
3028 * 2^H * Node + 2^H - 1
3029 * The leaves of the tree are stored in an array; convert the node number
3030 * to an array index by subtracting the number of nodes not in the array:
3031 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3032 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3033 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3034 * 0
3035 * 1 2
3036 * 0 1 2 3
3037 */
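/*
 * For example, with EHCI_NUM_INTR_QH_LISTS at 32: node 0 at height 5
 * gives 2^5 * (0 + 1) - 32 = 0 (the leftmost array leaf of the whole
 * lattice), and node 2 at height 4 gives 2^4 * (2 + 1) - 32 = 16,
 * matching the node used in the ehci_find_periodic_node() example
 * above.
 */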
3038 static uint_t
3039 ehci_leftmost_leaf(
3040 uint_t node,
3041 uint_t height)
3042 {
3043 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3044 }
3045
3046
3047 /*
3048 * ehci_pow_2:
3049 *
3050 * Compute 2 raised to the power x.
3051 */
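/*
 * For example, ehci_pow_2(0) returns 1, ehci_pow_2(3) returns 8 and
 * ehci_pow_2(5) returns 32; ehci_log_2() below is its inverse for
 * powers of two (ehci_log_2(32) returns 5).
 */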
3052 static uint_t
3053 ehci_pow_2(uint_t x)
3054 {
3055 if (x == 0) {
3056
3057 return (1);
3058 } else {
3059
3060 return (2 << (x - 1));
3061 }
3062 }
3063
3064
3065 /*
3066 * ehci_log_2:
3067 *
3068 * Compute log base 2 of x
3069 */
3070 static uint_t
3071 ehci_log_2(uint_t x)
3072 {
3073 int i = 0;
3074
3075 while (x != 1) {
3076 x = x >> 1;
3077 i++;
3078 }
3079
3080 return (i);
3081 }
3082
3083
3084 /*
3085 * ehci_find_bestfit_hs_mask:
3086 *
3087 * Find the smask and cmask in the bandwidth allocation, and update the
3088 * bandwidth allocation.
3089 */
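/*
 * For example (with hypothetical load figures): for an interval of 8
 * there are eight candidate nodes, each probed through the leftmost
 * array leaf of its subtree. If two of them offer a usable start split
 * mask but one currently carries 600 bytes of periodic traffic and the
 * other only 200, the 200-byte node wins; its smask and pnode are
 * returned and ehci_update_bw_availability() charges the new bandwidth
 * to that node's leaves.
 */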
3090 static int
3091 ehci_find_bestfit_hs_mask(
3092 ehci_state_t *ehcip,
3093 uchar_t *smask,
3094 uint_t *pnode,
3095 usb_ep_descr_t *endpoint,
3096 uint_t bandwidth,
3097 int interval)
3098 {
3099 int i;
3100 uint_t elements, index;
3101 int array_leaf, best_array_leaf;
3102 uint_t node_bandwidth, best_node_bandwidth;
3103 uint_t leaf_count;
3104 uchar_t bw_mask;
3105 uchar_t best_smask;
3106
3107 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3108 "ehci_find_bestfit_hs_mask: ");
3109
3110 /* Get all the valid smasks */
3111 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3112 case EHCI_INTR_1US_POLL:
3113 index = EHCI_1US_MASK_INDEX;
3114 elements = EHCI_INTR_1US_POLL;
3115 break;
3116 case EHCI_INTR_2US_POLL:
3117 index = EHCI_2US_MASK_INDEX;
3118 elements = EHCI_INTR_2US_POLL;
3119 break;
3120 case EHCI_INTR_4US_POLL:
3121 index = EHCI_4US_MASK_INDEX;
3122 elements = EHCI_INTR_4US_POLL;
3123 break;
3124 case EHCI_INTR_XUS_POLL:
3125 default:
3126 index = EHCI_XUS_MASK_INDEX;
3127 elements = EHCI_INTR_XUS_POLL;
3128 break;
3129 }
3130
3131 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3132
3133 /*
3134 * Because of the way the leaves are set up, we will automatically
3135 * hit the leftmost leaf of every possible node with this interval.
3136 */
3137 best_smask = 0x00;
3138 best_node_bandwidth = 0;
3139 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3140 /* Find the bandwidth mask */
3141 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3142 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3143
3144 /*
3145 * If this node cannot support our requirements skip to the
3146 * next leaf.
3147 */
3148 if (bw_mask == 0x00) {
3149 continue;
3150 }
3151
3152 /*
3153 * Now make sure our bandwidth requirements can be
3154 * satisfied with one of smasks in this node.
3155 */
3156 *smask = 0x00;
3157 for (i = index; i < (index + elements); i++) {
3158 /* Check the start split mask value */
3159 if (ehci_start_split_mask[i] & bw_mask) {
3160 *smask = ehci_start_split_mask[i];
3161 break;
3162 }
3163 }
3164
3165 /*
3166 * If an appropriate smask is found save the information if:
3167 * o best_smask has not been found yet.
3168 * - or -
3169 * o This is the node with the least amount of bandwidth
3170 */
3171 if ((*smask != 0x00) &&
3172 ((best_smask == 0x00) ||
3173 (best_node_bandwidth > node_bandwidth))) {
3174
3175 best_node_bandwidth = node_bandwidth;
3176 best_array_leaf = array_leaf;
3177 best_smask = *smask;
3178 }
3179 }
3180
3181 /*
3182 * If we find node that can handle the bandwidth populate the
3183 * appropriate variables and return success.
3184 */
3185 if (best_smask) {
3186 *smask = best_smask;
3187 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3188 interval);
3189 ehci_update_bw_availability(ehcip, bandwidth,
3190 ehci_index[best_array_leaf], leaf_count, best_smask);
3191
3192 return (USB_SUCCESS);
3193 }
3194
3195 return (USB_FAILURE);
3196 }
3197
3198
3199 /*
3200 * ehci_find_bestfit_ls_intr_mask:
3201 *
3202 * Find the smask and cmask in the bandwidth allocation.
3203 */
3204 static int
3205 ehci_find_bestfit_ls_intr_mask(
3206 ehci_state_t *ehcip,
3207 uchar_t *smask,
3208 uchar_t *cmask,
3209 uint_t *pnode,
3210 uint_t sbandwidth,
3211 uint_t cbandwidth,
3212 int interval)
3213 {
3214 int i;
3215 uint_t elements, index;
3216 int array_leaf, best_array_leaf;
3217 uint_t node_sbandwidth, node_cbandwidth;
3218 uint_t best_node_bandwidth;
3219 uint_t leaf_count;
3220 uchar_t bw_smask, bw_cmask;
3221 uchar_t best_smask, best_cmask;
3222
3223 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3224 "ehci_find_bestfit_ls_intr_mask: ");
3225
3226 /* For low and full speed devices */
3227 index = EHCI_XUS_MASK_INDEX;
3228 elements = EHCI_INTR_4MS_POLL;
3229
3230 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3231
3232 /*
3233 * Because of the way the leaves are set up, we will automatically
3234 * hit the leftmost leaf of every possible node with this interval.
3235 */
3236 best_smask = 0x00;
3237 best_node_bandwidth = 0;
3238 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3239 /* Find the bandwidth mask */
3240 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3241 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3242 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3243 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3244
3245 /*
3246 * If this node cannot support our requirements skip to the
3247 * next leaf.
3248 */
3249 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3250 continue;
3251 }
3252
3253 /*
3254 * Now make sure our bandwidth requirements can be
3255 * satisfied with one of smasks in this node.
3256 */
3257 *smask = 0x00;
3258 *cmask = 0x00;
3259 for (i = index; i < (index + elements); i++) {
3260 /* Check the start split mask value */
3261 if ((ehci_start_split_mask[i] & bw_smask) &&
3262 (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3263 *smask = ehci_start_split_mask[i];
3264 *cmask = ehci_intr_complete_split_mask[i];
3265 break;
3266 }
3267 }
3268
3269 /*
3270 * If an appropriate smask is found save the information if:
3271 * o best_smask has not been found yet.
3272 * - or -
3273 * o This is the node with the least amount of bandwidth
3274 */
3275 if ((*smask != 0x00) &&
3276 ((best_smask == 0x00) ||
3277 (best_node_bandwidth >
3278 (node_sbandwidth + node_cbandwidth)))) {
3279 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3280 best_array_leaf = array_leaf;
3281 best_smask = *smask;
3282 best_cmask = *cmask;
3283 }
3284 }
3285
3286 /*
3287 * If we find node that can handle the bandwidth populate the
3288 * appropriate variables and return success.
3289 */
3290 if (best_smask) {
3291 *smask = best_smask;
3292 *cmask = best_cmask;
3293 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3294 interval);
3295 ehci_update_bw_availability(ehcip, sbandwidth,
3296 ehci_index[best_array_leaf], leaf_count, best_smask);
3297 ehci_update_bw_availability(ehcip, cbandwidth,
3298 ehci_index[best_array_leaf], leaf_count, best_cmask);
3299
3300 return (USB_SUCCESS);
3301 }
3302
3303 return (USB_FAILURE);
3304 }
3305
3306
3307 /*
3308 * ehci_find_bestfit_sitd_in_mask:
3309 *
3310 * Find the smask and cmask in the bandwidth allocation.
3311 */
3312 static int
3313 ehci_find_bestfit_sitd_in_mask(
3314 ehci_state_t *ehcip,
3315 uchar_t *smask,
3316 uchar_t *cmask,
3317 uint_t *pnode,
3318 uint_t sbandwidth,
3319 uint_t cbandwidth,
3320 int interval)
3321 {
3322 int i, uFrames, found;
3323 int array_leaf, best_array_leaf;
3324 uint_t node_sbandwidth, node_cbandwidth;
3325 uint_t best_node_bandwidth;
3326 uint_t leaf_count;
3327 uchar_t bw_smask, bw_cmask;
3328 uchar_t best_smask, best_cmask;
3329
3330 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3331 "ehci_find_bestfit_sitd_in_mask: ");
3332
3333 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3334
3335 /*
3336 * Because of the way the leaves are set up, we will automatically
3337 * hit the leftmost leaf of every possible node with this interval.
3338 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3339 */
3340 /*
3341 * Need to add an additional 2 uFrames, if the "L"ast
3342 * complete split is before uFrame 6. See section
3343 * 11.8.4 in the USB 2.0 Spec. Currently we do not support
3344 * the "Back Ptr", which means we support an IN of at most
3345 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
3346 */
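/*
 * For example, if cbandwidth is exactly 2 * MAX_UFRAME_SITD_XFER,
 * uFrames works out to 2 + 2 = 4, so the mask built below starts out
 * as 0x0f and the shift by two turns it into 0x3c: complete splits in
 * micro-frames 2 through 5, paired with the initial start split in
 * micro-frame 0 (the search further down may shift both masks left to
 * find free micro-frames).
 */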
3347 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3348 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3349 uFrames++;
3350 }
3351 if (uFrames > 6) {
3352
3353 return (USB_FAILURE);
3354 }
3355 *smask = 0x1;
3356 *cmask = 0x00;
3357 for (i = 0; i < uFrames; i++) {
3358 *cmask = *cmask << 1;
3359 *cmask |= 0x1;
3360 }
3361 /* cmask must start 2 uFrames after the smask */
3362 *cmask = *cmask << 2;
3363
3364 found = 0;
3365 best_smask = 0x00;
3366 best_node_bandwidth = 0;
3367 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3368 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3369 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3370 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3371 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3372 &bw_cmask);
3373
3374 /*
3375 * If this node cannot support our requirements skip to the
3376 * next leaf.
3377 */
3378 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3379 continue;
3380 }
3381
3382 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3383 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3384 found = 1;
3385 break;
3386 }
3387 *smask = *smask << 1;
3388 *cmask = *cmask << 1;
3389 }
3390
3391 /*
3392 * If an appropriate smask is found save the information if:
3393 * o best_smask has not been found yet.
3394 * - or -
3395 * o This is the node with the least amount of bandwidth
3396 */
3397 if (found &&
3398 ((best_smask == 0x00) ||
3399 (best_node_bandwidth >
3400 (node_sbandwidth + node_cbandwidth)))) {
3401 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3402 best_array_leaf = array_leaf;
3403 best_smask = *smask;
3404 best_cmask = *cmask;
3405 }
3406 }
3407
3408 /*
3409 * If we find node that can handle the bandwidth populate the
3410 * appropriate variables and return success.
3411 */
3412 if (best_smask) {
3413 *smask = best_smask;
3414 *cmask = best_cmask;
3415 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3416 interval);
3417 ehci_update_bw_availability(ehcip, sbandwidth,
3418 ehci_index[best_array_leaf], leaf_count, best_smask);
3419 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3420 ehci_index[best_array_leaf], leaf_count, best_cmask);
3421
3422 return (USB_SUCCESS);
3423 }
3424
3425 return (USB_FAILURE);
3426 }
3427
3428
3429 /*
3430 * ehci_find_bestfit_sitd_out_mask:
3431 *
3432 * Find the smask in the bandwidth allocation.
3433 */
3434 static int
3435 ehci_find_bestfit_sitd_out_mask(
3436 ehci_state_t *ehcip,
3437 uchar_t *smask,
3438 uint_t *pnode,
3439 uint_t sbandwidth,
3440 int interval)
3441 {
3442 int i, uFrames, found;
3443 int array_leaf, best_array_leaf;
3444 uint_t node_sbandwidth;
3445 uint_t best_node_bandwidth;
3446 uint_t leaf_count;
3447 uchar_t bw_smask;
3448 uchar_t best_smask;
3449
3450 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3451 "ehci_find_bestfit_sitd_out_mask: ");
3452
3453 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3454
3455 /*
3456 * Because of the way the leaves are set up, we will automatically
3457 * hit the leftmost leaf of every possible node with this interval.
3458 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3459 */
3460 *smask = 0x00;
3461 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3462 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3463 uFrames++;
3464 }
3465 for (i = 0; i < uFrames; i++) {
3466 *smask = *smask << 1;
3467 *smask |= 0x1;
3468 }
3469
3470 found = 0;
3471 best_smask = 0x00;
3472 best_node_bandwidth = 0;
3473 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3474 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3475 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3476 &bw_smask);
3477
3478 /*
3479 * If this node cannot support our requirements skip to the
3480 * next leaf.
3481 */
3482 if (bw_smask == 0x00) {
3483 continue;
3484 }
3485
3486 /* You cannot have a start split on the 8th uFrame */
3487 for (i = 0; (*smask & 0x80) == 0; i++) {
3488 if (*smask & bw_smask) {
3489 found = 1;
3490 break;
3491 }
3492 *smask = *smask << 1;
3493 }
3494
3495 /*
3496 * If an appropriate smask is found save the information if:
3497 * o best_smask has not been found yet.
3498 * - or -
3499 * o This is the node with the least amount of bandwidth
3500 */
3501 if (found &&
3502 ((best_smask == 0x00) ||
3503 (best_node_bandwidth > node_sbandwidth))) {
3504 best_node_bandwidth = node_sbandwidth;
3505 best_array_leaf = array_leaf;
3506 best_smask = *smask;
3507 }
3508 }
3509
3510 /*
3511 * If we find node that can handle the bandwidth populate the
3512 * appropriate variables and return success.
3513 */
3514 if (best_smask) {
3515 *smask = best_smask;
3516 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3517 interval);
3518 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3519 ehci_index[best_array_leaf], leaf_count, best_smask);
3520
3521 return (USB_SUCCESS);
3522 }
3523
3524 return (USB_FAILURE);
3525 }
3526
3527
3528 /*
3529 * ehci_calculate_bw_availability_mask:
3530 *
3531 * Returns the "total bandwidth used" in this node.
3532 * Populates bw_mask with the uFrames that can support the bandwidth.
3533 *
3534 * If none of the uFrames can support this bandwidth, then bw_mask
3535 * is returned as 0x00 and the "total bandwidth used" is invalid.
3536 */
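/*
 * For example (hypothetical load): if micro-frame 0 of one of the
 * examined leaves is already filled to HS_PERIODIC_BANDWIDTH but
 * micro-frames 1 through 7 can still absorb the requested bandwidth,
 * bw_mask comes back as 0xfe and the return value is the sum of the
 * allocated frame bandwidth of the leaves that were examined.
 */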
3537 static uint_t
3538 ehci_calculate_bw_availability_mask(
3539 ehci_state_t *ehcip,
3540 uint_t bandwidth,
3541 int leaf,
3542 int leaf_count,
3543 uchar_t *bw_mask)
3544 {
3545 int i, j;
3546 uchar_t bw_uframe;
3547 int uframe_total;
3548 ehci_frame_bandwidth_t *fbp;
3549 uint_t total_bandwidth = 0;
3550
3551 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3552 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3553 leaf, leaf_count);
3554
3555 /* Start by saying all uFrames are available */
3556 *bw_mask = 0xFF;
3557
3558 for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3559 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3560
3561 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3562
3563 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3564 /*
3565 * If the uFrame in bw_mask is available check to see if
3566 * it can support the additional bandwidth.
3567 */
3568 bw_uframe = (*bw_mask & (0x1 << j));
3569 uframe_total =
3570 fbp->ehci_micro_frame_bandwidth[j] +
3571 bandwidth;
3572 if ((bw_uframe) &&
3573 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3574 *bw_mask = *bw_mask & ~bw_uframe;
3575 }
3576 }
3577 }
3578
3579 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3580 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3581 *bw_mask);
3582
3583 return (total_bandwidth);
3584 }
3585
3586
3587 /*
3588 * ehci_update_bw_availability:
3589 *
3590 * The leftmost leaf needs to be in terms of array position and
3591 * not the actual lattice position.
3592 */
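/*
 * For example, a call with a bandwidth of 100, a leftmost_leaf of 16,
 * a leaf_count of 4 and a mask of 0x01 adds 100 bytes to micro-frame 0
 * (and to the per-frame total) of frame bandwidth entries 16 through
 * 19.
 */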
3593 static void
3594 ehci_update_bw_availability(
3595 ehci_state_t *ehcip,
3596 int bandwidth,
3597 int leftmost_leaf,
3598 int leaf_count,
3599 uchar_t mask)
3600 {
3601 int i, j;
3602 ehci_frame_bandwidth_t *fbp;
3603 int uFrame_bandwidth[8];
3604
3605 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3606 "ehci_update_bw_availability: "
3607 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3608 leftmost_leaf, leaf_count, bandwidth, mask);
3609
3610 ASSERT(leftmost_leaf < 32);
3611 ASSERT(leftmost_leaf >= 0);
3612
3613 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3614 if (mask & 0x1) {
3615 uFrame_bandwidth[j] = bandwidth;
3616 } else {
3617 uFrame_bandwidth[j] = 0;
3618 }
3619
3620 mask = mask >> 1;
3621 }
3622
3623 /* Update all the affected leaves with the bandwidth */
3624 for (i = 0; i < leaf_count; i++) {
3625 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3626
3627 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3628 fbp->ehci_micro_frame_bandwidth[j] +=
3629 uFrame_bandwidth[j];
3630 fbp->ehci_allocated_frame_bandwidth +=
3631 uFrame_bandwidth[j];
3632 }
3633 }
3634 }
3635
3636 /*
3637 * Miscellaneous functions
3638 */
3639
3640 /*
3641 * ehci_obtain_state:
3642 *
3643 * NOTE: This function is also called from POLLED MODE.
3644 */
3645 ehci_state_t *
3646 ehci_obtain_state(dev_info_t *dip)
3647 {
3648 int instance = ddi_get_instance(dip);
3649
3650 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3651
3652 ASSERT(state != NULL);
3653
3654 return (state);
3655 }
3656
3657
3658 /*
3659 * ehci_state_is_operational:
3660 *
3661 * Check the Host controller state and return proper values.
3662 */
3663 int
3664 ehci_state_is_operational(ehci_state_t *ehcip)
3665 {
3666 int val;
3667
3668 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3669
3670 switch (ehcip->ehci_hc_soft_state) {
3671 case EHCI_CTLR_INIT_STATE:
3672 case EHCI_CTLR_SUSPEND_STATE:
3673 val = USB_FAILURE;
3674 break;
3675 case EHCI_CTLR_OPERATIONAL_STATE:
3676 val = USB_SUCCESS;
3677 break;
3678 case EHCI_CTLR_ERROR_STATE:
3679 val = USB_HC_HARDWARE_ERROR;
3680 break;
3681 default:
3682 val = USB_FAILURE;
3683 break;
3684 }
3685
3686 return (val);
3687 }
3688
3689
3690 /*
3691 * ehci_do_soft_reset
3692 *
3693 * Do soft reset of ehci host controller.
3694 */
3695 int
3696 ehci_do_soft_reset(ehci_state_t *ehcip)
3697 {
3698 usb_frame_number_t before_frame_number, after_frame_number;
3699 ehci_regs_t *ehci_save_regs;
3700
3701 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3702
3703 /* Increment host controller error count */
3704 ehcip->ehci_hc_error++;
3705
3706 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3707 "ehci_do_soft_reset:"
3708 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3709
3710 /*
3711 * Allocate space for saving current Host Controller
3712 * registers. Don't do any recovery if allocation
3713 * fails.
3714 */
3715 ehci_save_regs = (ehci_regs_t *)
3716 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3717
3718 if (ehci_save_regs == NULL) {
3719 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3720 "ehci_do_soft_reset: kmem_zalloc failed");
3721
3722 return (USB_FAILURE);
3723 }
3724
3725 /* Save current ehci registers */
3726 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3727 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3728 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3729 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3730 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3731 ehci_save_regs->ehci_periodic_list_base =
3732 Get_OpReg(ehci_periodic_list_base);
3733
3734 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3735 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3736
3737 /* Disable all list processing and interrupts */
3738 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3739 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3740
3741 /* Disable all EHCI interrupts */
3742 Set_OpReg(ehci_interrupt, 0);
3743
3744 	/* Wait for a few milliseconds */
3745 drv_usecwait(EHCI_SOF_TIMEWAIT);
3746
3747 /* Do light soft reset of ehci host controller */
3748 Set_OpReg(ehci_command,
3749 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3750
3751 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3752 "ehci_do_soft_reset: Reset in progress");
3753
3754 /* Wait for reset to complete */
3755 drv_usecwait(EHCI_RESET_TIMEWAIT);
3756
3757 /*
3758 	 * Restore the previously saved EHCI register values
3759 	 * into the current EHCI registers.
3760 */
3761 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3762 ehci_save_regs->ehci_ctrl_segment);
3763
3764 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3765 ehci_save_regs->ehci_periodic_list_base);
3766
3767 Set_OpReg(ehci_async_list_addr, (uint32_t)
3768 ehci_save_regs->ehci_async_list_addr);
3769
3770 /*
3771 * For some reason this register might get nulled out by
3772 	 * the Uli M1575 South Bridge. To work around the hardware
3773 	 * problem, check the value after the write and retry if
3774 	 * the last write fails.
3775 */
3776 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3777 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3778 (ehci_save_regs->ehci_async_list_addr !=
3779 Get_OpReg(ehci_async_list_addr))) {
3780 int retry = 0;
3781
3782 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3783 ehci_save_regs->ehci_async_list_addr, retry);
3784 if (retry >= EHCI_MAX_RETRY) {
3785 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3786 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3787 " ASYNCLISTADDR write failed.");
3788
3789 return (USB_FAILURE);
3790 }
3791 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3792 "ehci_do_soft_reset: ASYNCLISTADDR "
3793 "write failed, retry=%d", retry);
3794 }
3795
3796 Set_OpReg(ehci_config_flag, (uint32_t)
3797 ehci_save_regs->ehci_config_flag);
3798
3799 /* Enable both Asynchronous and Periodic Schedule if necessary */
3800 ehci_toggle_scheduler(ehcip);
3801
3802 /*
3803 	 * Set ehci_interrupt to enable all interrupts except the
3804 	 * Root Hub Status change and async advance interrupts.
3805 */
3806 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3807 EHCI_INTR_FRAME_LIST_ROLLOVER |
3808 EHCI_INTR_USB_ERROR |
3809 EHCI_INTR_USB);
3810
3811 /*
3812 	 * Deallocate the space that was allocated for saving
3813 	 * the HC registers.
3814 */
3815 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3816
3817 /*
3818 	 * Set the desired interrupt threshold, frame list size (if
3819 	 * applicable) and turn on the EHCI host controller.
3820 */
3821 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3822 ~EHCI_CMD_INTR_THRESHOLD) |
3823 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3824
3825 /* Wait 10ms for EHCI to start sending SOF */
3826 drv_usecwait(EHCI_RESET_TIMEWAIT);
3827
3828 /*
3829 * Get the current usb frame number before waiting for
3830 	 * a few milliseconds.
3831 */
3832 before_frame_number = ehci_get_current_frame_number(ehcip);
3833
3834 	/* Wait for a few milliseconds */
3835 drv_usecwait(EHCI_SOF_TIMEWAIT);
3836
3837 /*
3838 * Get the current usb frame number after waiting for
3839 	 * a few milliseconds.
3840 */
3841 after_frame_number = ehci_get_current_frame_number(ehcip);
3842
3843 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3844 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3845 "After Frame Number 0x%llx",
3846 (unsigned long long)before_frame_number,
3847 (unsigned long long)after_frame_number);
3848
3849 if ((after_frame_number <= before_frame_number) &&
3850 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3851
3852 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3853 "ehci_do_soft_reset: Soft reset failed");
3854
3855 return (USB_FAILURE);
3856 }
3857
3858 return (USB_SUCCESS);
3859 }
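
/*
 * Illustrative sketch (not compiled into the driver): the soft reset
 * above follows a "snapshot, reset, restore" pattern.  This standalone,
 * user-space model replaces the operational registers with a plain
 * structure and simulates the reset clearing them; all names and values
 * are assumptions made for illustration only.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef struct fake_regs {
	uint32_t command;
	uint32_t interrupt;
	uint32_t ctrl_segment;
	uint32_t async_list_addr;
	uint32_t config_flag;
	uint32_t periodic_list_base;
} fake_regs_t;

/* Simulated light reset: the hardware clears the operational registers */
static void
fake_light_reset(fake_regs_t *regs)
{
	(void) memset(regs, 0, sizeof (*regs));
}

int
main(void)
{
	fake_regs_t live = {
		.command = 0x00080011,
		.interrupt = 0x3f,
		.async_list_addr = 0x1000,
		.config_flag = 0x1,
		.periodic_list_base = 0x2000,
	};
	fake_regs_t saved = live;	/* snapshot before the reset */

	fake_light_reset(&live);	/* registers come back cleared */

	/* restore the saved state; command/interrupt are re-armed last */
	live.ctrl_segment = saved.ctrl_segment;
	live.periodic_list_base = saved.periodic_list_base;
	live.async_list_addr = saved.async_list_addr;
	live.config_flag = saved.config_flag;

	printf("restored async_list_addr = 0x%x\n",
	    (unsigned int)live.async_list_addr);

	return (0);
}
#endif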
3860
3861
3862 /*
3863 * ehci_get_xfer_attrs:
3864 *
3865 * Get the attributes of a particular xfer.
3866 *
3867 * NOTE: This function is also called from POLLED MODE.
3868 */
3869 usb_req_attrs_t
3870 ehci_get_xfer_attrs(
3871 ehci_state_t *ehcip,
3872 ehci_pipe_private_t *pp,
3873 ehci_trans_wrapper_t *tw)
3874 {
3875 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3876 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3877
3878 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3879 "ehci_get_xfer_attrs:");
3880
3881 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3882 case USB_EP_ATTR_CONTROL:
3883 attrs = ((usb_ctrl_req_t *)
3884 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3885 break;
3886 case USB_EP_ATTR_BULK:
3887 attrs = ((usb_bulk_req_t *)
3888 tw->tw_curr_xfer_reqp)->bulk_attributes;
3889 break;
3890 case USB_EP_ATTR_INTR:
3891 attrs = ((usb_intr_req_t *)
3892 tw->tw_curr_xfer_reqp)->intr_attributes;
3893 break;
3894 }
3895
3896 return (attrs);
3897 }
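
/*
 * Illustrative sketch (not compiled into the driver): the request
 * attributes live in a different request structure for each transfer
 * type, so the endpoint's bmAttributes field selects which structure to
 * read, as in the switch above.  The stand-in types, constants and
 * values below are assumptions made for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	EP_ATTR_MASK	0x03
#define	EP_CONTROL	0x00
#define	EP_BULK		0x02
#define	EP_INTR		0x03

typedef struct fake_ctrl_req { uint32_t ctrl_attributes; } fake_ctrl_req_t;
typedef struct fake_bulk_req { uint32_t bulk_attributes; } fake_bulk_req_t;
typedef struct fake_intr_req { uint32_t intr_attributes; } fake_intr_req_t;

static uint32_t
fake_get_xfer_attrs(uint8_t bmAttributes, void *reqp)
{
	switch (bmAttributes & EP_ATTR_MASK) {
	case EP_CONTROL:
		return (((fake_ctrl_req_t *)reqp)->ctrl_attributes);
	case EP_BULK:
		return (((fake_bulk_req_t *)reqp)->bulk_attributes);
	case EP_INTR:
		return (((fake_intr_req_t *)reqp)->intr_attributes);
	default:
		return (0);
	}
}

int
main(void)
{
	fake_bulk_req_t req = { .bulk_attributes = 0x04 };

	printf("attrs = 0x%x\n",
	    (unsigned int)fake_get_xfer_attrs(EP_BULK, &req));

	return (0);
}
#endif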
3898
3899
3900 /*
3901 * ehci_get_current_frame_number:
3902 *
3903 * Get the current software based usb frame number.
3904 */
3905 usb_frame_number_t
3906 ehci_get_current_frame_number(ehci_state_t *ehcip)
3907 {
3908 usb_frame_number_t usb_frame_number;
3909 usb_frame_number_t ehci_fno, micro_frame_number;
3910
3911 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3912
3913 ehci_fno = ehcip->ehci_fno;
3914 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3915
3916 /*
3917 * Calculate current software based usb frame number.
3918 *
3919 	 * This code accounts for the fact that the frame number is
3920 	 * updated by the Host Controller before the ehci driver gets
3921 	 * a FrameListRollover interrupt that adjusts the higher part
3922 	 * of the frame number.  A standalone sketch of this arithmetic
3923 	 * follows this function.
3924 	 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3925 */
3926 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3927 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3928 ehci_fno) & 0x2000);
3929
3930 /*
3931 	 * A micro frame is equivalent to 125 usec.  Eight micro
3932 	 * frames are equivalent to one millisecond, or one usb
3933 	 * frame number.
3934 */
3935 usb_frame_number = micro_frame_number >>
3936 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
3937
3938 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3939 "ehci_get_current_frame_number: "
3940 "Current usb uframe number = 0x%llx "
3941 "Current usb frame number = 0x%llx",
3942 (unsigned long long)micro_frame_number,
3943 (unsigned long long)usb_frame_number);
3944
3945 return (usb_frame_number);
3946 }
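
/*
 * Illustrative sketch (not compiled into the driver): a standalone,
 * user-space version of the frame number arithmetic above.  FRINDEX
 * supplies the low 14 bits in micro-frames and "fno" is the software
 * copy accumulated from rollover interrupts; the XOR term adds 0x2000
 * when the hardware has already wrapped past bit 13 but the rollover
 * interrupt has not been handled yet.  Shifting by three converts
 * micro-frames to 1 ms USB frames.  The names and example values are
 * assumptions made for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	SKETCH_uFRAMES_PER_FRAME_SHIFT	3

static uint64_t
reconstruct_frame_number(uint64_t fno, uint32_t frindex)
{
	uint64_t uframe;

	uframe = ((frindex & 0x1FFF) | fno) +
	    (((frindex & 0x3FFF) ^ fno) & 0x2000);

	return (uframe >> SKETCH_uFRAMES_PER_FRAME_SHIFT);
}

int
main(void)
{
	/* hardware already wrapped, software rollover not yet taken */
	printf("frame = 0x%llx\n", (unsigned long long)
	    reconstruct_frame_number(0x2000, 0x0010));

	/* normal case: hardware and software halves agree */
	printf("frame = 0x%llx\n", (unsigned long long)
	    reconstruct_frame_number(0x2000, 0x2010));

	return (0);
}
#endif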
3947
3948
3949 /*
3950 * ehci_cpr_cleanup:
3951 *
3952  * Clean up ehci state and other ehci-specific information across
3953  * CheckPoint Resume (CPR).
3954 */
3955 static void
3956 ehci_cpr_cleanup(ehci_state_t *ehcip)
3957 {
3958 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3959
3960 /* Reset software part of usb frame number */
3961 ehcip->ehci_fno = 0;
3962 }
3963
3964
3965 /*
3966 * ehci_wait_for_sof:
3967 *
3968  * Wait for a couple of SOF interrupts.
3969 */
3970 int
3971 ehci_wait_for_sof(ehci_state_t *ehcip)
3972 {
3973 usb_frame_number_t before_frame_number, after_frame_number;
3974 int error = USB_SUCCESS;
3975
3976 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3977 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3978
3979 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3980
3981 error = ehci_state_is_operational(ehcip);
3982
3983 if (error != USB_SUCCESS) {
3984
3985 return (error);
3986 }
3987
3988 /* Get the current usb frame number before waiting for two SOFs */
3989 before_frame_number = ehci_get_current_frame_number(ehcip);
3990
3991 mutex_exit(&ehcip->ehci_int_mutex);
3992
3993 	/* Wait for a few milliseconds */
3994 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3995
3996 mutex_enter(&ehcip->ehci_int_mutex);
3997
3998 	/* Get the current usb frame number after waking up */
3999 after_frame_number = ehci_get_current_frame_number(ehcip);
4000
4001 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4002 "ehci_wait_for_sof: framenumber: before 0x%llx "
4003 "after 0x%llx",
4004 (unsigned long long)before_frame_number,
4005 (unsigned long long)after_frame_number);
4006
4007 	/* Return failure if the usb frame number has not changed */
4008 if (after_frame_number <= before_frame_number) {
4009
4010 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
4011
4012 USB_DPRINTF_L0(PRINT_MASK_LISTS,
4013 ehcip->ehci_log_hdl, "No SOF interrupts");
4014
4015 /* Set host controller soft state to error */
4016 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
4017
4018 return (USB_FAILURE);
4019 }
4020
4021 }
4022
4023 return (USB_SUCCESS);
4024 }
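
/*
 * Illustrative sketch (not compiled into the driver): the SOF check
 * above is "sample the frame counter, wait, sample again, and recover
 * if it did not advance".  This standalone, user-space model fakes the
 * counter with a function pointer so both the healthy and the hung case
 * can be exercised; all names are assumptions made for illustration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
healthy_counter(void)
{
	static uint64_t frame = 100;

	return (frame += 4);	/* advances on every read */
}

static uint64_t
hung_counter(void)
{
	return (100);		/* never advances */
}

static int
check_sof_progress(uint64_t (*read_frame)(void))
{
	uint64_t before = read_frame();
	/* a real driver would drop its mutex and delay() here */
	uint64_t after = read_frame();

	return (after > before ? 0 : -1);
}

int
main(void)
{
	printf("healthy controller: %s\n",
	    check_sof_progress(healthy_counter) == 0 ? "SOFs seen" : "hung");
	printf("hung controller:    %s\n",
	    check_sof_progress(hung_counter) == 0 ? "SOFs seen" : "hung");

	return (0);
}
#endif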
4025
4026 /*
4027  * Toggle the async/periodic schedule based on the open pipe count.
4028  * During pipe cleanup (in the pipe reset case), the pipe's QH is
4029  * temporarily disabled, but the TW on the pipe is not freed.  In this
4030  * case, we need to disable the async/periodic schedule for some
4031  * incompatible hardware.  Otherwise, the hardware will overwrite the
4032  * software's configuration of the QH.
4033 */
4034 void
4035 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
4036 {
4037 uint_t temp_reg, cmd_reg;
4038
4039 cmd_reg = Get_OpReg(ehci_command);
4040 temp_reg = cmd_reg;
4041
4042 /*
4043 * Enable/Disable asynchronous scheduler, and
4044 * turn on/off async list door bell
4045 */
4046 if (ehcip->ehci_open_async_count) {
4047 if ((ehcip->ehci_async_req_count > 0) &&
4048 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
4049 /*
4050 * For some reason this address might get nulled out by
4051 * the ehci chip. Set it here just in case it is null.
4052 */
4053 Set_OpReg(ehci_async_list_addr,
4054 ehci_qh_cpu_to_iommu(ehcip,
4055 ehcip->ehci_head_of_async_sched_list));
4056
4057 /*
4058 * For some reason this register might get nulled out by
4059 			 * the Uli M1575 Southbridge. To work around the HW
4060 			 * problem, check the value after the write and retry
4061 			 * if the last write fails (sketch after this function).
4062 *
4063 * If the ASYNCLISTADDR remains "stuck" after
4064 * EHCI_MAX_RETRY retries, then the M1575 is broken
4065 * and is stuck in an inconsistent state and is about
4066 * to crash the machine with a trn_oor panic when it
4067 * does a DMA read from 0x0. It is better to panic
4068 * now rather than wait for the trn_oor crash; this
4069 * way Customer Service will have a clean signature
4070 * that indicts the M1575 chip rather than a
4071 * mysterious and hard-to-diagnose trn_oor panic.
4072 */
4073 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4074 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4075 (ehci_qh_cpu_to_iommu(ehcip,
4076 ehcip->ehci_head_of_async_sched_list) !=
4077 Get_OpReg(ehci_async_list_addr))) {
4078 int retry = 0;
4079
4080 Set_OpRegRetry(ehci_async_list_addr,
4081 ehci_qh_cpu_to_iommu(ehcip,
4082 ehcip->ehci_head_of_async_sched_list),
4083 retry);
4084 if (retry >= EHCI_MAX_RETRY)
4085 cmn_err(CE_PANIC,
4086 "ehci_toggle_scheduler_on_pipe: "
4087 "ASYNCLISTADDR write failed.");
4088
4089 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4090 ehcip->ehci_log_hdl,
4091 "ehci_toggle_scheduler_on_pipe:"
4092 " ASYNCLISTADDR write failed, retry=%d",
4093 retry);
4094 }
4095
4096 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4097 }
4098 } else {
4099 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4100 }
4101
4102 if (ehcip->ehci_open_periodic_count) {
4103 if ((ehcip->ehci_periodic_req_count > 0) &&
4104 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4105 /*
4106 			 * For some reason this address gets nulled out by
4107 * the ehci chip. Set it here just in case it is null.
4108 */
4109 Set_OpReg(ehci_periodic_list_base,
4110 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4111 0xFFFFF000));
4112 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4113 }
4114 } else {
4115 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4116 }
4117
4118 /* Just an optimization */
4119 if (temp_reg != cmd_reg) {
4120 Set_OpReg(ehci_command, cmd_reg);
4121 }
4122 }
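
/*
 * Illustrative sketch (not compiled into the driver): the ULi M1575
 * workaround used above boils down to "write the register, read it
 * back, and retry a bounded number of times if the value did not
 * stick".  This standalone, user-space model substitutes a plain
 * variable for the hardware register; the names and the retry limit
 * are assumptions made for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	SKETCH_MAX_RETRY	3

static uint32_t fake_register;
static int drop_writes = 2;	/* pretend the first two writes are lost */

static void
reg_write(uint32_t val)
{
	if (drop_writes > 0) {
		drop_writes--;
		return;		/* write silently dropped, like the M1575 */
	}
	fake_register = val;
}

static int
write_with_verify(uint32_t val)
{
	int retry;

	for (retry = 0; retry < SKETCH_MAX_RETRY; retry++) {
		reg_write(val);
		if (fake_register == val)
			return (retry);	/* value stuck after this many retries */
	}

	return (-1);	/* give up; the real driver fails or panics here */
}

int
main(void)
{
	int retry = write_with_verify(0x12345000);

	if (retry < 0)
		printf("register write never stuck\n");
	else
		printf("register write stuck after %d retries\n", retry);

	return (0);
}
#endif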
4123
4124
4125 /*
4126 * ehci_toggle_scheduler:
4127 *
4128  * Turn the schedulers on or off based on the open pipe count.
4129 */
4130 void
4131 ehci_toggle_scheduler(ehci_state_t *ehcip)
4132 {
4133 uint_t temp_reg, cmd_reg;
4134
4135 /*
4136 	 * For performance, we only need to change the schedule enable
4137 	 * bits when the async or periodic request count becomes 1 or 0.
4138 *
4139 * Related bits already enabled if
4140 * async and periodic req counts are > 1
4141 * OR async req count > 1 & no periodic pipe
4142 * OR periodic req count > 1 & no async pipe
4143 */
4144 if (((ehcip->ehci_async_req_count > 1) &&
4145 (ehcip->ehci_periodic_req_count > 1)) ||
4146 ((ehcip->ehci_async_req_count > 1) &&
4147 (ehcip->ehci_open_periodic_count == 0)) ||
4148 ((ehcip->ehci_periodic_req_count > 1) &&
4149 (ehcip->ehci_open_async_count == 0))) {
4150 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4151 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4152 "async/periodic bits no need to change");
4153
4154 return;
4155 }
4156
4157 cmd_reg = Get_OpReg(ehci_command);
4158 temp_reg = cmd_reg;
4159
4160 /*
4161 * Enable/Disable asynchronous scheduler, and
4162 * turn on/off async list door bell
4163 */
4164 if (ehcip->ehci_async_req_count > 1) {
4165 		/* the async bit is already enabled */
4166 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4167 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4168 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4169 } else if (ehcip->ehci_async_req_count == 1) {
4170 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4171 /*
4172 * For some reason this address might get nulled out by
4173 * the ehci chip. Set it here just in case it is null.
4174 * If it's not null, we should not reset the
4175 * ASYNCLISTADDR, because it's updated by hardware to
4176 * point to the next queue head to be executed.
4177 */
4178 if (!Get_OpReg(ehci_async_list_addr)) {
4179 Set_OpReg(ehci_async_list_addr,
4180 ehci_qh_cpu_to_iommu(ehcip,
4181 ehcip->ehci_head_of_async_sched_list));
4182 }
4183
4184 /*
4185 * For some reason this register might get nulled out by
4186 			 * the Uli M1575 Southbridge. To work around the HW
4187 			 * problem, check the value after the write and retry
4188 			 * if the last write fails.
4189 *
4190 * If the ASYNCLISTADDR remains "stuck" after
4191 * EHCI_MAX_RETRY retries, then the M1575 is broken
4192 * and is stuck in an inconsistent state and is about
4193 * to crash the machine with a trn_oor panic when it
4194 * does a DMA read from 0x0. It is better to panic
4195 * now rather than wait for the trn_oor crash; this
4196 * way Customer Service will have a clean signature
4197 * that indicts the M1575 chip rather than a
4198 * mysterious and hard-to-diagnose trn_oor panic.
4199 */
4200 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4201 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4202 (ehci_qh_cpu_to_iommu(ehcip,
4203 ehcip->ehci_head_of_async_sched_list) !=
4204 Get_OpReg(ehci_async_list_addr))) {
4205 int retry = 0;
4206
4207 Set_OpRegRetry(ehci_async_list_addr,
4208 ehci_qh_cpu_to_iommu(ehcip,
4209 ehcip->ehci_head_of_async_sched_list),
4210 retry);
4211 if (retry >= EHCI_MAX_RETRY)
4212 cmn_err(CE_PANIC,
4213 "ehci_toggle_scheduler: "
4214 "ASYNCLISTADDR write failed.");
4215
4216 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4217 ehcip->ehci_log_hdl,
4218 "ehci_toggle_scheduler: ASYNCLISTADDR "
4219 "write failed, retry=%d", retry);
4220 }
4221 }
4222 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4223 } else {
4224 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4225 }
4226
4227 if (ehcip->ehci_periodic_req_count > 1) {
4228 		/* the periodic bit is already enabled. */
4229 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4230 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4231 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4232 } else if (ehcip->ehci_periodic_req_count == 1) {
4233 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4234 /*
4235 			 * For some reason this address gets nulled out by
4236 * the ehci chip. Set it here just in case it is null.
4237 */
4238 Set_OpReg(ehci_periodic_list_base,
4239 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4240 0xFFFFF000));
4241 }
4242 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4243 } else {
4244 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4245 }
4246
4247 /* Just an optimization */
4248 if (temp_reg != cmd_reg) {
4249 Set_OpReg(ehci_command, cmd_reg);
4250
4251 /* To make sure the command register is updated correctly */
4252 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4253 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4254 int retry = 0;
4255
4256 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4257 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4258 ehcip->ehci_log_hdl,
4259 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4260 retry);
4261 }
4262
4263 }
4264 }
4265
4266 /*
4267 * ehci print functions
4268 */
4269
4270 /*
4271 * ehci_print_caps:
4272 */
4273 void
4274 ehci_print_caps(ehci_state_t *ehcip)
4275 {
4276 uint_t i;
4277
4278 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4279 "\n\tUSB 2.0 Host Controller Characteristics\n");
4280
4281 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4282 "Caps Length: 0x%x Version: 0x%x\n",
4283 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4284
4285 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4286 "Structural Parameters\n");
4287 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4288 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4289 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4290 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4291 "No of Classic host controllers: 0x%x",
4292 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4293 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4294 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4295 "No of ports per Classic host controller: 0x%x",
4296 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4297 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4298 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4299 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4300 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4301 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4302 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4303 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4304 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4305 "No of root hub ports: 0x%x\n",
4306 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4307
4308 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4309 "Capability Parameters\n");
4310 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4311 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4312 EHCI_HCC_EECP) ? "Yes" : "No");
4313 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4314 "Isoch schedule threshold: 0x%x",
4315 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4316 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4317 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4318 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4319 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4320 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4321 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4322 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4323 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4324 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4325
4326 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4327 "Classic Port Route Description");
4328
4329 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4330 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4331 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4332 }
4333 }
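
/*
 * Illustrative sketch (not compiled into the driver): a standalone
 * decoder for a raw HCSPARAMS value using the field layout from the
 * EHCI 1.0 specification (N_PORTS in bits 3:0, PPC in bit 4, N_PCC in
 * bits 11:8, N_CC in bits 15:12, P_INDICATOR in bit 16).  The masks,
 * names and example value below are local assumptions, not the
 * driver's macros.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t hcsparams = 0x00012416;	/* example raw value */

	printf("root hub ports:        %u\n",
	    (unsigned int)(hcsparams & 0xF));
	printf("port power control:    %s\n",
	    (hcsparams & (1U << 4)) ? "Yes" : "No");
	printf("ports per companion:   %u\n",
	    (unsigned int)((hcsparams >> 8) & 0xF));
	printf("companion controllers: %u\n",
	    (unsigned int)((hcsparams >> 12) & 0xF));
	printf("port indicators:       %s\n",
	    (hcsparams & (1U << 16)) ? "Yes" : "No");

	return (0);
}
#endif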
4334
4335
4336 /*
4337 * ehci_print_regs:
4338 */
4339 void
4340 ehci_print_regs(ehci_state_t *ehcip)
4341 {
4342 uint_t i;
4343
4344 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4345 "\n\tEHCI%d Operational Registers\n",
4346 ddi_get_instance(ehcip->ehci_dip));
4347
4348 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4349 "Command: 0x%x Status: 0x%x",
4350 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4351 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4352 "Interrupt: 0x%x Frame Index: 0x%x",
4353 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4354 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4355 "Control Segment: 0x%x Periodic List Base: 0x%x",
4356 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4357 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4358 "Async List Addr: 0x%x Config Flag: 0x%x",
4359 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4360
4361 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4362 "Root Hub Port Status");
4363
4364 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4365 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4366 "\tPort Status 0x%x: 0x%x ", i,
4367 Get_OpReg(ehci_rh_port_status[i]));
4368 }
4369 }
4370
4371
4372 /*
4373 * ehci_print_qh:
4374 */
4375 void
4376 ehci_print_qh(
4377 ehci_state_t *ehcip,
4378 ehci_qh_t *qh)
4379 {
4380 uint_t i;
4381
4382 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4383 "ehci_print_qh: qh = 0x%p", (void *)qh);
4384
4385 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4386 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4387 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4388 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4389 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4390 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4391 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4392 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4393 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4394 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4395 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4396 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4397 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4398 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4399
4400 for (i = 0; i < 5; i++) {
4401 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4402 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4403 }
4404
4405 for (i = 0; i < 5; i++) {
4406 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4407 "\tqh_buf_high[%d]: 0x%x ",
4408 i, Get_QH(qh->qh_buf_high[i]));
4409 }
4410
4411 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4412 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4413 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4414 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4415 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4416 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4417 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4418 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4419 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4420 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4421 }
4422
4423
4424 /*
4425 * ehci_print_qtd:
4426 */
4427 void
4428 ehci_print_qtd(
4429 ehci_state_t *ehcip,
4430 ehci_qtd_t *qtd)
4431 {
4432 uint_t i;
4433
4434 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4435 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4436
4437 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4438 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4439 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4440 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4441 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4442 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4443
4444 for (i = 0; i < 5; i++) {
4445 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4446 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4447 }
4448
4449 for (i = 0; i < 5; i++) {
4450 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4451 "\tqtd_buf_high[%d]: 0x%x ",
4452 i, Get_QTD(qtd->qtd_buf_high[i]));
4453 }
4454
4455 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4456 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4457 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4458 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4459 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4460 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4461 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4462 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4463 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4464 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4465 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4466 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4467 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4468 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4469 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4470 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4471 }
4472
4473 /*
4474 * ehci kstat functions
4475 */
4476
4477 /*
4478 * ehci_create_stats:
4479 *
4480 * Allocate and initialize the ehci kstat structures
4481 */
4482 void
4483 ehci_create_stats(ehci_state_t *ehcip)
4484 {
4485 char kstatname[KSTAT_STRLEN];
4486 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4487 char *usbtypes[USB_N_COUNT_KSTATS] =
4488 {"ctrl", "isoch", "bulk", "intr"};
4489 uint_t instance = ehcip->ehci_instance;
4490 ehci_intrs_stats_t *isp;
4491 int i;
4492
4493 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4494 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4495 dname, instance);
4496 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4497 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4498 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4499 KSTAT_FLAG_PERSISTENT);
4500
4501 if (EHCI_INTRS_STATS(ehcip)) {
4502 isp = EHCI_INTRS_STATS_DATA(ehcip);
4503 kstat_named_init(&isp->ehci_sts_total,
4504 "Interrupts Total", KSTAT_DATA_UINT64);
4505 kstat_named_init(&isp->ehci_sts_not_claimed,
4506 "Not Claimed", KSTAT_DATA_UINT64);
4507 kstat_named_init(&isp->ehci_sts_async_sched_status,
4508 "Async schedule status", KSTAT_DATA_UINT64);
4509 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4510 "Periodic sched status", KSTAT_DATA_UINT64);
4511 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4512 "Empty async schedule", KSTAT_DATA_UINT64);
4513 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4514 "Host controller Halted", KSTAT_DATA_UINT64);
4515 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4516 "Intr on async advance", KSTAT_DATA_UINT64);
4517 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4518 "Host system error", KSTAT_DATA_UINT64);
4519 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4520 "Frame list rollover", KSTAT_DATA_UINT64);
4521 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4522 "Port change detect", KSTAT_DATA_UINT64);
4523 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4524 "USB error interrupt", KSTAT_DATA_UINT64);
4525 kstat_named_init(&isp->ehci_sts_usb_intr,
4526 "USB interrupt", KSTAT_DATA_UINT64);
4527
4528 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4529 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4530 kstat_install(EHCI_INTRS_STATS(ehcip));
4531 }
4532 }
4533
4534 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4535 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4536 dname, instance);
4537 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4538 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4539 KSTAT_FLAG_PERSISTENT);
4540
4541 if (EHCI_TOTAL_STATS(ehcip)) {
4542 kstat_install(EHCI_TOTAL_STATS(ehcip));
4543 }
4544 }
4545
4546 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4547 if (ehcip->ehci_count_stats[i] == NULL) {
4548 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4549 dname, instance, usbtypes[i]);
4550 ehcip->ehci_count_stats[i] = kstat_create("usba",
4551 instance, kstatname, "usb_byte_count",
4552 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4553
4554 if (ehcip->ehci_count_stats[i]) {
4555 kstat_install(ehcip->ehci_count_stats[i]);
4556 }
4557 }
4558 }
4559 }
4560
4561
4562 /*
4563 * ehci_destroy_stats:
4564 *
4565 * Clean up ehci kstat structures
4566 */
4567 void
4568 ehci_destroy_stats(ehci_state_t *ehcip)
4569 {
4570 int i;
4571
4572 if (EHCI_INTRS_STATS(ehcip)) {
4573 kstat_delete(EHCI_INTRS_STATS(ehcip));
4574 EHCI_INTRS_STATS(ehcip) = NULL;
4575 }
4576
4577 if (EHCI_TOTAL_STATS(ehcip)) {
4578 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4579 EHCI_TOTAL_STATS(ehcip) = NULL;
4580 }
4581
4582 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4583 if (ehcip->ehci_count_stats[i]) {
4584 kstat_delete(ehcip->ehci_count_stats[i]);
4585 ehcip->ehci_count_stats[i] = NULL;
4586 }
4587 }
4588 }
4589
4590
4591 /*
4592 * ehci_do_intrs_stats:
4593 *
4594 * ehci status information
4595 */
4596 void
4597 ehci_do_intrs_stats(
4598 ehci_state_t *ehcip,
4599 int val)
4600 {
4601 if (EHCI_INTRS_STATS(ehcip)) {
4602 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4603 switch (val) {
4604 case EHCI_STS_ASYNC_SCHED_STATUS:
4605 EHCI_INTRS_STATS_DATA(ehcip)->
4606 ehci_sts_async_sched_status.value.ui64++;
4607 break;
4608 case EHCI_STS_PERIODIC_SCHED_STATUS:
4609 EHCI_INTRS_STATS_DATA(ehcip)->
4610 ehci_sts_periodic_sched_status.value.ui64++;
4611 break;
4612 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4613 EHCI_INTRS_STATS_DATA(ehcip)->
4614 ehci_sts_empty_async_schedule.value.ui64++;
4615 break;
4616 case EHCI_STS_HOST_CTRL_HALTED:
4617 EHCI_INTRS_STATS_DATA(ehcip)->
4618 ehci_sts_host_ctrl_halted.value.ui64++;
4619 break;
4620 case EHCI_STS_ASYNC_ADVANCE_INTR:
4621 EHCI_INTRS_STATS_DATA(ehcip)->
4622 ehci_sts_async_advance_intr.value.ui64++;
4623 break;
4624 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4625 EHCI_INTRS_STATS_DATA(ehcip)->
4626 ehci_sts_host_system_error_intr.value.ui64++;
4627 break;
4628 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4629 EHCI_INTRS_STATS_DATA(ehcip)->
4630 ehci_sts_frm_list_rollover_intr.value.ui64++;
4631 break;
4632 case EHCI_STS_RH_PORT_CHANGE_INTR:
4633 EHCI_INTRS_STATS_DATA(ehcip)->
4634 ehci_sts_rh_port_change_intr.value.ui64++;
4635 break;
4636 case EHCI_STS_USB_ERROR_INTR:
4637 EHCI_INTRS_STATS_DATA(ehcip)->
4638 ehci_sts_usb_error_intr.value.ui64++;
4639 break;
4640 case EHCI_STS_USB_INTR:
4641 EHCI_INTRS_STATS_DATA(ehcip)->
4642 ehci_sts_usb_intr.value.ui64++;
4643 break;
4644 default:
4645 EHCI_INTRS_STATS_DATA(ehcip)->
4646 ehci_sts_not_claimed.value.ui64++;
4647 break;
4648 }
4649 }
4650 }
4651
4652
4653 /*
4654 * ehci_do_byte_stats:
4655 *
4656 * ehci data xfer information
4657 */
4658 void
4659 ehci_do_byte_stats(
4660 ehci_state_t *ehcip,
4661 size_t len,
4662 uint8_t attr,
4663 uint8_t addr)
4664 {
4665 uint8_t type = attr & USB_EP_ATTR_MASK;
4666 uint8_t dir = addr & USB_EP_DIR_MASK;
4667
4668 if (dir == USB_EP_DIR_IN) {
4669 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4670 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4671 switch (type) {
4672 case USB_EP_ATTR_CONTROL:
4673 EHCI_CTRL_STATS(ehcip)->reads++;
4674 EHCI_CTRL_STATS(ehcip)->nread += len;
4675 break;
4676 case USB_EP_ATTR_BULK:
4677 EHCI_BULK_STATS(ehcip)->reads++;
4678 EHCI_BULK_STATS(ehcip)->nread += len;
4679 break;
4680 case USB_EP_ATTR_INTR:
4681 EHCI_INTR_STATS(ehcip)->reads++;
4682 EHCI_INTR_STATS(ehcip)->nread += len;
4683 break;
4684 case USB_EP_ATTR_ISOCH:
4685 EHCI_ISOC_STATS(ehcip)->reads++;
4686 EHCI_ISOC_STATS(ehcip)->nread += len;
4687 break;
4688 }
4689 } else if (dir == USB_EP_DIR_OUT) {
4690 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4691 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4692 switch (type) {
4693 case USB_EP_ATTR_CONTROL:
4694 EHCI_CTRL_STATS(ehcip)->writes++;
4695 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4696 break;
4697 case USB_EP_ATTR_BULK:
4698 EHCI_BULK_STATS(ehcip)->writes++;
4699 EHCI_BULK_STATS(ehcip)->nwritten += len;
4700 break;
4701 case USB_EP_ATTR_INTR:
4702 EHCI_INTR_STATS(ehcip)->writes++;
4703 EHCI_INTR_STATS(ehcip)->nwritten += len;
4704 break;
4705 case USB_EP_ATTR_ISOCH:
4706 EHCI_ISOC_STATS(ehcip)->writes++;
4707 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4708 break;
4709 }
4710 }
4711 }
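
/*
 * Illustrative sketch (not compiled into the driver): a standalone,
 * user-space model of the accounting above.  The endpoint address
 * encodes the direction in bit 7 and the attributes encode the transfer
 * type in the low two bits; every completed transfer bumps one total
 * and one per-type counter pair.  The structure and names below are
 * simplified assumptions made for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define	DIR_IN		0x80
#define	TYPE_MASK	0x03

typedef struct byte_stats {
	uint64_t reads, nread;
	uint64_t writes, nwritten;
} byte_stats_t;

/* one slot per transfer type: control, isoch, bulk, interrupt */
static byte_stats_t per_type[4];
static byte_stats_t total;

static void
count_transfer(size_t len, uint8_t attr, uint8_t addr)
{
	byte_stats_t *bs = &per_type[attr & TYPE_MASK];

	if (addr & DIR_IN) {
		total.reads++;
		total.nread += len;
		bs->reads++;
		bs->nread += len;
	} else {
		total.writes++;
		total.nwritten += len;
		bs->writes++;
		bs->nwritten += len;
	}
}

int
main(void)
{
	count_transfer(512, 0x02, 0x81);	/* bulk IN,  endpoint 1 */
	count_transfer(64, 0x03, 0x02);		/* intr OUT, endpoint 2 */

	printf("total: %llu read bytes, %llu written bytes\n",
	    (unsigned long long)total.nread,
	    (unsigned long long)total.nwritten);

	return (0);
}
#endif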
4712