1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * EHCI Host Controller Driver (EHCI)
28 *
29 * The EHCI driver is a software driver which interfaces to the Universal
30 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
31 * the Host Controller is defined by the EHCI Host Controller Interface.
32 *
33 * This module contains the main EHCI driver code which handles all USB
34 * transfers, bandwidth allocations and other general functionalities.
35 */
36
37 #include <sys/usb/hcd/ehci/ehcid.h>
38 #include <sys/usb/hcd/ehci/ehci_isoch.h>
39 #include <sys/usb/hcd/ehci/ehci_xfer.h>
40
41 /*
42 * EHCI MSI tunable:
43 *
44 * By default MSI is enabled on all supported platforms, except for the
45 * EHCI controller of the ULI1575 southbridge.
46 */
47 boolean_t ehci_enable_msi = B_TRUE;
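/*
 * Illustrative note (not part of the original source): on Solaris-derived
 * systems a global like this is normally overridden from /etc/system, e.g.
 *
 *	set ehci:ehci_enable_msi = 0
 *
 * which would force the driver to fall back to FIXED interrupts on the
 * next boot. The line above uses the standard module:variable mechanism
 * and is shown only as an example.
 */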
48
49 /* Pointer to the state structure */
50 extern void *ehci_statep;
51
52 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
53
54 extern uint_t ehci_vt62x2_workaround;
55 extern int force_ehci_off;
56
57 /* Adjustable variables for the size of the pools */
58 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
59 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
60
61 /*
62 * Initialize the values that determine the order in which the 32ms intr
63 * QHs are executed by the host controller in the lattice tree.
64 */
65 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
66 {0x00, 0x10, 0x08, 0x18,
67 0x04, 0x14, 0x0c, 0x1c,
68 0x02, 0x12, 0x0a, 0x1a,
69 0x06, 0x16, 0x0e, 0x1e,
70 0x01, 0x11, 0x09, 0x19,
71 0x05, 0x15, 0x0d, 0x1d,
72 0x03, 0x13, 0x0b, 0x1b,
73 0x07, 0x17, 0x0f, 0x1f};
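/*
 * For reference, ehci_index[] above is the 5-bit bit-reversal of the
 * indices 0..31; a hypothetical helper (a sketch only, not used by the
 * driver) could generate the same table:
 *
 *	static uchar_t
 *	ehci_bit_reverse5(uchar_t i)
 *	{
 *		uchar_t r = 0;
 *		int b;
 *
 *		for (b = 0; b < 5; b++) {
 *			if (i & (1 << b))
 *				r |= 1 << (4 - b);
 *		}
 *
 *		return (r);
 *	}
 *
 * The bit-reversed ordering spreads consecutive frame numbers evenly
 * across the 32 interrupt QH lists of the lattice.
 */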
74
75 /*
76 * Initialize the values which are used to calculate start split mask
77 * for the low/full/high speed interrupt and isochronous endpoints.
78 */
79 static uint_t ehci_start_split_mask[15] = {
80 /*
81 	 * For high/full/low speed usb devices. For high speed
82 	 * devices with a polling interval greater than or equal
83 	 * to 8 microframes (a microframe is 125us).
84 */
85 0x01, /* 00000001 */
86 0x02, /* 00000010 */
87 0x04, /* 00000100 */
88 0x08, /* 00001000 */
89 0x10, /* 00010000 */
90 0x20, /* 00100000 */
91 0x40, /* 01000000 */
92 0x80, /* 10000000 */
93
94 	/* Only for high speed devices with a polling interval of 4 microframes */
95 0x11, /* 00010001 */
96 0x22, /* 00100010 */
97 0x44, /* 01000100 */
98 0x88, /* 10001000 */
99
100 	/* Only for high speed devices with a polling interval of 2 microframes */
101 0x55, /* 01010101 */
102 0xaa, /* 10101010 */
103
104 	/* Only for high speed devices with a polling interval of 1 microframe */
105 0xff /* 11111111 */
106 };
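/*
 * A sketch of how the start split masks above relate to the polling
 * interval (an interpretation of the table, not code the driver runs):
 * for a high speed endpoint polled every 'interval' microframes
 * (interval = 8, 4, 2 or 1), the mask repeats a single bit every
 * 'interval' bit positions starting at microframe 'start', e.g.
 *
 *	uint_t mask = 0;
 *	uint_t uframe;
 *
 *	for (uframe = start; uframe < 8; uframe += interval)
 *		mask |= 1 << uframe;
 *
 * so interval 4 with start 0 yields 0x11, interval 2 yields 0x55 and
 * interval 1 yields 0xff.
 */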
107
108 /*
109 * Initialize the values which are used to calculate complete split mask
110 * for the low/full speed interrupt and isochronous endpoints.
111 */
112 static uint_t ehci_intr_complete_split_mask[7] = {
113 /* Only full/low speed devices */
114 0x1c, /* 00011100 */
115 0x38, /* 00111000 */
116 0x70, /* 01110000 */
117 0xe0, /* 11100000 */
118 0x00, /* Need FSTN feature */
119 0x00, /* Need FSTN feature */
120 0x00 /* Need FSTN feature */
121 };
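/*
 * For illustration (derived from the EHCI split-transaction rules, an
 * assumption rather than code in this file): each complete split mask
 * above follows the corresponding start split bit by 2-4 microframes,
 * i.e. for a start split in microframe n (n = 0..3)
 *
 *	cmask = (0x04 | 0x08 | 0x10) << n;
 *
 * which gives 0x1c, 0x38, 0x70 and 0xe0. A start split in microframes
 * 4-7 would need complete splits in the following frame, which requires
 * the FSTN feature that this driver does not implement, hence the 0x00
 * entries.
 */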
122
123
124 /*
125 * EHCI Internal Function Prototypes
126 */
127
128 /* Host Controller Driver (HCD) initialization functions */
129 void ehci_set_dma_attributes(ehci_state_t *ehcip);
130 int ehci_allocate_pools(ehci_state_t *ehcip);
131 void ehci_decode_ddi_dma_addr_bind_handle_result(
132 ehci_state_t *ehcip,
133 int result);
134 int ehci_map_regs(ehci_state_t *ehcip);
135 int ehci_register_intrs_and_init_mutex(
136 ehci_state_t *ehcip);
137 static int ehci_add_intrs(ehci_state_t *ehcip,
138 int intr_type);
139 int ehci_init_ctlr(ehci_state_t *ehcip,
140 int init_type);
141 static int ehci_take_control(ehci_state_t *ehcip);
142 static int ehci_init_periodic_frame_lst_table(
143 ehci_state_t *ehcip);
144 static void ehci_build_interrupt_lattice(
145 ehci_state_t *ehcip);
146 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
147
148 /* Host Controller Driver (HCD) deinitialization functions */
149 int ehci_cleanup(ehci_state_t *ehcip);
150 static void ehci_rem_intrs(ehci_state_t *ehcip);
151 int ehci_cpr_suspend(ehci_state_t *ehcip);
152 int ehci_cpr_resume(ehci_state_t *ehcip);
153
154 /* Bandwidth Allocation functions */
155 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
156 usba_pipe_handle_data_t *ph,
157 uint_t *pnode,
158 uchar_t *smask,
159 uchar_t *cmask);
160 static int ehci_allocate_high_speed_bandwidth(
161 ehci_state_t *ehcip,
162 usba_pipe_handle_data_t *ph,
163 uint_t *hnode,
164 uchar_t *smask,
165 uchar_t *cmask);
166 static int ehci_allocate_classic_tt_bandwidth(
167 ehci_state_t *ehcip,
168 usba_pipe_handle_data_t *ph,
169 uint_t pnode);
170 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
171 usba_pipe_handle_data_t *ph,
172 uint_t pnode,
173 uchar_t smask,
174 uchar_t cmask);
175 static void ehci_deallocate_high_speed_bandwidth(
176 ehci_state_t *ehcip,
177 usba_pipe_handle_data_t *ph,
178 uint_t hnode,
179 uchar_t smask,
180 uchar_t cmask);
181 static void ehci_deallocate_classic_tt_bandwidth(
182 ehci_state_t *ehcip,
183 usba_pipe_handle_data_t *ph,
184 uint_t pnode);
185 static int ehci_compute_high_speed_bandwidth(
186 ehci_state_t *ehcip,
187 usb_ep_descr_t *endpoint,
188 usb_port_status_t port_status,
189 uint_t *sbandwidth,
190 uint_t *cbandwidth);
191 static int ehci_compute_classic_bandwidth(
192 usb_ep_descr_t *endpoint,
193 usb_port_status_t port_status,
194 uint_t *bandwidth);
195 int ehci_adjust_polling_interval(
196 ehci_state_t *ehcip,
197 usb_ep_descr_t *endpoint,
198 usb_port_status_t port_status);
199 static int ehci_adjust_high_speed_polling_interval(
200 ehci_state_t *ehcip,
201 usb_ep_descr_t *endpoint);
202 static uint_t ehci_lattice_height(uint_t interval);
203 static uint_t ehci_lattice_parent(uint_t node);
204 static uint_t ehci_find_periodic_node(
205 uint_t leaf,
206 int interval);
207 static uint_t ehci_leftmost_leaf(uint_t node,
208 uint_t height);
209 static uint_t ehci_pow_2(uint_t x);
210 static uint_t ehci_log_2(uint_t x);
211 static int ehci_find_bestfit_hs_mask(
212 ehci_state_t *ehcip,
213 uchar_t *smask,
214 uint_t *pnode,
215 usb_ep_descr_t *endpoint,
216 uint_t bandwidth,
217 int interval);
218 static int ehci_find_bestfit_ls_intr_mask(
219 ehci_state_t *ehcip,
220 uchar_t *smask,
221 uchar_t *cmask,
222 uint_t *pnode,
223 uint_t sbandwidth,
224 uint_t cbandwidth,
225 int interval);
226 static int ehci_find_bestfit_sitd_in_mask(
227 ehci_state_t *ehcip,
228 uchar_t *smask,
229 uchar_t *cmask,
230 uint_t *pnode,
231 uint_t sbandwidth,
232 uint_t cbandwidth,
233 int interval);
234 static int ehci_find_bestfit_sitd_out_mask(
235 ehci_state_t *ehcip,
236 uchar_t *smask,
237 uint_t *pnode,
238 uint_t sbandwidth,
239 int interval);
240 static uint_t ehci_calculate_bw_availability_mask(
241 ehci_state_t *ehcip,
242 uint_t bandwidth,
243 int leaf,
244 int leaf_count,
245 uchar_t *bw_mask);
246 static void ehci_update_bw_availability(
247 ehci_state_t *ehcip,
248 int bandwidth,
249 int leftmost_leaf,
250 int leaf_count,
251 uchar_t mask);
252
253 /* Miscellaneous functions */
254 ehci_state_t *ehci_obtain_state(
255 dev_info_t *dip);
256 int ehci_state_is_operational(
257 ehci_state_t *ehcip);
258 int ehci_do_soft_reset(
259 ehci_state_t *ehcip);
260 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
261 ehci_pipe_private_t *pp,
262 ehci_trans_wrapper_t *tw);
263 usb_frame_number_t ehci_get_current_frame_number(
264 ehci_state_t *ehcip);
265 static void ehci_cpr_cleanup(
266 ehci_state_t *ehcip);
267 int ehci_wait_for_sof(
268 ehci_state_t *ehcip);
269 void ehci_toggle_scheduler(
270 ehci_state_t *ehcip);
271 void ehci_print_caps(ehci_state_t *ehcip);
272 void ehci_print_regs(ehci_state_t *ehcip);
273 void ehci_print_qh(ehci_state_t *ehcip,
274 ehci_qh_t *qh);
275 void ehci_print_qtd(ehci_state_t *ehcip,
276 ehci_qtd_t *qtd);
277 void ehci_create_stats(ehci_state_t *ehcip);
278 void ehci_destroy_stats(ehci_state_t *ehcip);
279 void ehci_do_intrs_stats(ehci_state_t *ehcip,
280 int val);
281 void ehci_do_byte_stats(ehci_state_t *ehcip,
282 size_t len,
283 uint8_t attr,
284 uint8_t addr);
285
286 /*
287 * check if this ehci controller can support PM
288 */
289 int
290 ehci_hcdi_pm_support(dev_info_t *dip)
291 {
292 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
293 ddi_get_instance(dip));
294
295 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
296 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
297
298 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
299 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
300
301 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
302
303 return (USB_SUCCESS);
304 }
305
306 return (USB_FAILURE);
307 }
308
309 void
310 ehci_dma_attr_workaround(ehci_state_t *ehcip)
311 {
312 /*
313 	 * Some NVIDIA chips cannot handle a qh dma address above 2G.
314 	 * Bit 31 of the dma address might be dropped, which can
315 	 * cause a system crash or other unpredictable results. So force
316 	 * the dma address to be allocated below 2G to make ehci work.
317 */
318 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
319 switch (ehcip->ehci_device_id) {
320 case PCI_DEVICE_NVIDIA_CK804:
321 case PCI_DEVICE_NVIDIA_MCP04:
322 USB_DPRINTF_L2(PRINT_MASK_ATTA,
323 ehcip->ehci_log_hdl,
324 "ehci_dma_attr_workaround: NVIDIA dma "
325 "workaround enabled, force dma address "
326 "to be allocated below 2G");
327 ehcip->ehci_dma_attr.dma_attr_addr_hi =
328 0x7fffffffull;
329 break;
330 default:
331 break;
332
333 }
334 }
335 }
336
337 /*
338 * Host Controller Driver (HCD) initialization functions
339 */
340
341 /*
342 * ehci_set_dma_attributes:
343 *
344 * Set the limits in the DMA attributes structure. Most of the values used
345 * in the DMA limit structures are the default values as specified by the
346 * Writing PCI device drivers document.
347 */
348 void
349 ehci_set_dma_attributes(ehci_state_t *ehcip)
350 {
351 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
352 "ehci_set_dma_attributes:");
353
354 /* Initialize the DMA attributes */
355 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
356 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
357 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
358
359 /* 32 bit addressing */
360 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
361
362 /* Byte alignment */
363 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
364
365 /*
366 	 * Since the PCI specification requires only byte alignment,
367 	 * the burst size field should be set to 1 for PCI devices.
368 */
369 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
370
371 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
372 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
373 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
374 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
375 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
376 ehcip->ehci_dma_attr.dma_attr_flags = 0;
377 ehci_dma_attr_workaround(ehcip);
378 }
379
380
381 /*
382 * ehci_allocate_pools:
383 *
384 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
385 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
386 * to a 16 byte boundary.
387 */
388 int
389 ehci_allocate_pools(ehci_state_t *ehcip)
390 {
391 ddi_device_acc_attr_t dev_attr;
392 size_t real_length;
393 int result;
394 uint_t ccount;
395 int i;
396
397 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
398 "ehci_allocate_pools:");
399
400 /* The host controller will be little endian */
401 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
402 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
403 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
404
405 /* Byte alignment */
406 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
407
408 /* Allocate the QTD pool DMA handle */
409 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
410 DDI_DMA_SLEEP, 0,
411 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
412
413 goto failure;
414 }
415
416 /* Allocate the memory for the QTD pool */
417 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
418 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
419 &dev_attr,
420 DDI_DMA_CONSISTENT,
421 DDI_DMA_SLEEP,
422 0,
423 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
424 &real_length,
425 &ehcip->ehci_qtd_pool_mem_handle)) {
426
427 goto failure;
428 }
429
430 /* Map the QTD pool into the I/O address space */
431 result = ddi_dma_addr_bind_handle(
432 ehcip->ehci_qtd_pool_dma_handle,
433 NULL,
434 (caddr_t)ehcip->ehci_qtd_pool_addr,
435 real_length,
436 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
437 DDI_DMA_SLEEP,
438 NULL,
439 &ehcip->ehci_qtd_pool_cookie,
440 &ccount);
441
442 bzero((void *)ehcip->ehci_qtd_pool_addr,
443 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
444
445 /* Process the result */
446 if (result == DDI_DMA_MAPPED) {
447 /* The cookie count should be 1 */
448 if (ccount != 1) {
449 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
450 "ehci_allocate_pools: More than 1 cookie");
451
452 goto failure;
453 }
454 } else {
455 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
456 "ehci_allocate_pools: Result = %d", result);
457
458 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
459
460 goto failure;
461 }
462
463 /*
464 * DMA addresses for QTD pools are bound
465 */
466 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
467
468 /* Initialize the QTD pool */
469 for (i = 0; i < ehci_qtd_pool_size; i ++) {
470 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
471 qtd_state, EHCI_QTD_FREE);
472 }
473
474 	/* Allocate the QH pool DMA handle */
475 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
476 &ehcip->ehci_dma_attr,
477 DDI_DMA_SLEEP,
478 0,
479 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
480 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
481 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
482
483 goto failure;
484 }
485
486 /* Allocate the memory for the QH pool */
487 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
488 ehci_qh_pool_size * sizeof (ehci_qh_t),
489 &dev_attr,
490 DDI_DMA_CONSISTENT,
491 DDI_DMA_SLEEP,
492 0,
493 (caddr_t *)&ehcip->ehci_qh_pool_addr,
494 &real_length,
495 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
496 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
497 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
498
499 goto failure;
500 }
501
502 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
503 NULL,
504 (caddr_t)ehcip->ehci_qh_pool_addr,
505 real_length,
506 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
507 DDI_DMA_SLEEP,
508 NULL,
509 &ehcip->ehci_qh_pool_cookie,
510 &ccount);
511
512 bzero((void *)ehcip->ehci_qh_pool_addr,
513 ehci_qh_pool_size * sizeof (ehci_qh_t));
514
515 /* Process the result */
516 if (result == DDI_DMA_MAPPED) {
517 /* The cookie count should be 1 */
518 if (ccount != 1) {
519 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
520 "ehci_allocate_pools: More than 1 cookie");
521
522 goto failure;
523 }
524 } else {
525 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
526
527 goto failure;
528 }
529
530 /*
531 * DMA addresses for QH pools are bound
532 */
533 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
534
535 /* Initialize the QH pool */
536 for (i = 0; i < ehci_qh_pool_size; i ++) {
537 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
538 }
539
540 /* Byte alignment */
541 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
542
543 return (DDI_SUCCESS);
544
545 failure:
546 /* Byte alignment */
547 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
548
549 return (DDI_FAILURE);
550 }
551
552
553 /*
554 * ehci_decode_ddi_dma_addr_bind_handle_result:
555 *
556 * Process the return values of ddi_dma_addr_bind_handle()
557 */
558 void
559 ehci_decode_ddi_dma_addr_bind_handle_result(
560 ehci_state_t *ehcip,
561 int result)
562 {
563 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
564 "ehci_decode_ddi_dma_addr_bind_handle_result:");
565
566 switch (result) {
567 case DDI_DMA_PARTIAL_MAP:
568 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
569 "Partial transfers not allowed");
570 break;
571 case DDI_DMA_INUSE:
572 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
573 "Handle is in use");
574 break;
575 case DDI_DMA_NORESOURCES:
576 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
577 "No resources");
578 break;
579 case DDI_DMA_NOMAPPING:
580 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
581 "No mapping");
582 break;
583 case DDI_DMA_TOOBIG:
584 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
585 "Object is too big");
586 break;
587 default:
588 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
589 "Unknown dma error");
590 }
591 }
592
593
594 /*
595 * ehci_map_regs:
596 *
597 * The Host Controller (HC) contains a set of on-chip operational registers
598 * which should be mapped into a non-cacheable portion of the system
599 * addressable space.
600 */
601 int
602 ehci_map_regs(ehci_state_t *ehcip)
603 {
604 ddi_device_acc_attr_t attr;
605 uint16_t cmd_reg;
606 uint_t length;
607
608 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
609
610 /* Check to make sure we have memory access */
611 if (pci_config_setup(ehcip->ehci_dip,
612 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
613
614 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
615 "ehci_map_regs: Config error");
616
617 return (DDI_FAILURE);
618 }
619
620 /* Make sure Memory Access Enable is set */
621 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
622
623 if (!(cmd_reg & PCI_COMM_MAE)) {
624
625 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
626 "ehci_map_regs: Memory base address access disabled");
627
628 return (DDI_FAILURE);
629 }
630
631 /* The host controller will be little endian */
632 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
633 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
634 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
635
636 /* Map in EHCI Capability registers */
637 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
638 (caddr_t *)&ehcip->ehci_capsp, 0,
639 sizeof (ehci_caps_t), &attr,
640 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
641
642 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
643 "ehci_map_regs: Map setup error");
644
645 return (DDI_FAILURE);
646 }
647
648 length = ddi_get8(ehcip->ehci_caps_handle,
649 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
650
651 /* Free the original mapping */
652 ddi_regs_map_free(&ehcip->ehci_caps_handle);
653
654 /* Re-map in EHCI Capability and Operational registers */
655 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
656 (caddr_t *)&ehcip->ehci_capsp, 0,
657 length + sizeof (ehci_regs_t), &attr,
658 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
659
660 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
661 "ehci_map_regs: Map setup error");
662
663 return (DDI_FAILURE);
664 }
665
666 /* Get the pointer to EHCI Operational Register */
667 ehcip->ehci_regsp = (ehci_regs_t *)
668 ((uintptr_t)ehcip->ehci_capsp + length);
669
670 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
671 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
672 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
673
674 return (DDI_SUCCESS);
675 }
676
677 /*
678 * The following simulated polling is for debugging purposes only.
679 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
680 */
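/*
 * For example (the property name below is the one looked up by the code;
 * the surrounding syntax is assumed, not defined by this file), polled
 * mode could be enabled with a line such as
 *
 *	usb-polling="true";
 *
 * in ehci.conf, or by passing -B usb-polling=true on the kernel line
 * in GRUB.
 */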
681 static int
682 ehci_is_polled(dev_info_t *dip)
683 {
684 int ret;
685 char *propval;
686
687 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
688 "usb-polling", &propval) != DDI_SUCCESS)
689
690 return (0);
691
692 ret = (strcmp(propval, "true") == 0);
693 ddi_prop_free(propval);
694
695 return (ret);
696 }
697
698 static void
699 ehci_poll_intr(void *arg)
700 {
701 /* poll every msec */
702 for (;;) {
703 (void) ehci_intr(arg, NULL);
704 delay(drv_usectohz(1000));
705 }
706 }
707
708 /*
709 * ehci_register_intrs_and_init_mutex:
710 *
711 * Register interrupts and initialize the mutex and condition variables
712 */
713 int
714 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
715 {
716 int intr_types;
717
718 #if defined(__x86)
719 uint8_t iline;
720 #endif
721
722 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
723 "ehci_register_intrs_and_init_mutex:");
724
725 /*
726 * There is a known MSI hardware bug with the EHCI controller
727 	 * of the ULI1575 southbridge. Hence MSI is disabled for this chip.
728 */
729 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
730 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
731 ehcip->ehci_msi_enabled = B_FALSE;
732 } else {
733 /* Set the MSI enable flag from the global EHCI MSI tunable */
734 ehcip->ehci_msi_enabled = ehci_enable_msi;
735 }
736
737 /* launch polling thread instead of enabling pci interrupt */
738 if (ehci_is_polled(ehcip->ehci_dip)) {
739 extern pri_t maxclsyspri;
740
741 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
742 "ehci_register_intrs_and_init_mutex: "
743 "running in simulated polled mode");
744
745 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
746 TS_RUN, maxclsyspri);
747
748 goto skip_intr;
749 }
750
751 #if defined(__x86)
752 /*
753 * Make sure that the interrupt pin is connected to the
754 * interrupt controller on x86. Interrupt line 255 means
755 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
756 	 * If we returned failure when the interrupt line equals 255,
757 	 * high speed devices would be routed to the companion host
758 	 * controllers. However, it is not necessary to return failure
759 	 * here, and the ohci/uhci drivers don't check the interrupt line
760 	 * either. But it's good to log a message here for debug purposes.
761 */
762 iline = pci_config_get8(ehcip->ehci_config_handle,
763 PCI_CONF_ILINE);
764
765 if (iline == 255) {
766 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
767 "ehci_register_intrs_and_init_mutex: "
768 "interrupt line value out of range (%d)",
769 iline);
770 }
771 #endif /* __x86 */
772
773 /* Get supported interrupt types */
774 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
775 &intr_types) != DDI_SUCCESS) {
776 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
777 "ehci_register_intrs_and_init_mutex: "
778 "ddi_intr_get_supported_types failed");
779
780 return (DDI_FAILURE);
781 }
782
783 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
784 "ehci_register_intrs_and_init_mutex: "
785 "supported interrupt types 0x%x", intr_types);
786
787 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
788 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
789 != DDI_SUCCESS) {
790 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
791 "ehci_register_intrs_and_init_mutex: MSI "
792 "registration failed, trying FIXED interrupt \n");
793 } else {
794 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
795 "ehci_register_intrs_and_init_mutex: "
796 "Using MSI interrupt type\n");
797
798 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
799 ehcip->ehci_flags |= EHCI_INTR;
800 }
801 }
802
803 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
804 (intr_types & DDI_INTR_TYPE_FIXED)) {
805 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
806 != DDI_SUCCESS) {
807 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
808 "ehci_register_intrs_and_init_mutex: "
809 "FIXED interrupt registration failed\n");
810
811 return (DDI_FAILURE);
812 }
813
814 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
815 "ehci_register_intrs_and_init_mutex: "
816 "Using FIXED interrupt type\n");
817
818 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
819 ehcip->ehci_flags |= EHCI_INTR;
820 }
821
822 skip_intr:
823 	/* Create the cv used to wait for advance on the async schedule */
824 cv_init(&ehcip->ehci_async_schedule_advance_cv,
825 NULL, CV_DRIVER, NULL);
826
827 return (DDI_SUCCESS);
828 }
829
830
831 /*
832 * ehci_add_intrs:
833 *
834 * Register FIXED or MSI interrupts.
835 */
836 static int
837 ehci_add_intrs(ehci_state_t *ehcip,
838 int intr_type)
839 {
840 int actual, avail, intr_size, count = 0;
841 int i, flag, ret;
842
843 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 "ehci_add_intrs: interrupt type 0x%x", intr_type);
845
846 /* Get number of interrupts */
847 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 if ((ret != DDI_SUCCESS) || (count == 0)) {
849 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 "ret: %d, count: %d", ret, count);
852
853 return (DDI_FAILURE);
854 }
855
856 /* Get number of available interrupts */
857 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 		    "ret: %d, avail: %d", ret, avail);
862
863 return (DDI_FAILURE);
864 }
865
866 if (avail < count) {
867 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868 		    "ehci_add_intrs: ddi_intr_get_nintrs() "
869 "returned %d, navail returned %d\n", count, avail);
870 }
871
872 /* Allocate an array of interrupt handles */
873 intr_size = count * sizeof (ddi_intr_handle_t);
874 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875
876 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878
879 /* call ddi_intr_alloc() */
880 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 intr_type, 0, count, &actual, flag);
882
883 if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886
887 kmem_free(ehcip->ehci_htable, intr_size);
888
889 return (DDI_FAILURE);
890 }
891
892 if (actual < count) {
893 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 "ehci_add_intrs: Requested: %d, Received: %d\n",
895 count, actual);
896
897 for (i = 0; i < actual; i++)
898 (void) ddi_intr_free(ehcip->ehci_htable[i]);
899
900 kmem_free(ehcip->ehci_htable, intr_size);
901
902 return (DDI_FAILURE);
903 }
904
905 ehcip->ehci_intr_cnt = actual;
906
907 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911
912 for (i = 0; i < actual; i++)
913 (void) ddi_intr_free(ehcip->ehci_htable[i]);
914
915 kmem_free(ehcip->ehci_htable, intr_size);
916
917 return (DDI_FAILURE);
918 }
919
920 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 ehcip->ehci_intr_pri);
923
924 /* Test for high level mutex */
925 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 "ehci_add_intrs: Hi level interrupt not supported");
928
929 for (i = 0; i < actual; i++)
930 (void) ddi_intr_free(ehcip->ehci_htable[i]);
931
932 kmem_free(ehcip->ehci_htable, intr_size);
933
934 return (DDI_FAILURE);
935 }
936
937 /* Initialize the mutex */
938 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 DDI_INTR_PRI(ehcip->ehci_intr_pri));
940
941 /* Call ddi_intr_add_handler() */
942 for (i = 0; i < actual; i++) {
943 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 ehci_intr, (caddr_t)ehcip,
945 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 "ehci_add_intrs:ddi_intr_add_handler() "
948 "failed %d", ret);
949
950 for (i = 0; i < actual; i++)
951 (void) ddi_intr_free(ehcip->ehci_htable[i]);
952
953 mutex_destroy(&ehcip->ehci_int_mutex);
954 kmem_free(ehcip->ehci_htable, intr_size);
955
956 return (DDI_FAILURE);
957 }
958 }
959
960 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964
965 for (i = 0; i < actual; i++) {
966 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 (void) ddi_intr_free(ehcip->ehci_htable[i]);
968 }
969
970 mutex_destroy(&ehcip->ehci_int_mutex);
971 kmem_free(ehcip->ehci_htable, intr_size);
972
973 return (DDI_FAILURE);
974 }
975
976 /* Enable all interrupts */
977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 /* Call ddi_intr_block_enable() for MSI interrupts */
979 (void) ddi_intr_block_enable(ehcip->ehci_htable,
980 ehcip->ehci_intr_cnt);
981 } else {
982 /* Call ddi_intr_enable for MSI or FIXED interrupts */
983 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 }
986
987 return (DDI_SUCCESS);
988 }
989
990
991 /*
992 * ehci_init_hardware
993 *
994 * take control from BIOS, reset EHCI host controller, and check version, etc.
995 */
996 int
997 ehci_init_hardware(ehci_state_t *ehcip)
998 {
999 int revision;
1000 uint16_t cmd_reg;
1001 int abort_on_BIOS_take_over_failure;
1002
1003 /* Take control from the BIOS */
1004 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1005
1006 /* read .conf file properties */
1007 abort_on_BIOS_take_over_failure =
1008 ddi_prop_get_int(DDI_DEV_T_ANY,
1009 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1010 "abort-on-BIOS-take-over-failure", 0);
1011
1012 if (abort_on_BIOS_take_over_failure) {
1013
1014 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1015 "Unable to take control from BIOS.");
1016
1017 return (DDI_FAILURE);
1018 }
1019
1020 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1021 "Unable to take control from BIOS. Failure is ignored.");
1022 }
1023
1024 	/* Set Memory Access and Bus Master Enable */
1025 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1026 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1027 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1028
1029 /* Reset the EHCI host controller */
1030 Set_OpReg(ehci_command,
1031 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1032
1033 /* Wait 10ms for reset to complete */
1034 drv_usecwait(EHCI_RESET_TIMEWAIT);
1035
1036 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1037
1038 /* Verify the version number */
1039 revision = Get_16Cap(ehci_version);
1040
1041 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1042 "ehci_init_hardware: Revision 0x%x", revision);
1043
1044 /*
1045 	 * The EHCI driver supports EHCI host controllers compliant with
1046 	 * revision 0.95 and higher of the EHCI specification.
1047 */
1048 if (revision < EHCI_REVISION_0_95) {
1049
1050 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1051 "Revision 0x%x is not supported", revision);
1052
1053 return (DDI_FAILURE);
1054 }
1055
1056 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1057
1058 /* Initialize the Frame list base address area */
1059 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1060
1061 return (DDI_FAILURE);
1062 }
1063
1064 /*
1065 * For performance reasons, do not insert anything into the
1066 * asynchronous list or activate the asynch list schedule until
1067 * there is a valid QH.
1068 */
1069 ehcip->ehci_head_of_async_sched_list = NULL;
1070
1071 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1072 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1073 /*
1074 * The driver is unable to reliably stop the asynch
1075 * list schedule on VIA VT6202 controllers, so we
1076 * always keep a dummy QH on the list.
1077 */
1078 ehci_qh_t *dummy_async_qh =
1079 ehci_alloc_qh(ehcip, NULL, NULL);
1080
1081 Set_QH(dummy_async_qh->qh_link_ptr,
1082 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1083 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1084
1085 /* Set this QH to be the "head" of the circular list */
1086 Set_QH(dummy_async_qh->qh_ctrl,
1087 Get_QH(dummy_async_qh->qh_ctrl) |
1088 EHCI_QH_CTRL_RECLAIM_HEAD);
1089
1090 Set_QH(dummy_async_qh->qh_next_qtd,
1091 EHCI_QH_NEXT_QTD_PTR_VALID);
1092 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1093 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1094
1095 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1096 ehcip->ehci_open_async_count++;
1097 ehcip->ehci_async_req_count++;
1098 }
1099 }
1100
1101 return (DDI_SUCCESS);
1102 }
1103
1104
1105 /*
1106 * ehci_init_workaround
1107 *
1108 * some workarounds applied during ehci initialization
1109 */
1110 int
1111 ehci_init_workaround(ehci_state_t *ehcip)
1112 {
1113 /*
1114 * Acer Labs Inc. M5273 EHCI controller does not send
1115 * interrupts unless the Root hub ports are routed to the EHCI
1116 * host controller; so route the ports now, before we test for
1117 	 * the presence of SOF interrupts.
1118 */
1119 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1120 /* Route all Root hub ports to EHCI host controller */
1121 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1122 }
1123
1124 /*
1125 * VIA chips have some issues and may not work reliably.
1126 * Revisions >= 0x80 are part of a southbridge and appear
1127 * to be reliable with the workaround.
1128 	 * For revisions < 0x80, complain if we were bound using the
1129 	 * generic PCI class, else proceed. This will allow the user to
1130 	 * bind ehci specifically to this chip and not have the
1131 	 * warnings.
1132 */
1133 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1134
1135 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1136
1137 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1138 "ehci_init_workaround: Applying VIA workarounds "
1139 "for the 6212 chip.");
1140
1141 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1142 "pciclass,0c0320") == 0) {
1143
1144 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1145 "Due to recently discovered incompatibilities");
1146 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1147 "with this USB controller, USB2.x transfer");
1148 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1149 "support has been disabled. This device will");
1150 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1151 "continue to function as a USB1.x controller.");
1152 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1153 "If you are interested in enabling USB2.x");
1154 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1155 "support please, refer to the ehci(7D) man page.");
1156 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1157 "Please also refer to www.sun.com/io for");
1158 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1159 "Solaris Ready products and to");
1160 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1161 "www.sun.com/bigadmin/hcl for additional");
1162 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1163 "compatible USB products.");
1164
1165 return (DDI_FAILURE);
1166
1167 } else if (ehci_vt62x2_workaround) {
1168
1169 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1170 "Applying VIA workarounds");
1171 }
1172 }
1173
1174 return (DDI_SUCCESS);
1175 }
1176
1177
1178 /*
1179 * ehci_init_check_status
1180 *
1181 * Check if EHCI host controller is running
1182 */
1183 int
1184 ehci_init_check_status(ehci_state_t *ehcip)
1185 {
1186 clock_t sof_time_wait;
1187
1188 /*
1189 * Get the number of clock ticks to wait.
1190 * This is based on the maximum time it takes for a frame list rollover
1191 	 * and the maximum time to wait for SOFs to begin.
1192 */
1193 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1194 EHCI_SOF_TIMEWAIT);
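	/*
	 * Illustrative arithmetic (assuming the usual 1024-entry periodic
	 * frame list at 1ms per frame): the wait works out to roughly
	 * 1024 * 1000us plus EHCI_SOF_TIMEWAIT, converted to clock ticks.
	 * The actual values come from the macros used above.
	 */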
1195
1196 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1197 ehcip->ehci_flags |= EHCI_CV_INTR;
1198
1199 /* We need to add a delay to allow the chip time to start running */
1200 (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
1201 &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
1202
1203 /*
1204 * Check EHCI host controller is running, otherwise return failure.
1205 */
1206 if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1207 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1208
1209 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1210 		    "No SOF interrupts have been received, this USB EHCI host "
1211 "controller is unusable");
1212
1213 /*
1214 * Route all Root hub ports to Classic host
1215 * controller, in case this is an unusable ALI M5273
1216 * EHCI controller.
1217 */
1218 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1219 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1220 }
1221
1222 return (DDI_FAILURE);
1223 }
1224
1225 return (DDI_SUCCESS);
1226 }
1227
1228
1229 /*
1230 * ehci_init_ctlr:
1231 *
1232 * Initialize the Host Controller (HC).
1233 */
1234 int
1235 ehci_init_ctlr(ehci_state_t *ehcip,
1236 int init_type)
1237 {
1238 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1239
1240 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1241
1242 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1243
1244 return (DDI_FAILURE);
1245 }
1246 }
1247
1248 /*
1249 * Check for Asynchronous schedule park capability feature. If this
1250 	 * feature is supported, then program the ehci command register with
1251 	 * appropriate values.
1252 */
1253 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1254
1255 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1256 "ehci_init_ctlr: Async park mode is supported");
1257
1258 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1259 (EHCI_CMD_ASYNC_PARK_ENABLE |
1260 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1261 }
1262
1263 /*
1264 * Check for programmable periodic frame list feature. If this
1265 	 * feature is supported, then program the ehci command register with
1266 	 * the 1024 frame list value.
1267 */
1268 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1269
1270 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1271 "ehci_init_ctlr: Variable programmable periodic "
1272 "frame list is supported");
1273
1274 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1275 EHCI_CMD_FRAME_1024_SIZE));
1276 }
1277
1278 /*
1279 * Currently EHCI driver doesn't support 64 bit addressing.
1280 *
1281 * If the controller is 64-bit address capable, then program
1282 * ehci_ctrl_segment register with 4 Gigabyte segment where all
1283 * of the interface data structures are allocated.
1284 */
1285 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1286
1287 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1288 "ehci_init_ctlr: EHCI driver doesn't support "
1289 "64 bit addressing");
1290
1291 /* 64 bit addressing is not supported */
1292 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1293 }
1294
1295 /* Turn on/off the schedulers */
1296 ehci_toggle_scheduler(ehcip);
1297
1298 /* Set host controller soft state to operational */
1299 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1300
1301 /*
1302 * Set the Periodic Frame List Base Address register with the
1303 * starting physical address of the Periodic Frame List.
1304 */
1305 Set_OpReg(ehci_periodic_list_base,
1306 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1307 EHCI_PERIODIC_LIST_BASE));
1308
1309 /*
1310 * Set ehci_interrupt to enable all interrupts except Root
1311 * Hub Status change interrupt.
1312 */
1313 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1314 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1315 EHCI_INTR_USB);
1316
1317 /*
1318 * Set the desired interrupt threshold and turn on EHCI host controller.
1319 */
1320 uint32_t cmd_reg = Get_OpReg(ehci_command);
1321
1322 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1323 "%s: cmd_reg: %x\n", __func__, cmd_reg);
1324
1325 cmd_reg &= ~EHCI_CMD_INTR_THRESHOLD;
1326 cmd_reg |= EHCI_CMD_01_INTR;
1327 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
1328
1329 Set_OpReg(ehci_command, cmd_reg | EHCI_CMD_HOST_CTRL_RUN);
1330
1331 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1332
1333 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1334
1335 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1336
1337 /* Set host controller soft state to error */
1338 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1339
1340 return (DDI_FAILURE);
1341 }
1342
1343 if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1344
1345 /* Set host controller soft state to error */
1346 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1347
1348 return (DDI_FAILURE);
1349 }
1350
1351 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1352 "ehci_init_ctlr: SOF's have started");
1353 }
1354
1355 /* Route all Root hub ports to EHCI host controller */
1356 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1357
1358 return (DDI_SUCCESS);
1359 }
1360
1361 /*
1362 * ehci_take_control:
1363 *
1364 * Handshake to take EHCI control from the BIOS if necessary. It is only
1365 * valid for x86 machines, because sparc doesn't have a BIOS.
1366 * On x86 machines, the take-control process includes:
1367 * o get the base address of the extended capability list
1368 * o find out the capability for handoff synchronization in the list.
1369 * o check if BIOS has owned the host controller.
1370 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
1371 * o wait for a constant time and check if BIOS has relinquished control.
1372 */
1373 /* ARGSUSED */
1374 static int
1375 ehci_take_control(ehci_state_t *ehcip)
1376 {
1377 #if defined(__x86)
1378 uint32_t extended_cap;
1379 uint32_t extended_cap_offset;
1380 uint32_t extended_cap_id;
1381 uint_t retry;
1382
1383 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1384 "ehci_take_control:");
1385
1386 /*
1387 	 * According to EHCI Spec 2.2.4, get the EECP base address from HCCPARAMS
1388 * register.
1389 */
1390 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1391 EHCI_HCC_EECP_SHIFT;
1392
1393 /*
1394 	 * According to EHCI Spec 2.2.4, if the extended capability offset is
1395 	 * less than 40h then it is not valid. This means we don't need to
1396 * worry about BIOS handoff.
1397 */
1398 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1399
1400 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1401 "ehci_take_control: Hardware doesn't support legacy.");
1402
1403 goto success;
1404 }
1405
1406 /*
1407 	 * According to EHCI Spec 2.1.7, a zero offset indicates the
1408 * end of the extended capability list.
1409 */
1410 while (extended_cap_offset) {
1411
1412 /* Get the extended capability value. */
1413 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1414 extended_cap_offset);
1415
1416 /* Get the capability ID */
1417 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1418 EHCI_EX_CAP_ID_SHIFT;
1419
1420 		/* Check if the card supports legacy */
1421 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1422 break;
1423 }
1424
1425 /* Get the offset of the next capability */
1426 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1427 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1428 }
1429
1430 /*
1431 * Unable to find legacy support in hardware's extended capability list.
1432 * This means we don't need to worry about BIOS handoff.
1433 */
1434 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1435
1436 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1437 "ehci_take_control: Hardware doesn't support legacy");
1438
1439 goto success;
1440 }
1441
1442 	/* Check if the BIOS currently owns it. */
1443 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1444
1445 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1446 "ehci_take_control: BIOS does not own EHCI");
1447
1448 goto success;
1449 }
1450
1451 /*
1452 	 * According to EHCI Spec 5.1, the OS driver initiates an ownership
1453 * request by setting the OS Owned semaphore to a one. The OS
1454 * waits for the BIOS Owned bit to go to a zero before attempting
1455 * to use the EHCI controller. The time that OS must wait for BIOS
1456 * to respond to the request for ownership is beyond the scope of
1457 * this specification.
1458 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
1459 * for BIOS to release the ownership.
1460 */
1461 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1462 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1463 extended_cap);
1464
1465 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1466
1467 /* wait a special interval */
1468 #ifndef __lock_lint
1469 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1470 #endif
1471 /* Check to see if the BIOS has released the ownership */
1472 extended_cap = pci_config_get32(
1473 ehcip->ehci_config_handle, extended_cap_offset);
1474
1475 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1476
1477 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1478 ehcip->ehci_log_hdl,
1479 "ehci_take_control: BIOS has released "
1480 "the ownership. retry = %d", retry);
1481
1482 goto success;
1483 }
1484
1485 }
1486
1487 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1488 "ehci_take_control: take control from BIOS failed.");
1489
1490 return (USB_FAILURE);
1491
1492 success:
1493
1494 #endif /* __x86 */
1495 return (USB_SUCCESS);
1496 }
1497
1498
1499 /*
1500 * ehci_init_periodic_frame_lst_table:
1501 *
1502 * Allocate the system memory and initialize Host Controller
1503 * Periodic Frame List table area. The start of the Periodic
1504 * Frame List Table area must be 4096-byte aligned.
1505 */
1506 static int
1507 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1508 {
1509 ddi_device_acc_attr_t dev_attr;
1510 size_t real_length;
1511 uint_t ccount;
1512 int result;
1513
1514 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1515
1516 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1517 "ehci_init_periodic_frame_lst_table:");
1518
1519 /* The host controller will be little endian */
1520 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1521 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1522 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1523
1524 /* Force the required 4K restrictive alignment */
1525 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1526
1527 /* Create space for the Periodic Frame List */
1528 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1529 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1530
1531 goto failure;
1532 }
1533
1534 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1535 sizeof (ehci_periodic_frame_list_t),
1536 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1537 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1538 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1539
1540 goto failure;
1541 }
1542
1543 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1544 "ehci_init_periodic_frame_lst_table: "
1545 "Real length %lu", real_length);
1546
1547 /* Map the whole Periodic Frame List into the I/O address space */
1548 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1549 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1550 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1551 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1552
1553 if (result == DDI_DMA_MAPPED) {
1554 /* The cookie count should be 1 */
1555 if (ccount != 1) {
1556 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1557 "ehci_init_periodic_frame_lst_table: "
1558 "More than 1 cookie");
1559
1560 goto failure;
1561 }
1562 } else {
1563 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1564
1565 goto failure;
1566 }
1567
1568 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1569 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1570 (void *)ehcip->ehci_periodic_frame_list_tablep,
1571 ehcip->ehci_pflt_cookie.dmac_address);
1572
1573 /*
1574 * DMA addresses for Periodic Frame List are bound.
1575 */
1576 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1577
1578 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1579
1580 /* Initialize the Periodic Frame List */
1581 ehci_build_interrupt_lattice(ehcip);
1582
1583 /* Reset Byte Alignment to Default */
1584 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1585
1586 return (DDI_SUCCESS);
1587 failure:
1588 /* Byte alignment */
1589 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1590
1591 return (DDI_FAILURE);
1592 }
1593
1594
1595 /*
1596 * ehci_build_interrupt_lattice:
1597 *
1598 * Construct the interrupt lattice tree using static Endpoint Descriptors
1599 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
1600 * lists and the Host Controller (HC) processes one interrupt QH list in
1601 * every frame. The Host Controller traverses the periodic schedule by
1602 * constructing an array offset reference from the Periodic List Base Address
1603 * register and bits 12 to 3 of Frame Index register. It fetches the element
1604 * and begins traversing the graph of linked schedule data structures.
1605 */
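/*
 * Put differently (a paraphrase of the EHCI frame-traversal rule, for
 * illustration only): each frame the controller fetches the list head at
 *
 *	entry = PERIODICLISTBASE + (((FRINDEX >> 3) & 0x3ff) * sizeof (uint32_t))
 *
 * since the low three bits of FRINDEX count microframes within a frame and
 * bits 12:3 select one of the 1024 periodic frame list entries.
 */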
1606 static void
1607 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1608 {
1609 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1610 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1611 ehci_periodic_frame_list_t *periodic_frame_list =
1612 ehcip->ehci_periodic_frame_list_tablep;
1613 ushort_t *temp, num_of_nodes;
1614 uintptr_t addr;
1615 int i, j, k;
1616
1617 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1618 "ehci_build_interrupt_lattice:");
1619
1620 /*
1621 * Reserve the first 63 Endpoint Descriptor (QH) structures
1622 	 * in the pool as static endpoints; these are required for
1623 	 * constructing the interrupt lattice tree.
1624 */
1625 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1626 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1627 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1628 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1629 Set_QH(list_array[i].qh_alt_next_qtd,
1630 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1631 }
1632
1633 /*
1634 	 * Make sure that the last Endpoint on the periodic frame list
1635 	 * terminates the periodic schedule.
1636 */
1637 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1638
1639 /* Build the interrupt lattice tree */
1640 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1641 /*
1642 * The next pointer in the host controller endpoint
1643 * descriptor must contain an iommu address. Calculate
1644 * the offset into the cpu address and add this to the
1645 * starting iommu address.
1646 */
1647 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1648
1649 Set_QH(list_array[2*i + 1].qh_link_ptr,
1650 addr | EHCI_QH_LINK_REF_QH);
1651 Set_QH(list_array[2*i + 2].qh_link_ptr,
1652 addr | EHCI_QH_LINK_REF_QH);
1653 }
1654
1655 /* Build the tree bottom */
1656 temp = (unsigned short *)
1657 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1658
1659 num_of_nodes = 1;
1660
1661 /*
1662 * Initialize the values which are used for setting up head pointers
1663 * for the 32ms scheduling lists which starts from the Periodic Frame
1664 * List.
1665 */
1666 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1667 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1668 ehci_index[j++] = temp[k];
1669 ehci_index[j] = temp[k] + ehci_pow_2(i);
1670 }
1671
1672 num_of_nodes *= 2;
1673 for (k = 0; k < num_of_nodes; k++)
1674 temp[k] = ehci_index[k];
1675 }
1676
1677 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1678
1679 /*
1680 * Initialize the interrupt list in the Periodic Frame List Table
1681 * so that it points to the bottom of the tree.
1682 */
1683 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1684 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1685 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1686
1687 ASSERT(addr);
1688
1689 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1690 Set_PFLT(periodic_frame_list->
1691 ehci_periodic_frame_list_table[ehci_index[j++]],
1692 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1693 }
1694 }
1695 }
1696
1697
1698 /*
1699 * ehci_alloc_hcdi_ops:
1700 *
1701 * The HCDI interfaces or entry points are the software interfaces used by
1702 * the Universal Serial Bus Driver (USBA) to access the services of the
1703 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1704 * about all available HCDI interfaces or entry points.
1705 */
1706 usba_hcdi_ops_t *
1707 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1708 {
1709 usba_hcdi_ops_t *usba_hcdi_ops;
1710
1711 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1712 "ehci_alloc_hcdi_ops:");
1713
1714 usba_hcdi_ops = usba_alloc_hcdi_ops();
1715
1716 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1717
1718 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1719 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1720 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1721
1722 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1723 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1724 ehci_hcdi_pipe_reset_data_toggle;
1725
1726 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1727 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1728 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1729 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1730
1731 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1732 ehci_hcdi_bulk_transfer_size;
1733
1734 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1735 ehci_hcdi_pipe_stop_intr_polling;
1736 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1737 ehci_hcdi_pipe_stop_isoc_polling;
1738
1739 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1740 ehci_hcdi_get_current_frame_number;
1741 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1742 ehci_hcdi_get_max_isoc_pkts;
1743
1744 usba_hcdi_ops->usba_hcdi_console_input_init =
1745 ehci_hcdi_polled_input_init;
1746 usba_hcdi_ops->usba_hcdi_console_input_enter =
1747 ehci_hcdi_polled_input_enter;
1748 usba_hcdi_ops->usba_hcdi_console_read =
1749 ehci_hcdi_polled_read;
1750 usba_hcdi_ops->usba_hcdi_console_input_exit =
1751 ehci_hcdi_polled_input_exit;
1752 usba_hcdi_ops->usba_hcdi_console_input_fini =
1753 ehci_hcdi_polled_input_fini;
1754
1755 usba_hcdi_ops->usba_hcdi_console_output_init =
1756 ehci_hcdi_polled_output_init;
1757 usba_hcdi_ops->usba_hcdi_console_output_enter =
1758 ehci_hcdi_polled_output_enter;
1759 usba_hcdi_ops->usba_hcdi_console_write =
1760 ehci_hcdi_polled_write;
1761 usba_hcdi_ops->usba_hcdi_console_output_exit =
1762 ehci_hcdi_polled_output_exit;
1763 usba_hcdi_ops->usba_hcdi_console_output_fini =
1764 ehci_hcdi_polled_output_fini;
1765 return (usba_hcdi_ops);
1766 }
1767
1768
1769 /*
1770 * Host Controller Driver (HCD) deinitialization functions
1771 */
1772
1773 /*
1774 * ehci_cleanup:
1775 *
1776 * Cleanup on attach failure or detach
1777 */
1778 int
1779 ehci_cleanup(ehci_state_t *ehcip)
1780 {
1781 ehci_trans_wrapper_t *tw;
1782 ehci_pipe_private_t *pp;
1783 ehci_qtd_t *qtd;
1784 int i, ctrl, rval;
1785 int flags = ehcip->ehci_flags;
1786
1787 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1788
1789 if (flags & EHCI_RHREG) {
1790 /* Unload the root hub driver */
1791 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1792
1793 return (DDI_FAILURE);
1794 }
1795 }
1796
1797 if (flags & EHCI_USBAREG) {
1798 /* Unregister this HCD instance with USBA */
1799 usba_hcdi_unregister(ehcip->ehci_dip);
1800 }
1801
1802 if (flags & EHCI_INTR) {
1803
1804 mutex_enter(&ehcip->ehci_int_mutex);
1805
1806 /* Disable all EHCI QH list processing */
1807 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1808 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1809 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1810
1811 /* Disable all EHCI interrupts */
1812 Set_OpReg(ehci_interrupt, 0);
1813
1814 /* wait for the next SOF */
1815 (void) ehci_wait_for_sof(ehcip);
1816
1817 /* Route all Root hub ports to Classic host controller */
1818 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1819
1820 /* Stop the EHCI host controller */
1821 Set_OpReg(ehci_command,
1822 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1823
1824 mutex_exit(&ehcip->ehci_int_mutex);
1825
1826 /* Wait for some time */
1827 delay(drv_usectohz(EHCI_TIMEWAIT));
1828
1829 ehci_rem_intrs(ehcip);
1830 }
1831
1832 /* Unmap the EHCI registers */
1833 if (ehcip->ehci_caps_handle) {
1834 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1835 }
1836
1837 if (ehcip->ehci_config_handle) {
1838 pci_config_teardown(&ehcip->ehci_config_handle);
1839 }
1840
1841 /* Free all the buffers */
1842 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1843 for (i = 0; i < ehci_qtd_pool_size; i++) {
1844 qtd = &ehcip->ehci_qtd_pool_addr[i];
1845 ctrl = Get_QTD(ehcip->
1846 ehci_qtd_pool_addr[i].qtd_state);
1847
1848 if ((ctrl != EHCI_QTD_FREE) &&
1849 (ctrl != EHCI_QTD_DUMMY) &&
1850 (qtd->qtd_trans_wrapper)) {
1851
1852 mutex_enter(&ehcip->ehci_int_mutex);
1853
1854 tw = (ehci_trans_wrapper_t *)
1855 EHCI_LOOKUP_ID((uint32_t)
1856 Get_QTD(qtd->qtd_trans_wrapper));
1857
1858 /* Obtain the pipe private structure */
1859 pp = tw->tw_pipe_private;
1860
1861 /* Stop the transfer timer */
1862 ehci_stop_xfer_timer(ehcip, tw,
1863 EHCI_REMOVE_XFER_ALWAYS);
1864
1865 ehci_deallocate_tw(ehcip, pp, tw);
1866
1867 mutex_exit(&ehcip->ehci_int_mutex);
1868 }
1869 }
1870
1871 /*
1872 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1873 * the handle for QTD pools.
1874 */
1875 if ((ehcip->ehci_dma_addr_bind_flag &
1876 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1877
1878 rval = ddi_dma_unbind_handle(
1879 ehcip->ehci_qtd_pool_dma_handle);
1880
1881 ASSERT(rval == DDI_SUCCESS);
1882 }
1883 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1884 }
1885
1886 /* Free the QTD pool */
1887 if (ehcip->ehci_qtd_pool_dma_handle) {
1888 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1889 }
1890
1891 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1892 /*
1893 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1894 * the handle for QH pools.
1895 */
1896 if ((ehcip->ehci_dma_addr_bind_flag &
1897 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1898
1899 rval = ddi_dma_unbind_handle(
1900 ehcip->ehci_qh_pool_dma_handle);
1901
1902 ASSERT(rval == DDI_SUCCESS);
1903 }
1904
1905 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1906 }
1907
1908 /* Free the QH pool */
1909 if (ehcip->ehci_qh_pool_dma_handle) {
1910 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1911 }
1912
1913 /* Free the Periodic frame list table (PFLT) area */
1914 if (ehcip->ehci_periodic_frame_list_tablep &&
1915 ehcip->ehci_pflt_mem_handle) {
1916 /*
1917 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1918 * the handle for PFLT.
1919 */
1920 if ((ehcip->ehci_dma_addr_bind_flag &
1921 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1922
1923 rval = ddi_dma_unbind_handle(
1924 ehcip->ehci_pflt_dma_handle);
1925
1926 ASSERT(rval == DDI_SUCCESS);
1927 }
1928
1929 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1930 }
1931
1932 (void) ehci_isoc_cleanup(ehcip);
1933
1934 if (ehcip->ehci_pflt_dma_handle) {
1935 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1936 }
1937
1938 if (flags & EHCI_INTR) {
1939 /* Destroy the mutex */
1940 mutex_destroy(&ehcip->ehci_int_mutex);
1941
1942 /* Destroy the async schedule advance condition variable */
1943 cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1944 }
1945
1946 /* clean up kstat structs */
1947 ehci_destroy_stats(ehcip);
1948
1949 /* Free ehci hcdi ops */
1950 if (ehcip->ehci_hcdi_ops) {
1951 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1952 }
1953
1954 if (flags & EHCI_ZALLOC) {
1955
1956 usb_free_log_hdl(ehcip->ehci_log_hdl);
1957
1958 /* Remove all properties that might have been created */
1959 ddi_prop_remove_all(ehcip->ehci_dip);
1960
1961 /* Free the soft state */
1962 ddi_soft_state_free(ehci_statep,
1963 ddi_get_instance(ehcip->ehci_dip));
1964 }
1965
1966 return (DDI_SUCCESS);
1967 }
1968
1969
1970 /*
1971 * ehci_rem_intrs:
1972 *
1973 * Unregister FIXED or MSI interrupts
1974 */
1975 static void
1976 ehci_rem_intrs(ehci_state_t *ehcip)
1977 {
1978 int i;
1979
1980 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1981 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1982
1983 /* Disable all interrupts */
1984 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1985 (void) ddi_intr_block_disable(ehcip->ehci_htable,
1986 ehcip->ehci_intr_cnt);
1987 } else {
1988 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1989 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
1990 }
1991 }
1992
1993 /* Call ddi_intr_remove_handler() */
1994 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1995 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
1996 (void) ddi_intr_free(ehcip->ehci_htable[i]);
1997 }
1998
1999 kmem_free(ehcip->ehci_htable,
2000 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
2001 }
2002
2003
2004 /*
2005 * ehci_cpr_suspend
2006 */
2007 int
2008 ehci_cpr_suspend(ehci_state_t *ehcip)
2009 {
2010 int i;
2011
2012 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2013 "ehci_cpr_suspend:");
2014
2015 /* Call into the root hub and suspend it */
2016 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2017
2018 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2019 "ehci_cpr_suspend: root hub fails to suspend");
2020
2021 return (DDI_FAILURE);
2022 }
2023
2024 /* Only root hub's intr pipe should be open at this time */
2025 mutex_enter(&ehcip->ehci_int_mutex);
2026
2027 ASSERT(ehcip->ehci_open_pipe_count == 0);
2028
2029 /* Just wait till all resources are reclaimed */
2030 i = 0;
2031 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2032 ehci_handle_endpoint_reclaimation(ehcip);
2033 (void) ehci_wait_for_sof(ehcip);
2034 }
2035 ASSERT(ehcip->ehci_reclaim_list == NULL);
2036
2037 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2038 "ehci_cpr_suspend: Disable HC QH list processing");
2039
2040 /* Disable all EHCI QH list processing */
2041 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2042 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2043
2044 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2045 "ehci_cpr_suspend: Disable HC interrupts");
2046
2047 /* Disable all EHCI interrupts */
2048 Set_OpReg(ehci_interrupt, 0);
2049
2050 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2051 "ehci_cpr_suspend: Wait for the next SOF");
2052
2053 /* Wait for the next SOF */
2054 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2055
2056 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2057 "ehci_cpr_suspend: ehci host controller suspend failed");
2058
2059 mutex_exit(&ehcip->ehci_int_mutex);
2060 return (DDI_FAILURE);
2061 }
2062
2063 /*
2064 * Stop the ehci host controller if no usb keyboard
2065 * is connected or if force_ehci_off is set.
2066 */
2067 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2068 Set_OpReg(ehci_command,
2069 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2070
2071 }
2072
2073 /* Set host controller soft state to suspend */
2074 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2075
2076 mutex_exit(&ehcip->ehci_int_mutex);
2077
2078 return (DDI_SUCCESS);
2079 }
2080
2081
2082 /*
2083 * ehci_cpr_resume
2084 */
2085 int
2086 ehci_cpr_resume(ehci_state_t *ehcip)
2087 {
2088 mutex_enter(&ehcip->ehci_int_mutex);
2089
2090 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2091 "ehci_cpr_resume: Restart the controller");
2092
2093 /* Cleanup ehci specific information across cpr */
2094 ehci_cpr_cleanup(ehcip);
2095
2096 /* Restart the controller */
2097 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2098
2099 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2100 "ehci_cpr_resume: ehci host controller resume failed ");
2101
2102 mutex_exit(&ehcip->ehci_int_mutex);
2103
2104 return (DDI_FAILURE);
2105 }
2106
2107 mutex_exit(&ehcip->ehci_int_mutex);
2108
2109 /* Now resume the root hub */
2110 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2111
2112 return (DDI_FAILURE);
2113 }
2114
2115 return (DDI_SUCCESS);
2116 }
2117
2118
2119 /*
2120 * Bandwidth Allocation functions
2121 */
2122
2123 /*
2124 * ehci_allocate_bandwidth:
2125 *
2126 * Figure out whether or not this interval may be supported. Return the index
2127 * into the lattice if it can be supported. Return allocation failure if it
2128 * can not be supported.
2129 */
2130 int
2131 ehci_allocate_bandwidth(
2132 ehci_state_t *ehcip,
2133 usba_pipe_handle_data_t *ph,
2134 uint_t *pnode,
2135 uchar_t *smask,
2136 uchar_t *cmask)
2137 {
2138 int error = USB_SUCCESS;
2139
2140 /* This routine is protected by the ehci_int_mutex */
2141 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2142
2143 /* Reset the pnode before searching for bandwidth */
2144 *pnode = 0;
2145
2146 /* Allocate high speed bandwidth */
2147 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2148 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2149
2150 return (error);
2151 }
2152
2153 /*
2154 * For low/full speed usb devices, allocate classic TT bandwidth
2155 * in addition to high speed bandwidth.
2156 */
2157 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2158
2159 /* Allocate classic TT bandwidth */
2160 if ((error = ehci_allocate_classic_tt_bandwidth(
2161 ehcip, ph, *pnode)) != USB_SUCCESS) {
2162
2163 /* Deallocate high speed bandwidth */
2164 ehci_deallocate_high_speed_bandwidth(
2165 ehcip, ph, *pnode, *smask, *cmask);
2166 }
2167 }
2168
2169 return (error);
2170 }
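/*
 * Typical usage (sketch; the exact callers are assumed to be the periodic
 * pipe open/close paths elsewhere in the driver): with ehci_int_mutex held,
 * pipe open calls ehci_allocate_bandwidth() to obtain the lattice node
 * (pnode) and the start/complete split masks (smask/cmask) that are then
 * programmed into the endpoint's schedule entry, and pipe close hands the
 * very same values back to ehci_deallocate_bandwidth().
 */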
2171
2172
2173 /*
2174 * ehci_allocate_high_speed_bandwidth:
2175 *
2176 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2177 * isochronous endpoints.
2178 */
2179 static int
2180 ehci_allocate_high_speed_bandwidth(
2181 ehci_state_t *ehcip,
2182 usba_pipe_handle_data_t *ph,
2183 uint_t *pnode,
2184 uchar_t *smask,
2185 uchar_t *cmask)
2186 {
2187 uint_t sbandwidth, cbandwidth;
2188 int interval;
2189 usb_ep_descr_t *endpoint = &ph->p_ep;
2190 usba_device_t *child_ud;
2191 usb_port_status_t port_status;
2192 int error;
2193
2194 /* This routine is protected by the ehci_int_mutex */
2195 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2196
2197 /* Get child's usba device structure */
2198 child_ud = ph->p_usba_device;
2199
2200 mutex_enter(&child_ud->usb_mutex);
2201
2202 /* Get the current usb device's port status */
2203 port_status = ph->p_usba_device->usb_port_status;
2204
2205 mutex_exit(&child_ud->usb_mutex);
2206
2207 /*
2208 * Calculate the length in bytes of a transaction on this
2209 * periodic endpoint. Return failure if maximum packet is
2210 * zero.
2211 */
2212 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2213 port_status, &sbandwidth, &cbandwidth);
2214 if (error != USB_SUCCESS) {
2215
2216 return (error);
2217 }
2218
2219 /*
2220 * Adjust polling interval to be a power of 2.
2221 * If this interval can't be supported, return
2222 * allocation failure.
2223 */
2224 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2225 if (interval == USB_FAILURE) {
2226
2227 return (USB_FAILURE);
2228 }
2229
2230 if (port_status == USBA_HIGH_SPEED_DEV) {
2231 /* Allocate bandwidth for high speed devices */
2232 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2233 USB_EP_ATTR_ISOCH) {
2234 error = USB_SUCCESS;
2235 } else {
2236
2237 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2238 endpoint, sbandwidth, interval);
2239 }
2240
2241 *cmask = 0x00;
2242
2243 } else {
2244 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2245 USB_EP_ATTR_INTR) {
2246
2247 /* Allocate bandwidth for low speed interrupt */
2248 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2249 smask, cmask, pnode, sbandwidth, cbandwidth,
2250 interval);
2251 } else {
2252 if ((endpoint->bEndpointAddress &
2253 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2254
2255 /* Allocate bandwidth for sitd in */
2256 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2257 smask, cmask, pnode, sbandwidth, cbandwidth,
2258 interval);
2259 } else {
2260
2261 /* Allocate bandwidth for sitd out */
2262 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2263 smask, pnode, sbandwidth, interval);
2264 *cmask = 0x00;
2265 }
2266 }
2267 }
2268
2269 if (error != USB_SUCCESS) {
2270 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2271 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2272 "bandwidth value and cannot allocate bandwidth for a "
2273 "given high-speed periodic endpoint");
2274
2275 return (USB_NO_BANDWIDTH);
2276 }
2277
2278 return (error);
2279 }
2280
2281
2282 /*
2283 * ehci_allocate_classic_tt_bandwidth:
2284 *
2285 * Allocate classic TT bandwidth for the low/full speed interrupt and
2286 * isochronous endpoints.
2287 */
2288 static int
2289 ehci_allocate_classic_tt_bandwidth(
2290 ehci_state_t *ehcip,
2291 usba_pipe_handle_data_t *ph,
2292 uint_t pnode)
2293 {
2294 uint_t bandwidth, min;
2295 uint_t height, leftmost, list;
2296 usb_ep_descr_t *endpoint = &ph->p_ep;
2297 usba_device_t *child_ud, *parent_ud;
2298 usb_port_status_t port_status;
2299 int i, interval;
2300
2301 /* This routine is protected by the ehci_int_mutex */
2302 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2303
2304 /* Get child's usba device structure */
2305 child_ud = ph->p_usba_device;
2306
2307 mutex_enter(&child_ud->usb_mutex);
2308
2309 /* Get the current usb device's port status */
2310 port_status = child_ud->usb_port_status;
2311
2312 /* Get the parent high speed hub's usba device structure */
2313 parent_ud = child_ud->usb_hs_hub_usba_dev;
2314
2315 mutex_exit(&child_ud->usb_mutex);
2316
2317 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2318 "ehci_allocate_classic_tt_bandwidth: "
2319 "child_ud 0x%p parent_ud 0x%p",
2320 (void *)child_ud, (void *)parent_ud);
2321
2322 /*
2323 * Calculate the length in bytes of a transaction on this
2324 * periodic endpoint. Return failure if maximum packet is
2325 * zero.
2326 */
2327 if (ehci_compute_classic_bandwidth(endpoint,
2328 port_status, &bandwidth) != USB_SUCCESS) {
2329
2330 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2331 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2332 "with zero endpoint maximum packet size is not supported");
2333
2334 return (USB_NOT_SUPPORTED);
2335 }
2336
2337 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2338 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2339
2340 mutex_enter(&parent_ud->usb_mutex);
2341
2342 /*
2343 * If the length in bytes plus the allocated bandwidth exceeds
2344 * the maximum, return bandwidth allocation failure.
2345 */
2346 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2347 FS_PERIODIC_BANDWIDTH) {
2348
2349 mutex_exit(&parent_ud->usb_mutex);
2350
2351 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2352 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2353 "bandwidth value and cannot allocate bandwidth for a "
2354 "given low/full speed periodic endpoint");
2355
2356 return (USB_NO_BANDWIDTH);
2357 }
2358
2359 mutex_exit(&parent_ud->usb_mutex);
2360
2361 /* Adjust polling interval to be a power of 2 */
2362 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2363
2364 /* Find the height in the tree */
2365 height = ehci_lattice_height(interval);
2366
2367 /* Find the leftmost leaf in the subtree specified by the node. */
2368 leftmost = ehci_leftmost_leaf(pnode, height);
2369
2370 mutex_enter(&parent_ud->usb_mutex);
2371
2372 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2373 list = ehci_index[leftmost + i];
2374
2375 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2376 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2377
2378 mutex_exit(&parent_ud->usb_mutex);
2379
2380 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2381 "ehci_allocate_classic_tt_bandwidth: Reached "
2382 "maximum bandwidth value and cannot allocate "
2383 "bandwidth for low/full periodic endpoint");
2384
2385 return (USB_NO_BANDWIDTH);
2386 }
2387 }
2388
2389 /*
2390 * All the leaves for this node must be updated with the bandwidth.
2391 */
2392 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2393 list = ehci_index[leftmost + i];
2394 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2395 }
2396
2397 /* Find the leaf with the smallest allocated bandwidth */
2398 min = parent_ud->usb_hs_hub_bandwidth[0];
2399
2400 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2401 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2402 min = parent_ud->usb_hs_hub_bandwidth[i];
2403 }
2404 }
2405
2406 /* Save the minimum for later use */
2407 parent_ud->usb_hs_hub_min_bandwidth = min;
2408
2409 mutex_exit(&parent_ud->usb_mutex);
2410
2411 return (USB_SUCCESS);
2412 }
2413
2414
2415 /*
2416 * ehci_deallocate_bandwidth:
2417 *
2418 * Deallocate bandwidth for the given node in the lattice and the length
2419 * of transfer.
2420 */
2421 void
2422 ehci_deallocate_bandwidth(
2423 ehci_state_t *ehcip,
2424 usba_pipe_handle_data_t *ph,
2425 uint_t pnode,
2426 uchar_t smask,
2427 uchar_t cmask)
2428 {
2429 /* This routine is protected by the ehci_int_mutex */
2430 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2431
2432 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2433
2434 /*
2435 * For low/full speed usb devices, deallocate classic TT bandwidth
2436 * in addition to high speed bandwidth.
2437 */
2438 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2439
2440 /* Deallocate classic TT bandwidth */
2441 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2442 }
2443 }
2444
2445
2446 /*
2447 * ehci_deallocate_high_speed_bandwidth:
2448 *
2449 * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2450 */
2451 static void
2452 ehci_deallocate_high_speed_bandwidth(
2453 ehci_state_t *ehcip,
2454 usba_pipe_handle_data_t *ph,
2455 uint_t pnode,
2456 uchar_t smask,
2457 uchar_t cmask)
2458 {
2459 uint_t height, leftmost;
2460 uint_t list_count;
2461 uint_t sbandwidth, cbandwidth;
2462 int interval;
2463 usb_ep_descr_t *endpoint = &ph->p_ep;
2464 usba_device_t *child_ud;
2465 usb_port_status_t port_status;
2466
2467 /* This routine is protected by the ehci_int_mutex */
2468 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2469
2470 /* Get child's usba device structure */
2471 child_ud = ph->p_usba_device;
2472
2473 mutex_enter(&child_ud->usb_mutex);
2474
2475 /* Get the current usb device's port status */
2476 port_status = ph->p_usba_device->usb_port_status;
2477
2478 mutex_exit(&child_ud->usb_mutex);
2479
2480 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2481 port_status, &sbandwidth, &cbandwidth);
2482
2483 /* Adjust polling interval to be a power of 2 */
2484 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2485
2486 /* Find the height in the tree */
2487 height = ehci_lattice_height(interval);
2488
2489 /*
2490 * Find the leftmost leaf in the subtree specified by the node
2491 */
2492 leftmost = ehci_leftmost_leaf(pnode, height);
2493
2494 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2495
2496 /* Delete the bandwidth from the appropriate lists */
2497 if (port_status == USBA_HIGH_SPEED_DEV) {
2498
2499 ehci_update_bw_availability(ehcip, -sbandwidth,
2500 leftmost, list_count, smask);
2501 } else {
2502 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2503 USB_EP_ATTR_INTR) {
2504
2505 ehci_update_bw_availability(ehcip, -sbandwidth,
2506 leftmost, list_count, smask);
2507 ehci_update_bw_availability(ehcip, -cbandwidth,
2508 leftmost, list_count, cmask);
2509 } else {
2510 if ((endpoint->bEndpointAddress &
2511 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2512
2513 ehci_update_bw_availability(ehcip, -sbandwidth,
2514 leftmost, list_count, smask);
2515 ehci_update_bw_availability(ehcip,
2516 -MAX_UFRAME_SITD_XFER, leftmost,
2517 list_count, cmask);
2518 } else {
2519
2520 ehci_update_bw_availability(ehcip,
2521 -MAX_UFRAME_SITD_XFER, leftmost,
2522 list_count, smask);
2523 }
2524 }
2525 }
2526 }
2527
2528 /*
2529 * ehci_deallocate_classic_tt_bandwidth:
2530 *
2531 * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2532 */
2533 static void
2534 ehci_deallocate_classic_tt_bandwidth(
2535 ehci_state_t *ehcip,
2536 usba_pipe_handle_data_t *ph,
2537 uint_t pnode)
2538 {
2539 uint_t bandwidth, height, leftmost, list, min;
2540 int i, interval;
2541 usb_ep_descr_t *endpoint = &ph->p_ep;
2542 usba_device_t *child_ud, *parent_ud;
2543 usb_port_status_t port_status;
2544
2545 /* This routine is protected by the ehci_int_mutex */
2546 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2547
2548 /* Get child's usba device structure */
2549 child_ud = ph->p_usba_device;
2550
2551 mutex_enter(&child_ud->usb_mutex);
2552
2553 /* Get the current usb device's port status */
2554 port_status = child_ud->usb_port_status;
2555
2556 /* Get the parent high speed hub's usba device structure */
2557 parent_ud = child_ud->usb_hs_hub_usba_dev;
2558
2559 mutex_exit(&child_ud->usb_mutex);
2560
2561 /* Obtain the bandwidth */
2562 (void) ehci_compute_classic_bandwidth(endpoint,
2563 port_status, &bandwidth);
2564
2565 /* Adjust polling interval to be a power of 2 */
2566 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2567
2568 /* Find the height in the tree */
2569 height = ehci_lattice_height(interval);
2570
2571 /* Find the leftmost leaf in the subtree specified by the node */
2572 leftmost = ehci_leftmost_leaf(pnode, height);
2573
2574 mutex_enter(&parent_ud->usb_mutex);
2575
2576 /* Delete the bandwidth from the appropriate lists */
2577 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2578 list = ehci_index[leftmost + i];
2579 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2580 }
2581
2582 /* Find the leaf with the smallest allocated bandwidth */
2583 min = parent_ud->usb_hs_hub_bandwidth[0];
2584
2585 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2586 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2587 min = parent_ud->usb_hs_hub_bandwidth[i];
2588 }
2589 }
2590
2591 /* Save the minimum for later use */
2592 parent_ud->usb_hs_hub_min_bandwidth = min;
2593
2594 mutex_exit(&parent_ud->usb_mutex);
2595 }
2596
2597
2598 /*
2599 * ehci_compute_high_speed_bandwidth:
2600 *
2601 * Given a periodic endpoint (interrupt or isochronous) determine the total
2602 * bandwidth for one transaction. The EHCI host controller traverses the
2603 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2604 * services an endpoint, only a single transaction attempt is made. The HC
2605 * moves to the next Endpoint Descriptor after the first transaction attempt
2606 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2607 * Transfer Descriptor is inserted into the lattice, we will only count the
2608 * number of bytes for one transaction.
2609 *
2610 * The following formulas are used for calculating bandwidth in terms
2611 * of bytes for a single USB high speed transaction. The protocol
2612 * overheads are different for each type of USB transfer, and all these
2613 * formulas and protocol overheads are derived from section 5.11.3 of the
2614 * USB 2.0 Specification.
2615 *
2616 * High-Speed:
2617 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2618 *
2619 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2620 *
2621 * Protocol overhead + Split transaction overhead +
2622 * ((MaxPktSz * 7)/6) + Host_Delay;
2623 */
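/*
 * Worked example (illustrative only; the *_OVERHEAD and delay values are
 * symbolic constants defined elsewhere): for a high speed interrupt
 * endpoint with wMaxPacketSize = 64 and one transaction per micro-frame,
 * the bit-stuffed payload is (64 * 7) / 6 = 74 bytes, so the start
 * bandwidth charged per micro-frame is
 *     EHCI_HOST_CONTROLLER_DELAY + HS_NON_ISOC_PROTO_OVERHEAD + 74
 * and the complete-split bandwidth is 0. For a full speed interrupt IN
 * endpoint behind a high speed hub, the start bandwidth is that base
 * (delay + protocol overhead) plus START_SPLIT_OVERHEAD, while the
 * complete-split bandwidth is the base plus COMPLETE_SPLIT_OVERHEAD plus
 * the 74 payload bytes.
 */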
2624 /* ARGSUSED */
2625 static int
2626 ehci_compute_high_speed_bandwidth(
2627 ehci_state_t *ehcip,
2628 usb_ep_descr_t *endpoint,
2629 usb_port_status_t port_status,
2630 uint_t *sbandwidth,
2631 uint_t *cbandwidth)
2632 {
2633 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2634
2635 /* Return failure if endpoint maximum packet is zero */
2636 if (maxpacketsize == 0) {
2637 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2638 "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2639 "with zero endpoint maximum packet size is not supported");
2640
2641 return (USB_NOT_SUPPORTED);
2642 }
2643
2644 /* Add bit-stuffing overhead */
2645 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2646
2647 /* Add Host Controller specific delay to required bandwidth */
2648 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2649
2650 /* Add xfer specific protocol overheads */
2651 if ((endpoint->bmAttributes &
2652 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2653 /* High speed interrupt transaction */
2654 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2655 } else {
2656 /* Isochronous transaction */
2657 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2658 }
2659
2660 /*
2661 * For low/full speed devices, add split transaction specific
2662 * overheads.
2663 */
2664 if (port_status != USBA_HIGH_SPEED_DEV) {
2665 /*
2666 * Add start and complete split transaction
2667 * tokens overheads.
2668 */
2669 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2670 *sbandwidth += START_SPLIT_OVERHEAD;
2671
2672 /* Add data overhead depending on data direction */
2673 if ((endpoint->bEndpointAddress &
2674 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2675 *cbandwidth += maxpacketsize;
2676 } else {
2677 if ((endpoint->bmAttributes &
2678 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2679 /* There are no complete splits for isoch out */
2680 *cbandwidth = 0;
2681 }
2682 *sbandwidth += maxpacketsize;
2683 }
2684 } else {
2685 uint_t xactions;
2686
2687 /* Get the max transactions per microframe */
2688 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2689 USB_EP_MAX_XACTS_SHIFT) + 1;
2690
2691 /* High speed transaction */
2692 *sbandwidth += maxpacketsize;
2693
2694 /* Calculate bandwidth per micro-frame */
2695 *sbandwidth *= xactions;
2696
2697 *cbandwidth = 0;
2698 }
2699
2700 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2701 "ehci_allocate_high_speed_bandwidth: "
2702 "Start split bandwidth %d Complete split bandwidth %d",
2703 *sbandwidth, *cbandwidth);
2704
2705 return (USB_SUCCESS);
2706 }
2707
2708
2709 /*
2710 * ehci_compute_classic_bandwidth:
2711 *
2712 * Given a periodic endpoint (interrupt or isochronous) determine the total
2713 * bandwidth for one transaction. The EHCI host controller traverses the
2714 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2715 * services an endpoint, only a single transaction attempt is made. The HC
2716 * moves to the next Endpoint Descriptor after the first transaction attempt
2717 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2718 * Transfer Descriptor is inserted into the lattice, we will only count the
2719 * number of bytes for one transaction.
2720 *
2721 * The following formulas are used for calculating bandwidth in terms
2722 * of bytes for a single USB low/full speed transaction. The protocol
2723 * overheads are different for each type of USB transfer, and all these
2724 * formulas and protocol overheads are derived from section 5.11.3 of the
2725 * USB 2.0 Specification.
2726 *
2727 * Low-Speed:
2728 * Protocol overhead + Hub LS overhead +
2729 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2730 *
2731 * Full-Speed:
2732 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2733 */
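/*
 * Worked example (illustrative only; TT_DELAY, the protocol overheads and
 * LOW_SPEED_CLOCK are symbolic constants defined elsewhere): for a full
 * speed interrupt endpoint with wMaxPacketSize = 8, the bit-stuffed
 * payload is (8 * 7) / 6 = 9 bytes, so the classic TT bandwidth charged
 * per frame is TT_DELAY + 9 + FS_NON_ISOC_PROTO_OVERHEAD. For the same
 * endpoint at low speed, the payload term is instead multiplied by
 * LOW_SPEED_CLOCK and both LOW_SPEED_PROTO_OVERHEAD and
 * HUB_LOW_SPEED_PROTO_OVERHEAD are added.
 */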
2734 /* ARGSUSED */
2735 static int
2736 ehci_compute_classic_bandwidth(
2737 usb_ep_descr_t *endpoint,
2738 usb_port_status_t port_status,
2739 uint_t *bandwidth)
2740 {
2741 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2742
2743 /*
2744 * If endpoint maximum packet is zero, then return immediately.
2745 */
2746 if (maxpacketsize == 0) {
2747
2748 return (USB_NOT_SUPPORTED);
2749 }
2750
2751 /* Add TT delay to required bandwidth */
2752 *bandwidth = TT_DELAY;
2753
2754 /* Add bit-stuffing overhead */
2755 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2756
2757 switch (port_status) {
2758 case USBA_LOW_SPEED_DEV:
2759 /* Low speed interrupt transaction */
2760 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2761 HUB_LOW_SPEED_PROTO_OVERHEAD +
2762 (LOW_SPEED_CLOCK * maxpacketsize));
2763 break;
2764 case USBA_FULL_SPEED_DEV:
2765 /* Full speed transaction */
2766 *bandwidth += maxpacketsize;
2767
2768 /* Add xfer specific protocol overheads */
2769 if ((endpoint->bmAttributes &
2770 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2771 /* Full speed interrupt transaction */
2772 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2773 } else {
2774 /* Isochronous and input transaction */
2775 if ((endpoint->bEndpointAddress &
2776 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2777 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2778 } else {
2779 /* Isochronous and output transaction */
2780 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2781 }
2782 }
2783 break;
2784 }
2785
2786 return (USB_SUCCESS);
2787 }
2788
2789
2790 /*
2791 * ehci_adjust_polling_interval:
2792 *
2793 * Adjust the polling interval according to the usb device speed.
2794 */
2795 /* ARGSUSED */
2796 int
2797 ehci_adjust_polling_interval(
2798 ehci_state_t *ehcip,
2799 usb_ep_descr_t *endpoint,
2800 usb_port_status_t port_status)
2801 {
2802 uint_t interval;
2803 int i = 0;
2804
2805 /* Get the polling interval */
2806 interval = endpoint->bInterval;
2807
2808 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2809 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2810
2811 /*
2812 * According to the USB 2.0 Specification, a high-speed endpoint's
2813 * polling interval is specified in terms of 125us micro-frame
2814 * units, whereas full/low speed endpoints' polling intervals are
2815 * specified in milliseconds.
2816 *
2817 * A high speed interrupt/isochronous endpoint can specify a
2818 * desired polling interval between 1 and 16 micro-frames,
2819 * whereas full/low speed endpoints can specify between 1 and 255
2820 * milliseconds.
2821 */
2822 switch (port_status) {
2823 case USBA_LOW_SPEED_DEV:
2824 /*
2825 * Low speed endpoints are limited to specifying
2826 * only 8ms to 255ms in this driver. If a device
2827 * reports a polling interval that is less than 8ms,
2828 * it will use 8 ms instead.
2829 */
2830 if (interval < LS_MIN_POLL_INTERVAL) {
2831
2832 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2833 "Low speed endpoint's poll interval of %d ms "
2834 "is below threshold. Rounding up to %d ms",
2835 interval, LS_MIN_POLL_INTERVAL);
2836
2837 interval = LS_MIN_POLL_INTERVAL;
2838 }
2839
2840 /*
2841 * Return an error if the polling interval is greater
2842 * than 255ms.
2843 */
2844 if (interval > LS_MAX_POLL_INTERVAL) {
2845
2846 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2847 "Low speed endpoint's poll interval is "
2848 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2849
2850 return (USB_FAILURE);
2851 }
2852 break;
2853
2854 case USBA_FULL_SPEED_DEV:
2855 /*
2856 * Return an error if the polling interval is less
2857 * than 1ms or greater than 255ms.
2858 */
2859 if ((interval < FS_MIN_POLL_INTERVAL) ||
2860 (interval > FS_MAX_POLL_INTERVAL)) {
2861
2862 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2863 "Full speed endpoint's poll interval must "
2864 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2865 FS_MAX_POLL_INTERVAL);
2866
2867 return (USB_FAILURE);
2868 }
2869 break;
2870 case USBA_HIGH_SPEED_DEV:
2871 /*
2872 * Return an error if the polling interval is less than 1
2873 * or greater than 16. Convert this value to 125us
2874 * units using 2^(bInterval - 1). Refer to the usb 2.0 spec
2875 * page 51 for details.
2876 */
2877 if ((interval < HS_MIN_POLL_INTERVAL) ||
2878 (interval > HS_MAX_POLL_INTERVAL)) {
2879
2880 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2881 "High speed endpoint's poll interval "
2882 "must be between %d and %d units",
2883 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2884
2885 return (USB_FAILURE);
2886 }
2887
2888 /* Adjust high speed device polling interval */
2889 interval =
2890 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2891
2892 break;
2893 }
2894
2895 /*
2896 * If the polling interval is greater than 32ms,
2897 * clamp it to 32ms.
2898 */
2899 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2900 interval = EHCI_NUM_INTR_QH_LISTS;
2901 }
2902
2903 /*
2904 * Find the largest power of 2 that is less
2905 * than or equal to the interval.
2906 */
2907 while ((ehci_pow_2(i)) <= interval) {
2908 i++;
2909 }
2910
2911 return (ehci_pow_2((i - 1)));
2912 }
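/*
 * Example of the rounding above (values follow directly from the code):
 * a full speed endpoint reporting bInterval = 6 ms passes the range check
 * and is rounded down to 4 ms (the largest power of 2 that does not
 * exceed 6), while a full/low speed endpoint reporting 64 ms is first
 * clamped to the 32 ms depth of the interrupt lattice.
 */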
2913
2914
2915 /*
2916 * ehci_adjust_high_speed_polling_interval:
2917 */
2918 /* ARGSUSED */
2919 static int
2920 ehci_adjust_high_speed_polling_interval(
2921 ehci_state_t *ehcip,
2922 usb_ep_descr_t *endpoint)
2923 {
2924 uint_t interval;
2925
2926 /* Get the polling interval */
2927 interval = ehci_pow_2(endpoint->bInterval - 1);
2928
2929 /*
2930 * Convert the polling interval from micro-frames
2931 * (125us units) to frames (milliseconds).
2932 */
2933 if (interval <= EHCI_MAX_UFRAMES) {
2934 interval = 1;
2935 } else {
2936 interval = interval/EHCI_MAX_UFRAMES;
2937 }
2938
2939 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2940 "ehci_adjust_high_speed_polling_interval: "
2941 "High speed adjusted interval 0x%x", interval);
2942
2943 return (interval);
2944 }
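/*
 * Example (with EHCI_MAX_UFRAMES micro-frames, i.e. 8, per 1 ms frame):
 * a high speed endpoint with bInterval = 4 polls every 2^(4 - 1) = 8
 * micro-frames, which is exactly one frame, so the adjusted interval is
 * 1 ms; bInterval = 6 gives 32 micro-frames, i.e. a 4 ms interval in the
 * lattice.
 */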
2945
2946
2947 /*
2948 * ehci_lattice_height:
2949 *
2950 * Given the requested polling interval, find the height in the tree at
2951 * which the nodes for this interval fall. The height is measured as the
2952 * number of nodes from the leaf to the level specified by the interval.
2953 * The root of the tree is at height TREE_HEIGHT.
2954 */
2955 static uint_t
2956 ehci_lattice_height(uint_t interval)
2957 {
2958 return (TREE_HEIGHT - (ehci_log_2(interval)));
2959 }
2960
2961
2962 /*
2963 * ehci_lattice_parent:
2964 *
2965 * Given a node in the lattice, find the index of the parent node
2966 */
2967 static uint_t
2968 ehci_lattice_parent(uint_t node)
2969 {
2970 if ((node % 2) == 0) {
2971
2972 return ((node/2) - 1);
2973 } else {
2974
2975 return ((node + 1)/2 - 1);
2976 }
2977 }
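/*
 * Example of the two helpers above: with a 32-leaf lattice the root is
 * node 0 and its children are nodes 1 and 2, so ehci_lattice_parent(1)
 * and ehci_lattice_parent(2) both return 0. For an 8 ms polling interval,
 * ehci_lattice_height(8) returns TREE_HEIGHT - 3; assuming TREE_HEIGHT is
 * 5 (log2 of EHCI_NUM_INTR_QH_LISTS) that is height 2, so the QH hangs
 * two levels above the leaves and its subtree spans 32/8 = 4 leaves.
 */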
2978
2979
2980 /*
2981 * ehci_find_periodic_node:
2982 *
2983 * Based on the "real" array leaf node and interval, get the periodic node.
2984 */
2985 static uint_t
2986 ehci_find_periodic_node(uint_t leaf, int interval) {
2987 uint_t lattice_leaf;
2988 uint_t height = ehci_lattice_height(interval);
2989 uint_t pnode;
2990 int i;
2991
2992 /* Get the leaf number in the lattice */
2993 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
2994
2995 /* Get the node in the lattice based on the height and leaf */
2996 pnode = lattice_leaf;
2997 for (i = 0; i < height; i++) {
2998 pnode = ehci_lattice_parent(pnode);
2999 }
3000
3001 return (pnode);
3002 }
3003
3004
3005 /*
3006 * ehci_leftmost_leaf:
3007 *
3008 * Find the leftmost leaf in the subtree specified by the node. Height refers
3009 * to number of nodes from the bottom of the tree to the node, including the
3010 * node.
3011 *
3012 * The formula for a zero based tree is:
3013 * 2^H * Node + 2^H - 1
3014 * The leaf of the tree is an array, convert the number for the array.
3015 * Subtract the size of nodes not in the array
3016 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3017 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3018 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3019 * 0
3020 * 1 2
3021 * 0 1 2 3
3022 */
3023 static uint_t
3024 ehci_leftmost_leaf(
3025 uint_t node,
3026 uint_t height)
3027 {
3028 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3029 }
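/*
 * Round-trip example for the node/leaf helpers (assuming TREE_HEIGHT is 5
 * and EHCI_NUM_INTR_QH_LISTS is 32): for a 1 ms interval,
 * ehci_find_periodic_node(0, 1) starts at lattice leaf 0 + 32 - 1 = 31 and
 * walks up height 5 parents (31 -> 15 -> 7 -> 3 -> 1 -> 0) to the root;
 * going back down, ehci_leftmost_leaf(0, 5) = 2^5 * 1 - 32 = 0 and the
 * leaf count 32/1 covers all 32 array leaves. For a 32 ms interval the
 * height is 0, so the node is the lattice leaf itself and only one array
 * leaf is charged.
 */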
3030
3031
3032 /*
3033 * ehci_pow_2:
3034 *
3035 * Compute 2 to the power
3036 */
3037 static uint_t
3038 ehci_pow_2(uint_t x)
3039 {
3040 if (x == 0) {
3041
3042 return (1);
3043 } else {
3044
3045 return (2 << (x - 1));
3046 }
3047 }
3048
3049
3050 /*
3051 * ehci_log_2:
3052 *
3053 * Compute log base 2 of x
3054 */
3055 static uint_t
3056 ehci_log_2(uint_t x)
3057 {
3058 int i = 0;
3059
3060 while (x != 1) {
3061 x = x >> 1;
3062 i++;
3063 }
3064
3065 return (i);
3066 }
3067
3068
3069 /*
3070 * ehci_find_bestfit_hs_mask:
3071 *
3072 * Find the smask and cmask in the bandwidth allocation, and update the
3073 * bandwidth allocation.
3074 */
3075 static int
3076 ehci_find_bestfit_hs_mask(
3077 ehci_state_t *ehcip,
3078 uchar_t *smask,
3079 uint_t *pnode,
3080 usb_ep_descr_t *endpoint,
3081 uint_t bandwidth,
3082 int interval)
3083 {
3084 int i;
3085 uint_t elements, index;
3086 int array_leaf, best_array_leaf;
3087 uint_t node_bandwidth, best_node_bandwidth;
3088 uint_t leaf_count;
3089 uchar_t bw_mask;
3090 uchar_t best_smask;
3091
3092 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3093 "ehci_find_bestfit_hs_mask: ");
3094
3095 /* Get all the valid smasks */
3096 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3097 case EHCI_INTR_1US_POLL:
3098 index = EHCI_1US_MASK_INDEX;
3099 elements = EHCI_INTR_1US_POLL;
3100 break;
3101 case EHCI_INTR_2US_POLL:
3102 index = EHCI_2US_MASK_INDEX;
3103 elements = EHCI_INTR_2US_POLL;
3104 break;
3105 case EHCI_INTR_4US_POLL:
3106 index = EHCI_4US_MASK_INDEX;
3107 elements = EHCI_INTR_4US_POLL;
3108 break;
3109 case EHCI_INTR_XUS_POLL:
3110 default:
3111 index = EHCI_XUS_MASK_INDEX;
3112 elements = EHCI_INTR_XUS_POLL;
3113 break;
3114 }
3115
3116 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3117
3118 /*
3119 * Because of the way the leaves are set up, we will automatically
3120 * hit the leftmost leaf of every possible node with this interval.
3121 */
3122 best_smask = 0x00;
3123 best_node_bandwidth = 0;
3124 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3125 /* Find the bandwidth mask */
3126 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3127 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3128
3129 /*
3130 * If this node cannot support our requirements skip to the
3131 * next leaf.
3132 */
3133 if (bw_mask == 0x00) {
3134 continue;
3135 }
3136
3137 /*
3138 * Now make sure our bandwidth requirements can be
3139 * satisfied with one of smasks in this node.
3140 */
3141 *smask = 0x00;
3142 for (i = index; i < (index + elements); i++) {
3143 /* Check the start split mask value */
3144 if (ehci_start_split_mask[i] & bw_mask) {
3145 *smask = ehci_start_split_mask[i];
3146 break;
3147 }
3148 }
3149
3150 /*
3151 * If an appropriate smask is found save the information if:
3152 * o best_smask has not been found yet.
3153 * - or -
3154 * o This is the node with the least amount of bandwidth
3155 */
3156 if ((*smask != 0x00) &&
3157 ((best_smask == 0x00) ||
3158 (best_node_bandwidth > node_bandwidth))) {
3159
3160 best_node_bandwidth = node_bandwidth;
3161 best_array_leaf = array_leaf;
3162 best_smask = *smask;
3163 }
3164 }
3165
3166 /*
3167 * If we find a node that can handle the bandwidth, populate the
3168 * appropriate variables and return success.
3169 */
3170 if (best_smask) {
3171 *smask = best_smask;
3172 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3173 interval);
3174 ehci_update_bw_availability(ehcip, bandwidth,
3175 ehci_index[best_array_leaf], leaf_count, best_smask);
3176
3177 return (USB_SUCCESS);
3178 }
3179
3180 return (USB_FAILURE);
3181 }
3182
3183
3184 /*
3185 * ehci_find_bestfit_ls_intr_mask:
3186 *
3187 * Find the smask and cmask in the bandwidth allocation.
3188 */
3189 static int
3190 ehci_find_bestfit_ls_intr_mask(
3191 ehci_state_t *ehcip,
3192 uchar_t *smask,
3193 uchar_t *cmask,
3194 uint_t *pnode,
3195 uint_t sbandwidth,
3196 uint_t cbandwidth,
3197 int interval)
3198 {
3199 int i;
3200 uint_t elements, index;
3201 int array_leaf, best_array_leaf;
3202 uint_t node_sbandwidth, node_cbandwidth;
3203 uint_t best_node_bandwidth;
3204 uint_t leaf_count;
3205 uchar_t bw_smask, bw_cmask;
3206 uchar_t best_smask, best_cmask;
3207
3208 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3209 "ehci_find_bestfit_ls_intr_mask: ");
3210
3211 /* For low and full speed devices */
3212 index = EHCI_XUS_MASK_INDEX;
3213 elements = EHCI_INTR_4MS_POLL;
3214
3215 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3216
3217 /*
3218 * Because of the way the leaves are set up, we will automatically
3219 * hit the leftmost leaf of every possible node with this interval.
3220 */
3221 best_smask = 0x00;
3222 best_node_bandwidth = 0;
3223 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3224 /* Find the bandwidth mask */
3225 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3226 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3227 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3228 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3229
3230 /*
3231 * If this node cannot support our requirements skip to the
3232 * next leaf.
3233 */
3234 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3235 continue;
3236 }
3237
3238 /*
3239 * Now make sure our bandwidth requirements can be
3240 * satisfied with one of smasks in this node.
3241 */
3242 *smask = 0x00;
3243 *cmask = 0x00;
3244 for (i = index; i < (index + elements); i++) {
3245 /* Check the start split mask value */
3246 if ((ehci_start_split_mask[i] & bw_smask) &&
3247 (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3248 *smask = ehci_start_split_mask[i];
3249 *cmask = ehci_intr_complete_split_mask[i];
3250 break;
3251 }
3252 }
3253
3254 /*
3255 * If an appropriate smask is found save the information if:
3256 * o best_smask has not been found yet.
3257 * - or -
3258 * o This is the node with the least amount of bandwidth
3259 */
3260 if ((*smask != 0x00) &&
3261 ((best_smask == 0x00) ||
3262 (best_node_bandwidth >
3263 (node_sbandwidth + node_cbandwidth)))) {
3264 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3265 best_array_leaf = array_leaf;
3266 best_smask = *smask;
3267 best_cmask = *cmask;
3268 }
3269 }
3270
3271 /*
3272 * If we find a node that can handle the bandwidth, populate the
3273 * appropriate variables and return success.
3274 */
3275 if (best_smask) {
3276 *smask = best_smask;
3277 *cmask = best_cmask;
3278 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3279 interval);
3280 ehci_update_bw_availability(ehcip, sbandwidth,
3281 ehci_index[best_array_leaf], leaf_count, best_smask);
3282 ehci_update_bw_availability(ehcip, cbandwidth,
3283 ehci_index[best_array_leaf], leaf_count, best_cmask);
3284
3285 return (USB_SUCCESS);
3286 }
3287
3288 return (USB_FAILURE);
3289 }
3290
3291
3292 /*
3293 * ehci_find_bestfit_sitd_in_mask:
3294 *
3295 * Find the smask and cmask in the bandwidth allocation.
3296 */
3297 static int
3298 ehci_find_bestfit_sitd_in_mask(
3299 ehci_state_t *ehcip,
3300 uchar_t *smask,
3301 uchar_t *cmask,
3302 uint_t *pnode,
3303 uint_t sbandwidth,
3304 uint_t cbandwidth,
3305 int interval)
3306 {
3307 int i, uFrames, found;
3308 int array_leaf, best_array_leaf;
3309 uint_t node_sbandwidth, node_cbandwidth;
3310 uint_t best_node_bandwidth;
3311 uint_t leaf_count;
3312 uchar_t bw_smask, bw_cmask;
3313 uchar_t best_smask, best_cmask;
3314
3315 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3316 "ehci_find_bestfit_sitd_in_mask: ");
3317
3318 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3319
3320 /*
3321 * Because of the way the leaves are set up, we will automatically
3322 * hit the leftmost leaf of every possible node with this interval.
3323 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3324 */
3325 /*
3326 * Need to add an additional 2 uFrames, if the "L"ast
3327 * complete split is before uFrame 6. See section
3328 * 11.8.4 in the USB 2.0 Spec. Currently we do not support
3329 * the "Back Ptr", which means we only support an IN
3330 * bandwidth of ~4*MAX_UFRAME_SITD_XFER.
3331 */
3332 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3333 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3334 uFrames++;
3335 }
3336 if (uFrames > 6) {
3337
3338 return (USB_FAILURE);
3339 }
3340 *smask = 0x1;
3341 *cmask = 0x00;
3342 for (i = 0; i < uFrames; i++) {
3343 *cmask = *cmask << 1;
3344 *cmask |= 0x1;
3345 }
3346 /* cmask must start 2 uFrames after the smask */
3347 *cmask = *cmask << 2;
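/*
 * Example of the mask construction above (assuming MAX_UFRAME_SITD_XFER
 * is the per-micro-frame split-isoch payload limit, e.g. 188 bytes):
 * cbandwidth = 200 gives uFrames = 200/188 + 2 = 3, plus 1 for the
 * remainder = 4, so the initial masks are smask = 0x01 (start split in
 * micro-frame 0) and cmask = 0x3C (complete splits in micro-frames 2-5);
 * the loop below then slides both masks toward later micro-frames until
 * they fit the node's free micro-frames.
 */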
3348
3349 found = 0;
3350 best_smask = 0x00;
3351 best_node_bandwidth = 0;
3352 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3353 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3354 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3355 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3356 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3357 &bw_cmask);
3358
3359 /*
3360 * If this node cannot support our requirements skip to the
3361 * next leaf.
3362 */
3363 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3364 continue;
3365 }
3366
3367 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3368 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3369 found = 1;
3370 break;
3371 }
3372 *smask = *smask << 1;
3373 *cmask = *cmask << 1;
3374 }
3375
3376 /*
3377 * If an appropriate smask is found save the information if:
3378 * o best_smask has not been found yet.
3379 * - or -
3380 * o This is the node with the least amount of bandwidth
3381 */
3382 if (found &&
3383 ((best_smask == 0x00) ||
3384 (best_node_bandwidth >
3385 (node_sbandwidth + node_cbandwidth)))) {
3386 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3387 best_array_leaf = array_leaf;
3388 best_smask = *smask;
3389 best_cmask = *cmask;
3390 }
3391 }
3392
3393 /*
3394 * If we find a node that can handle the bandwidth, populate the
3395 * appropriate variables and return success.
3396 */
3397 if (best_smask) {
3398 *smask = best_smask;
3399 *cmask = best_cmask;
3400 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3401 interval);
3402 ehci_update_bw_availability(ehcip, sbandwidth,
3403 ehci_index[best_array_leaf], leaf_count, best_smask);
3404 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3405 ehci_index[best_array_leaf], leaf_count, best_cmask);
3406
3407 return (USB_SUCCESS);
3408 }
3409
3410 return (USB_FAILURE);
3411 }
3412
3413
3414 /*
3415 * ehci_find_bestfit_sitd_out_mask:
3416 *
3417 * Find the smask in the bandwidth allocation.
3418 */
3419 static int
3420 ehci_find_bestfit_sitd_out_mask(
3421 ehci_state_t *ehcip,
3422 uchar_t *smask,
3423 uint_t *pnode,
3424 uint_t sbandwidth,
3425 int interval)
3426 {
3427 int i, uFrames, found;
3428 int array_leaf, best_array_leaf;
3429 uint_t node_sbandwidth;
3430 uint_t best_node_bandwidth;
3431 uint_t leaf_count;
3432 uchar_t bw_smask;
3433 uchar_t best_smask;
3434
3435 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3436 "ehci_find_bestfit_sitd_out_mask: ");
3437
3438 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3439
3440 /*
3441 * Because of the way the leaves are set up, we will automatically
3442 * hit the leftmost leaf of every possible node with this interval.
3443 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3444 */
3445 *smask = 0x00;
3446 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3447 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3448 uFrames++;
3449 }
3450 for (i = 0; i < uFrames; i++) {
3451 *smask = *smask << 1;
3452 *smask |= 0x1;
3453 }
3454
3455 found = 0;
3456 best_smask = 0x00;
3457 best_node_bandwidth = 0;
3458 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3459 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3460 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3461 &bw_smask);
3462
3463 /*
3464 * If this node cannot support our requirements skip to the
3465 * next leaf.
3466 */
3467 if (bw_smask == 0x00) {
3468 continue;
3469 }
3470
3471 /* You cannot have a start split on the 8th uFrame */
3472 for (i = 0; (*smask & 0x80) == 0; i++) {
3473 if (*smask & bw_smask) {
3474 found = 1;
3475 break;
3476 }
3477 *smask = *smask << 1;
3478 }
3479
3480 /*
3481 * If an appropriate smask is found save the information if:
3482 * o best_smask has not been found yet.
3483 * - or -
3484 * o This is the node with the least amount of bandwidth
3485 */
3486 if (found &&
3487 ((best_smask == 0x00) ||
3488 (best_node_bandwidth > node_sbandwidth))) {
3489 best_node_bandwidth = node_sbandwidth;
3490 best_array_leaf = array_leaf;
3491 best_smask = *smask;
3492 }
3493 }
3494
3495 /*
3496 * If we find a node that can handle the bandwidth, populate the
3497 * appropriate variables and return success.
3498 */
3499 if (best_smask) {
3500 *smask = best_smask;
3501 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3502 interval);
3503 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3504 ehci_index[best_array_leaf], leaf_count, best_smask);
3505
3506 return (USB_SUCCESS);
3507 }
3508
3509 return (USB_FAILURE);
3510 }
3511
3512
3513 /*
3514 * ehci_calculate_bw_availability_mask:
3515 *
3516 * Returns the "total bandwidth used" in this node.
3517 * Populates bw_mask with the uFrames that can support the bandwidth.
3518 *
3519 * If all the Frames cannot support this bandwidth, then bw_mask
3520 * will return 0x00 and the "total bandwidth used" will be invalid.
3521 */
3522 static uint_t
3523 ehci_calculate_bw_availability_mask(
3524 ehci_state_t *ehcip,
3525 uint_t bandwidth,
3526 int leaf,
3527 int leaf_count,
3528 uchar_t *bw_mask)
3529 {
3530 int i, j;
3531 uchar_t bw_uframe;
3532 int uframe_total;
3533 ehci_frame_bandwidth_t *fbp;
3534 uint_t total_bandwidth = 0;
3535
3536 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3537 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3538 leaf, leaf_count);
3539
3540 /* Start by saying all uFrames are available */
3541 *bw_mask = 0xFF;
3542
3543 for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3544 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3545
3546 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3547
3548 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3549 /*
3550 * If the uFrame in bw_mask is available check to see if
3551 * it can support the additional bandwidth.
3552 */
3553 bw_uframe = (*bw_mask & (0x1 << j));
3554 uframe_total =
3555 fbp->ehci_micro_frame_bandwidth[j] +
3556 bandwidth;
3557 if ((bw_uframe) &&
3558 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3559 *bw_mask = *bw_mask & ~bw_uframe;
3560 }
3561 }
3562 }
3563
3564 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3565 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3566 *bw_mask);
3567
3568 return (total_bandwidth);
3569 }
3570
3571
3572 /*
3573 * ehci_update_bw_availability:
3574 *
3575 * The leftmost leaf needs to be in terms of array position and
3576 * not the actual lattice position.
3577 */
3578 static void
3579 ehci_update_bw_availability(
3580 ehci_state_t *ehcip,
3581 int bandwidth,
3582 int leftmost_leaf,
3583 int leaf_count,
3584 uchar_t mask)
3585 {
3586 int i, j;
3587 ehci_frame_bandwidth_t *fbp;
3588 int uFrame_bandwidth[8];
3589
3590 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3591 "ehci_update_bw_availability: "
3592 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3593 leftmost_leaf, leaf_count, bandwidth, mask);
3594
3595 ASSERT(leftmost_leaf < 32);
3596 ASSERT(leftmost_leaf >= 0);
3597
3598 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3599 if (mask & 0x1) {
3600 uFrame_bandwidth[j] = bandwidth;
3601 } else {
3602 uFrame_bandwidth[j] = 0;
3603 }
3604
3605 mask = mask >> 1;
3606 }
3607
3608 /* Update all the affected leaves with the bandwidth */
3609 for (i = 0; i < leaf_count; i++) {
3610 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3611
3612 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3613 fbp->ehci_micro_frame_bandwidth[j] +=
3614 uFrame_bandwidth[j];
3615 fbp->ehci_allocated_frame_bandwidth +=
3616 uFrame_bandwidth[j];
3617 }
3618 }
3619 }
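/*
 * Example of the mask bookkeeping above: a call with bandwidth = 74,
 * leaf_count = 4 and mask = 0x0C adds 74 bytes to micro-frames 2 and 3 of
 * each of the four affected leaves, and therefore 148 bytes to each
 * leaf's total allocated frame bandwidth; the complementary
 * ehci_calculate_bw_availability_mask() reads these counters back when
 * the next endpoint asks for a slot.
 */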
3620
3621 /*
3622 * Miscellaneous functions
3623 */
3624
3625 /*
3626 * ehci_obtain_state:
3627 *
3628 * NOTE: This function is also called from POLLED MODE.
3629 */
3630 ehci_state_t *
3631 ehci_obtain_state(dev_info_t *dip)
3632 {
3633 int instance = ddi_get_instance(dip);
3634
3635 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3636
3637 ASSERT(state != NULL);
3638
3639 return (state);
3640 }
3641
3642
3643 /*
3644 * ehci_state_is_operational:
3645 *
3646 * Check the Host controller state and return proper values.
3647 */
3648 int
3649 ehci_state_is_operational(ehci_state_t *ehcip)
3650 {
3651 int val;
3652
3653 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3654
3655 switch (ehcip->ehci_hc_soft_state) {
3656 case EHCI_CTLR_INIT_STATE:
3657 case EHCI_CTLR_SUSPEND_STATE:
3658 val = USB_FAILURE;
3659 break;
3660 case EHCI_CTLR_OPERATIONAL_STATE:
3661 val = USB_SUCCESS;
3662 break;
3663 case EHCI_CTLR_ERROR_STATE:
3664 val = USB_HC_HARDWARE_ERROR;
3665 break;
3666 default:
3667 val = USB_FAILURE;
3668 break;
3669 }
3670
3671 return (val);
3672 }
3673
3674
3675 /*
3676 * ehci_do_soft_reset
3677 *
3678 * Do soft reset of ehci host controller.
3679 */
3680 int
3681 ehci_do_soft_reset(ehci_state_t *ehcip)
3682 {
3683 usb_frame_number_t before_frame_number, after_frame_number;
3684 ehci_regs_t *ehci_save_regs;
3685
3686 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3687
3688 /* Increment host controller error count */
3689 ehcip->ehci_hc_error++;
3690
3691 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3692 "ehci_do_soft_reset:"
3693 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3694
3695 /*
3696 * Allocate space for saving current Host Controller
3697 * registers. Don't do any recovery if allocation
3698 * fails.
3699 */
3700 ehci_save_regs = (ehci_regs_t *)
3701 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3702
3703 if (ehci_save_regs == NULL) {
3704 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3705 "ehci_do_soft_reset: kmem_zalloc failed");
3706
3707 return (USB_FAILURE);
3708 }
3709
3710 /* Save current ehci registers */
3711 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3712 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3713 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3714 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3715 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3716 ehci_save_regs->ehci_periodic_list_base =
3717 Get_OpReg(ehci_periodic_list_base);
3718
3719 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3720 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3721
3722 /* Disable all list processing and interrupts */
3723 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3724 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3725
3726 /* Disable all EHCI interrupts */
3727 Set_OpReg(ehci_interrupt, 0);
3728
3729 /* Wait for a few milliseconds */
3730 drv_usecwait(EHCI_SOF_TIMEWAIT);
3731
3732 /* Do light soft reset of ehci host controller */
3733 Set_OpReg(ehci_command,
3734 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3735
3736 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3737 "ehci_do_soft_reset: Reset in progress");
3738
3739 /* Wait for reset to complete */
3740 drv_usecwait(EHCI_RESET_TIMEWAIT);
3741
3742 /*
3743 * Restore previous saved EHCI register value
3744 * into the current EHCI registers.
3745 */
3746 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3747 ehci_save_regs->ehci_ctrl_segment);
3748
3749 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3750 ehci_save_regs->ehci_periodic_list_base);
3751
3752 Set_OpReg(ehci_async_list_addr, (uint32_t)
3753 ehci_save_regs->ehci_async_list_addr);
3754
3755 /*
3756 * For some reason this register might get nulled out by
3757 	 * the Uli M1575 South Bridge. To work around the hardware
3758 	 * problem, check the value after the write and retry if the
3759 * last write fails.
3760 */
3761 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3762 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3763 (ehci_save_regs->ehci_async_list_addr !=
3764 Get_OpReg(ehci_async_list_addr))) {
3765 int retry = 0;
3766
3767 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3768 ehci_save_regs->ehci_async_list_addr, retry);
3769 if (retry >= EHCI_MAX_RETRY) {
3770 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3771 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3772 " ASYNCLISTADDR write failed.");
3773
3774 return (USB_FAILURE);
3775 }
3776 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3777 "ehci_do_soft_reset: ASYNCLISTADDR "
3778 "write failed, retry=%d", retry);
3779 }
3780
3781 Set_OpReg(ehci_config_flag, (uint32_t)
3782 ehci_save_regs->ehci_config_flag);
3783
3784 /* Enable both Asynchronous and Periodic Schedule if necessary */
3785 ehci_toggle_scheduler(ehcip);
3786
3787 /*
3788 	 * Set ehci_interrupt to enable the host system error, frame
3789 	 * list rollover, USB error and USB interrupts only.
3790 */
3791 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3792 EHCI_INTR_FRAME_LIST_ROLLOVER |
3793 EHCI_INTR_USB_ERROR |
3794 EHCI_INTR_USB);
3795
3796 /*
3797 	 * Deallocate the space that was allocated for saving
3798 	 * the HC registers.
3799 */
3800 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3801
3802 /*
3803 * Set the desired interrupt threshold, frame list size (if
3804 	 * applicable) and turn on the EHCI host controller.
3805 */
3806 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3807 ~EHCI_CMD_INTR_THRESHOLD) |
3808 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
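	/*
	 * (Descriptive note, an assumption based on the macro name:
	 * EHCI_CMD_01_INTR selects an interrupt threshold of one
	 * micro-frame, so the controller may assert an interrupt as
	 * often as every 125 usec.)
	 */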
3809
3810 /* Wait 10ms for EHCI to start sending SOF */
3811 drv_usecwait(EHCI_RESET_TIMEWAIT);
3812
3813 /*
3814 * Get the current usb frame number before waiting for
3815 	 * a few milliseconds.
3816 */
3817 before_frame_number = ehci_get_current_frame_number(ehcip);
3818
3819 	/* Wait for a few milliseconds */
3820 drv_usecwait(EHCI_SOF_TIMEWAIT);
3821
3822 /*
3823 * Get the current usb frame number after waiting for
3824 	 * a few milliseconds.
3825 */
3826 after_frame_number = ehci_get_current_frame_number(ehcip);
3827
3828 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3829 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3830 "After Frame Number 0x%llx",
3831 (unsigned long long)before_frame_number,
3832 (unsigned long long)after_frame_number);
3833
3834 if ((after_frame_number <= before_frame_number) &&
3835 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3836
3837 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3838 "ehci_do_soft_reset: Soft reset failed");
3839
3840 return (USB_FAILURE);
3841 }
3842
3843 return (USB_SUCCESS);
3844 }
3845
3846
3847 /*
3848 * ehci_get_xfer_attrs:
3849 *
3850 * Get the attributes of a particular xfer.
3851 *
3852 * NOTE: This function is also called from POLLED MODE.
3853 */
3854 usb_req_attrs_t
3855 ehci_get_xfer_attrs(
3856 ehci_state_t *ehcip,
3857 ehci_pipe_private_t *pp,
3858 ehci_trans_wrapper_t *tw)
3859 {
3860 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3861 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3862
3863 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3864 "ehci_get_xfer_attrs:");
3865
3866 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3867 case USB_EP_ATTR_CONTROL:
3868 attrs = ((usb_ctrl_req_t *)
3869 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3870 break;
3871 case USB_EP_ATTR_BULK:
3872 attrs = ((usb_bulk_req_t *)
3873 tw->tw_curr_xfer_reqp)->bulk_attributes;
3874 break;
3875 case USB_EP_ATTR_INTR:
3876 attrs = ((usb_intr_req_t *)
3877 tw->tw_curr_xfer_reqp)->intr_attributes;
3878 break;
3879 }
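	/*
	 * No isochronous case above: for an isochronous endpoint this
	 * routine simply returns USB_ATTRS_NONE.
	 */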
3880
3881 return (attrs);
3882 }
3883
3884
3885 /*
3886 * ehci_get_current_frame_number:
3887 *
3888 * Get the current software based usb frame number.
3889 */
3890 usb_frame_number_t
3891 ehci_get_current_frame_number(ehci_state_t *ehcip)
3892 {
3893 usb_frame_number_t usb_frame_number;
3894 usb_frame_number_t ehci_fno, micro_frame_number;
3895
3896 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3897
3898 ehci_fno = ehcip->ehci_fno;
3899 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3900
3901 /*
3902 * Calculate current software based usb frame number.
3903 *
3904 	 * This code accounts for the fact that the frame number is
3905 	 * updated by the Host Controller before the ehci driver
3906 	 * gets the FrameListRollover interrupt that adjusts the
3907 	 * higher-order part of the frame number.
3908 	 *
3909 	 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3910 */
3911 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3912 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3913 ehci_fno) & 0x2000);
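	/*
	 * Worked example (illustrative, not from the original source):
	 * if the hardware frame index reads 0x2005 while the software
	 * copy ehci_fno is still 0x0000 because the FrameListRollover
	 * interrupt has not been serviced yet, bit 13 of the two values
	 * differs and 0x2000 is added back in:
	 *
	 *	(0x2005 & 0x1FFF) | 0x0000	= 0x0005
	 *	((0x2005 ^ 0x0000) & 0x2000)	= 0x2000
	 *	micro_frame_number		= 0x0005 + 0x2000 = 0x2005
	 *
	 * Once ehci_fno has been updated by the rollover handler, bit 13
	 * matches again and no correction is added.
	 */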
3914
3915 /*
3916 	 * One micro-frame is equivalent to 125 usec. Eight
3917 	 * micro-frames are equivalent to one millisecond,
3918 	 * or one usb frame number.
3919 */
3920 usb_frame_number = micro_frame_number >>
3921 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
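	/*
	 * Continuing the example above: a micro-frame count of 0x2005
	 * corresponds to usb frame 0x400 (0x2005 >> 3, since eight
	 * micro-frames make up one usb frame).
	 */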
3922
3923 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3924 "ehci_get_current_frame_number: "
3925 "Current usb uframe number = 0x%llx "
3926 "Current usb frame number = 0x%llx",
3927 (unsigned long long)micro_frame_number,
3928 (unsigned long long)usb_frame_number);
3929
3930 return (usb_frame_number);
3931 }
3932
3933
3934 /*
3935 * ehci_cpr_cleanup:
3936 *
3937  * Clean up ehci state and other ehci-specific information across
3938 * Check Point Resume (CPR).
3939 */
3940 static void
3941 ehci_cpr_cleanup(ehci_state_t *ehcip)
3942 {
3943 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3944
3945 /* Reset software part of usb frame number */
3946 ehcip->ehci_fno = 0;
3947 }
3948
3949
3950 /*
3951 * ehci_wait_for_sof:
3952 *
3953  * Wait for a couple of SOF interrupts
3954 */
3955 int
3956 ehci_wait_for_sof(ehci_state_t *ehcip)
3957 {
3958 usb_frame_number_t before_frame_number, after_frame_number;
3959 int error = USB_SUCCESS;
3960
3961 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3962 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3963
3964 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3965
3966 error = ehci_state_is_operational(ehcip);
3967
3968 if (error != USB_SUCCESS) {
3969
3970 return (error);
3971 }
3972
3973 /* Get the current usb frame number before waiting for two SOFs */
3974 before_frame_number = ehci_get_current_frame_number(ehcip);
3975
3976 mutex_exit(&ehcip->ehci_int_mutex);
3977
3978 	/* Wait for a few milliseconds */
3979 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3980
3981 mutex_enter(&ehcip->ehci_int_mutex);
3982
3983 	/* Get the current usb frame number after waking up */
3984 after_frame_number = ehci_get_current_frame_number(ehcip);
3985
3986 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3987 "ehci_wait_for_sof: framenumber: before 0x%llx "
3988 "after 0x%llx",
3989 (unsigned long long)before_frame_number,
3990 (unsigned long long)after_frame_number);
3991
3992 	/* Return failure if the usb frame number has not changed */
3993 if (after_frame_number <= before_frame_number) {
3994
3995 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
3996
3997 USB_DPRINTF_L0(PRINT_MASK_LISTS,
3998 ehcip->ehci_log_hdl, "No SOF interrupts");
3999
4000 /* Set host controller soft state to error */
4001 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
4002
4003 return (USB_FAILURE);
4004 }
4005
4006 }
4007
4008 return (USB_SUCCESS);
4009 }
4010
4011 /*
4012  * Toggle the async/periodic schedule based on the opened pipe count.
4013  * During pipe cleanup (in the pipe reset case), the pipe's QH is
4014  * temporarily disabled, but the TW on the pipe is not freed. In this
4015  * case, we need to disable the async/periodic schedule for some
4016  * incompatible hardware; otherwise, the hardware will overwrite the
4017  * software's configuration of the QH.
4018 */
4019 void
4020 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
4021 {
4022 uint_t temp_reg, cmd_reg;
4023
4024 cmd_reg = Get_OpReg(ehci_command);
4025 temp_reg = cmd_reg;
4026
4027 /*
4028 * Enable/Disable asynchronous scheduler, and
4029 * turn on/off async list door bell
4030 */
4031 if (ehcip->ehci_open_async_count) {
4032 if ((ehcip->ehci_async_req_count > 0) &&
4033 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
4034 /*
4035 * For some reason this address might get nulled out by
4036 * the ehci chip. Set it here just in case it is null.
4037 */
4038 Set_OpReg(ehci_async_list_addr,
4039 ehci_qh_cpu_to_iommu(ehcip,
4040 ehcip->ehci_head_of_async_sched_list));
4041
4042 /*
4043 * For some reason this register might get nulled out by
4044 			 * the Uli M1575 Southbridge. To work around the HW
4045 			 * problem, check the value after the write and retry if the
4046 * last write fails.
4047 *
4048 * If the ASYNCLISTADDR remains "stuck" after
4049 * EHCI_MAX_RETRY retries, then the M1575 is broken
4050 * and is stuck in an inconsistent state and is about
4051 * to crash the machine with a trn_oor panic when it
4052 * does a DMA read from 0x0. It is better to panic
4053 * now rather than wait for the trn_oor crash; this
4054 * way Customer Service will have a clean signature
4055 * that indicts the M1575 chip rather than a
4056 * mysterious and hard-to-diagnose trn_oor panic.
4057 */
4058 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4059 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4060 (ehci_qh_cpu_to_iommu(ehcip,
4061 ehcip->ehci_head_of_async_sched_list) !=
4062 Get_OpReg(ehci_async_list_addr))) {
4063 int retry = 0;
4064
4065 Set_OpRegRetry(ehci_async_list_addr,
4066 ehci_qh_cpu_to_iommu(ehcip,
4067 ehcip->ehci_head_of_async_sched_list),
4068 retry);
4069 if (retry >= EHCI_MAX_RETRY)
4070 cmn_err(CE_PANIC,
4071 "ehci_toggle_scheduler_on_pipe: "
4072 "ASYNCLISTADDR write failed.");
4073
4074 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4075 ehcip->ehci_log_hdl,
4076 "ehci_toggle_scheduler_on_pipe:"
4077 " ASYNCLISTADDR write failed, retry=%d",
4078 retry);
4079 }
4080
4081 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4082 }
4083 } else {
4084 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4085 }
4086
4087 if (ehcip->ehci_open_periodic_count) {
4088 if ((ehcip->ehci_periodic_req_count > 0) &&
4089 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4090 /*
4091 			 * For some reason this address gets nulled out by
4092 * the ehci chip. Set it here just in case it is null.
4093 */
4094 Set_OpReg(ehci_periodic_list_base,
4095 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4096 0xFFFFF000));
4097 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4098 }
4099 } else {
4100 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4101 }
4102
4103 /* Just an optimization */
4104 if (temp_reg != cmd_reg) {
4105 Set_OpReg(ehci_command, cmd_reg);
4106 }
4107 }
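/*
 * Note: ehci_toggle_scheduler_on_pipe() above keys off the open pipe
 * counts (ehci_open_async_count/ehci_open_periodic_count) together with
 * the outstanding request counts, while ehci_toggle_scheduler() below is
 * driven primarily by the request counts; the former is used during pipe
 * cleanup, where a QH is temporarily taken out of the schedule.
 */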
4108
4109
4110 /*
4111 * ehci_toggle_scheduler:
4112 *
4113  * Turn the scheduler on or off based on the pipe open count.
4114 */
4115 void
4116 ehci_toggle_scheduler(ehci_state_t *ehcip)
4117 {
4118 uint_t temp_reg, cmd_reg;
4119
4120 /*
4121 	 * For performance optimization, the schedule enable bits only need
4122 	 * to change when the async req count is 0 or 1, OR when the
4123 	 * periodic req count is 0 or 1. The related bits are already
4124 	 * enabled if
4125 	 *	the async and periodic req counts are both > 1,
4126 	 *	OR the async req count is > 1 and there is no periodic pipe,
4127 	 *	OR the periodic req count is > 1 and there is no async pipe.
4128 */
4129 if (((ehcip->ehci_async_req_count > 1) &&
4130 (ehcip->ehci_periodic_req_count > 1)) ||
4131 ((ehcip->ehci_async_req_count > 1) &&
4132 (ehcip->ehci_open_periodic_count == 0)) ||
4133 ((ehcip->ehci_periodic_req_count > 1) &&
4134 (ehcip->ehci_open_async_count == 0))) {
4135 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4136 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4137 "async/periodic bits no need to change");
4138
4139 return;
4140 }
4141
4142 cmd_reg = Get_OpReg(ehci_command);
4143 temp_reg = cmd_reg;
4144
4145 /*
4146 * Enable/Disable asynchronous scheduler, and
4147 * turn on/off async list door bell
4148 */
4149 if (ehcip->ehci_async_req_count > 1) {
4150 		/* the async bit is already enabled */
4151 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4152 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4153 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4154 } else if (ehcip->ehci_async_req_count == 1) {
4155 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4156 /*
4157 * For some reason this address might get nulled out by
4158 * the ehci chip. Set it here just in case it is null.
4159 * If it's not null, we should not reset the
4160 * ASYNCLISTADDR, because it's updated by hardware to
4161 * point to the next queue head to be executed.
4162 */
4163 if (!Get_OpReg(ehci_async_list_addr)) {
4164 Set_OpReg(ehci_async_list_addr,
4165 ehci_qh_cpu_to_iommu(ehcip,
4166 ehcip->ehci_head_of_async_sched_list));
4167 }
4168
4169 /*
4170 * For some reason this register might get nulled out by
4171 			 * the Uli M1575 Southbridge. To work around the HW
4172 			 * problem, check the value after the write and retry if the
4173 * last write fails.
4174 *
4175 * If the ASYNCLISTADDR remains "stuck" after
4176 * EHCI_MAX_RETRY retries, then the M1575 is broken
4177 * and is stuck in an inconsistent state and is about
4178 * to crash the machine with a trn_oor panic when it
4179 * does a DMA read from 0x0. It is better to panic
4180 * now rather than wait for the trn_oor crash; this
4181 * way Customer Service will have a clean signature
4182 * that indicts the M1575 chip rather than a
4183 * mysterious and hard-to-diagnose trn_oor panic.
4184 */
4185 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4186 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4187 (ehci_qh_cpu_to_iommu(ehcip,
4188 ehcip->ehci_head_of_async_sched_list) !=
4189 Get_OpReg(ehci_async_list_addr))) {
4190 int retry = 0;
4191
4192 Set_OpRegRetry(ehci_async_list_addr,
4193 ehci_qh_cpu_to_iommu(ehcip,
4194 ehcip->ehci_head_of_async_sched_list),
4195 retry);
4196 if (retry >= EHCI_MAX_RETRY)
4197 cmn_err(CE_PANIC,
4198 "ehci_toggle_scheduler: "
4199 "ASYNCLISTADDR write failed.");
4200
4201 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4202 ehcip->ehci_log_hdl,
4203 "ehci_toggle_scheduler: ASYNCLISTADDR "
4204 "write failed, retry=%d", retry);
4205 }
4206 }
4207 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4208 } else {
4209 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4210 }
4211
4212 if (ehcip->ehci_periodic_req_count > 1) {
4213 		/* the periodic bit is already enabled. */
4214 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4215 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4216 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4217 } else if (ehcip->ehci_periodic_req_count == 1) {
4218 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4219 /*
4220 			 * For some reason this address gets nulled out by
4221 * the ehci chip. Set it here just in case it is null.
4222 */
4223 Set_OpReg(ehci_periodic_list_base,
4224 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4225 0xFFFFF000));
4226 }
4227 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4228 } else {
4229 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4230 }
4231
4232 /* Just an optimization */
4233 if (temp_reg != cmd_reg) {
4234 Set_OpReg(ehci_command, cmd_reg);
4235
4236 /* To make sure the command register is updated correctly */
4237 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4238 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4239 int retry = 0;
4240
4241 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4242 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4243 ehcip->ehci_log_hdl,
4244 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4245 retry);
4246 }
4247
4248 }
4249 }
4250
4251 /*
4252 * ehci print functions
4253 */
4254
4255 /*
4256 * ehci_print_caps:
4257 */
4258 void
4259 ehci_print_caps(ehci_state_t *ehcip)
4260 {
4261 uint_t i;
4262
4263 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4264 "\n\tUSB 2.0 Host Controller Characteristics\n");
4265
4266 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4267 "Caps Length: 0x%x Version: 0x%x\n",
4268 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4269
4270 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4271 "Structural Parameters\n");
4272 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4273 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4274 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4275 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4276 "No of Classic host controllers: 0x%x",
4277 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4278 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4279 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4280 "No of ports per Classic host controller: 0x%x",
4281 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4282 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4283 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4284 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4285 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4286 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4287 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4288 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4289 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4290 "No of root hub ports: 0x%x\n",
4291 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4292
4293 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4294 "Capability Parameters\n");
4295 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4296 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4297 EHCI_HCC_EECP) ? "Yes" : "No");
4298 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4299 "Isoch schedule threshold: 0x%x",
4300 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4301 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4302 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4303 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4304 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4305 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4306 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4307 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4308 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4309 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4310
4311 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4312 "Classic Port Route Description");
4313
4314 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4315 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4316 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4317 }
4318 }
4319
4320
4321 /*
4322 * ehci_print_regs:
4323 */
4324 void
4325 ehci_print_regs(ehci_state_t *ehcip)
4326 {
4327 uint_t i;
4328
4329 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4330 "\n\tEHCI%d Operational Registers\n",
4331 ddi_get_instance(ehcip->ehci_dip));
4332
4333 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4334 "Command: 0x%x Status: 0x%x",
4335 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4336 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4337 "Interrupt: 0x%x Frame Index: 0x%x",
4338 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4339 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4340 "Control Segment: 0x%x Periodic List Base: 0x%x",
4341 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4342 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4343 "Async List Addr: 0x%x Config Flag: 0x%x",
4344 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4345
4346 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4347 "Root Hub Port Status");
4348
4349 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4350 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4351 "\tPort Status 0x%x: 0x%x ", i,
4352 Get_OpReg(ehci_rh_port_status[i]));
4353 }
4354 }
4355
4356
4357 /*
4358 * ehci_print_qh:
4359 */
4360 void
4361 ehci_print_qh(
4362 ehci_state_t *ehcip,
4363 ehci_qh_t *qh)
4364 {
4365 uint_t i;
4366
4367 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4368 "ehci_print_qh: qh = 0x%p", (void *)qh);
4369
4370 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4371 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4372 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4373 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4374 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4375 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4376 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4377 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4378 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4379 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4380 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4381 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4382 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4383 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4384
4385 for (i = 0; i < 5; i++) {
4386 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4387 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4388 }
4389
4390 for (i = 0; i < 5; i++) {
4391 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4392 "\tqh_buf_high[%d]: 0x%x ",
4393 i, Get_QH(qh->qh_buf_high[i]));
4394 }
4395
4396 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4397 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4398 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4399 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4400 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4401 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4402 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4403 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4404 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4405 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4406 }
4407
4408
4409 /*
4410 * ehci_print_qtd:
4411 */
4412 void
4413 ehci_print_qtd(
4414 ehci_state_t *ehcip,
4415 ehci_qtd_t *qtd)
4416 {
4417 uint_t i;
4418
4419 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4420 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4421
4422 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4423 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4424 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4425 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4426 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4427 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4428
4429 for (i = 0; i < 5; i++) {
4430 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4431 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4432 }
4433
4434 for (i = 0; i < 5; i++) {
4435 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4436 "\tqtd_buf_high[%d]: 0x%x ",
4437 i, Get_QTD(qtd->qtd_buf_high[i]));
4438 }
4439
4440 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4441 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4442 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4443 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4444 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4445 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4446 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4447 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4448 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4449 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4450 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4451 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4452 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4453 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4454 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4455 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4456 }
4457
4458 /*
4459 * ehci kstat functions
4460 */
4461
4462 /*
4463 * ehci_create_stats:
4464 *
4465 * Allocate and initialize the ehci kstat structures
4466 */
4467 void
4468 ehci_create_stats(ehci_state_t *ehcip)
4469 {
4470 char kstatname[KSTAT_STRLEN];
4471 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4472 char *usbtypes[USB_N_COUNT_KSTATS] =
4473 {"ctrl", "isoch", "bulk", "intr"};
4474 uint_t instance = ehcip->ehci_instance;
4475 ehci_intrs_stats_t *isp;
4476 int i;
4477
4478 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4479 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4480 dname, instance);
4481 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4482 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4483 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4484 KSTAT_FLAG_PERSISTENT);
4485
4486 if (EHCI_INTRS_STATS(ehcip)) {
4487 isp = EHCI_INTRS_STATS_DATA(ehcip);
4488 kstat_named_init(&isp->ehci_sts_total,
4489 "Interrupts Total", KSTAT_DATA_UINT64);
4490 kstat_named_init(&isp->ehci_sts_not_claimed,
4491 "Not Claimed", KSTAT_DATA_UINT64);
4492 kstat_named_init(&isp->ehci_sts_async_sched_status,
4493 "Async schedule status", KSTAT_DATA_UINT64);
4494 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4495 "Periodic sched status", KSTAT_DATA_UINT64);
4496 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4497 "Empty async schedule", KSTAT_DATA_UINT64);
4498 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4499 "Host controller Halted", KSTAT_DATA_UINT64);
4500 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4501 "Intr on async advance", KSTAT_DATA_UINT64);
4502 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4503 "Host system error", KSTAT_DATA_UINT64);
4504 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4505 "Frame list rollover", KSTAT_DATA_UINT64);
4506 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4507 "Port change detect", KSTAT_DATA_UINT64);
4508 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4509 "USB error interrupt", KSTAT_DATA_UINT64);
4510 kstat_named_init(&isp->ehci_sts_usb_intr,
4511 "USB interrupt", KSTAT_DATA_UINT64);
4512
4513 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4514 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4515 kstat_install(EHCI_INTRS_STATS(ehcip));
4516 }
4517 }
4518
4519 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4520 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4521 dname, instance);
4522 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4523 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4524 KSTAT_FLAG_PERSISTENT);
4525
4526 if (EHCI_TOTAL_STATS(ehcip)) {
4527 kstat_install(EHCI_TOTAL_STATS(ehcip));
4528 }
4529 }
4530
4531 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4532 if (ehcip->ehci_count_stats[i] == NULL) {
4533 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4534 dname, instance, usbtypes[i]);
4535 ehcip->ehci_count_stats[i] = kstat_create("usba",
4536 instance, kstatname, "usb_byte_count",
4537 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4538
4539 if (ehcip->ehci_count_stats[i]) {
4540 kstat_install(ehcip->ehci_count_stats[i]);
4541 }
4542 }
4543 }
4544 }
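/*
 * Illustrative note (assumption, not part of the original source): the
 * kstats created above can typically be examined from userland with
 * kstat(1M), e.g.
 *
 *	# kstat -m usba -n 'ehci0,intrs'
 *	# kstat -m usba -n 'ehci0,bulk'
 *
 * where "ehci0" reflects the "%s%d" driver-name/instance prefix used
 * when the kstat names are built above.
 */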
4545
4546
4547 /*
4548 * ehci_destroy_stats:
4549 *
4550 * Clean up ehci kstat structures
4551 */
4552 void
4553 ehci_destroy_stats(ehci_state_t *ehcip)
4554 {
4555 int i;
4556
4557 if (EHCI_INTRS_STATS(ehcip)) {
4558 kstat_delete(EHCI_INTRS_STATS(ehcip));
4559 EHCI_INTRS_STATS(ehcip) = NULL;
4560 }
4561
4562 if (EHCI_TOTAL_STATS(ehcip)) {
4563 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4564 EHCI_TOTAL_STATS(ehcip) = NULL;
4565 }
4566
4567 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4568 if (ehcip->ehci_count_stats[i]) {
4569 kstat_delete(ehcip->ehci_count_stats[i]);
4570 ehcip->ehci_count_stats[i] = NULL;
4571 }
4572 }
4573 }
4574
4575
4576 /*
4577 * ehci_do_intrs_stats:
4578 *
4579 * ehci status information
4580 */
4581 void
4582 ehci_do_intrs_stats(
4583 ehci_state_t *ehcip,
4584 int val)
4585 {
4586 if (EHCI_INTRS_STATS(ehcip)) {
4587 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4588 switch (val) {
4589 case EHCI_STS_ASYNC_SCHED_STATUS:
4590 EHCI_INTRS_STATS_DATA(ehcip)->
4591 ehci_sts_async_sched_status.value.ui64++;
4592 break;
4593 case EHCI_STS_PERIODIC_SCHED_STATUS:
4594 EHCI_INTRS_STATS_DATA(ehcip)->
4595 ehci_sts_periodic_sched_status.value.ui64++;
4596 break;
4597 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4598 EHCI_INTRS_STATS_DATA(ehcip)->
4599 ehci_sts_empty_async_schedule.value.ui64++;
4600 break;
4601 case EHCI_STS_HOST_CTRL_HALTED:
4602 EHCI_INTRS_STATS_DATA(ehcip)->
4603 ehci_sts_host_ctrl_halted.value.ui64++;
4604 break;
4605 case EHCI_STS_ASYNC_ADVANCE_INTR:
4606 EHCI_INTRS_STATS_DATA(ehcip)->
4607 ehci_sts_async_advance_intr.value.ui64++;
4608 break;
4609 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4610 EHCI_INTRS_STATS_DATA(ehcip)->
4611 ehci_sts_host_system_error_intr.value.ui64++;
4612 break;
4613 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4614 EHCI_INTRS_STATS_DATA(ehcip)->
4615 ehci_sts_frm_list_rollover_intr.value.ui64++;
4616 break;
4617 case EHCI_STS_RH_PORT_CHANGE_INTR:
4618 EHCI_INTRS_STATS_DATA(ehcip)->
4619 ehci_sts_rh_port_change_intr.value.ui64++;
4620 break;
4621 case EHCI_STS_USB_ERROR_INTR:
4622 EHCI_INTRS_STATS_DATA(ehcip)->
4623 ehci_sts_usb_error_intr.value.ui64++;
4624 break;
4625 case EHCI_STS_USB_INTR:
4626 EHCI_INTRS_STATS_DATA(ehcip)->
4627 ehci_sts_usb_intr.value.ui64++;
4628 break;
4629 default:
4630 EHCI_INTRS_STATS_DATA(ehcip)->
4631 ehci_sts_not_claimed.value.ui64++;
4632 break;
4633 }
4634 }
4635 }
4636
4637
4638 /*
4639 * ehci_do_byte_stats:
4640 *
4641 * ehci data xfer information
4642 */
4643 void
4644 ehci_do_byte_stats(
4645 ehci_state_t *ehcip,
4646 size_t len,
4647 uint8_t attr,
4648 uint8_t addr)
4649 {
4650 uint8_t type = attr & USB_EP_ATTR_MASK;
4651 uint8_t dir = addr & USB_EP_DIR_MASK;
4652
4653 if (dir == USB_EP_DIR_IN) {
4654 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4655 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4656 switch (type) {
4657 case USB_EP_ATTR_CONTROL:
4658 EHCI_CTRL_STATS(ehcip)->reads++;
4659 EHCI_CTRL_STATS(ehcip)->nread += len;
4660 break;
4661 case USB_EP_ATTR_BULK:
4662 EHCI_BULK_STATS(ehcip)->reads++;
4663 EHCI_BULK_STATS(ehcip)->nread += len;
4664 break;
4665 case USB_EP_ATTR_INTR:
4666 EHCI_INTR_STATS(ehcip)->reads++;
4667 EHCI_INTR_STATS(ehcip)->nread += len;
4668 break;
4669 case USB_EP_ATTR_ISOCH:
4670 EHCI_ISOC_STATS(ehcip)->reads++;
4671 EHCI_ISOC_STATS(ehcip)->nread += len;
4672 break;
4673 }
4674 } else if (dir == USB_EP_DIR_OUT) {
4675 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4676 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4677 switch (type) {
4678 case USB_EP_ATTR_CONTROL:
4679 EHCI_CTRL_STATS(ehcip)->writes++;
4680 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4681 break;
4682 case USB_EP_ATTR_BULK:
4683 EHCI_BULK_STATS(ehcip)->writes++;
4684 EHCI_BULK_STATS(ehcip)->nwritten += len;
4685 break;
4686 case USB_EP_ATTR_INTR:
4687 EHCI_INTR_STATS(ehcip)->writes++;
4688 EHCI_INTR_STATS(ehcip)->nwritten += len;
4689 break;
4690 case USB_EP_ATTR_ISOCH:
4691 EHCI_ISOC_STATS(ehcip)->writes++;
4692 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4693 break;
4694 }
4695 }
4696 }
4697