1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * EHCI Host Controller Driver (EHCI)
28 *
29 * The EHCI driver is a software driver which interfaces to the Universal
30 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
31 * the Host Controller is defined by the EHCI Host Controller Interface.
32 *
33 * This module contains the main EHCI driver code which handles all USB
34 * transfers, bandwidth allocations and other general functionalities.
35 */
36
37 #include <sys/usb/hcd/ehci/ehcid.h>
38 #include <sys/usb/hcd/ehci/ehci_intr.h>
39 #include <sys/usb/hcd/ehci/ehci_util.h>
40 #include <sys/usb/hcd/ehci/ehci_isoch.h>
41
42 /* Adjustable variables for the size of the pools */
43 extern int ehci_qh_pool_size;
44 extern int ehci_qtd_pool_size;
45
46
47 /* Endpoint Descriptor (QH) related functions */
48 ehci_qh_t *ehci_alloc_qh(
49 ehci_state_t *ehcip,
50 usba_pipe_handle_data_t *ph,
51 uint_t flag);
52 static void ehci_unpack_endpoint(
53 ehci_state_t *ehcip,
54 usba_pipe_handle_data_t *ph,
55 ehci_qh_t *qh);
56 void ehci_insert_qh(
57 ehci_state_t *ehcip,
58 usba_pipe_handle_data_t *ph);
59 static void ehci_insert_async_qh(
60 ehci_state_t *ehcip,
61 ehci_pipe_private_t *pp);
62 static void ehci_insert_intr_qh(
63 ehci_state_t *ehcip,
64 ehci_pipe_private_t *pp);
65 static void ehci_modify_qh_status_bit(
66 ehci_state_t *ehcip,
67 ehci_pipe_private_t *pp,
68 halt_bit_t action);
69 static void ehci_halt_hs_qh(
70 ehci_state_t *ehcip,
71 ehci_pipe_private_t *pp,
72 ehci_qh_t *qh);
73 static void ehci_halt_fls_ctrl_and_bulk_qh(
74 ehci_state_t *ehcip,
75 ehci_pipe_private_t *pp,
76 ehci_qh_t *qh);
77 static void ehci_clear_tt_buffer(
78 ehci_state_t *ehcip,
79 usba_pipe_handle_data_t *ph,
80 ehci_qh_t *qh);
81 static void ehci_halt_fls_intr_qh(
82 ehci_state_t *ehcip,
83 ehci_qh_t *qh);
84 void ehci_remove_qh(
85 ehci_state_t *ehcip,
86 ehci_pipe_private_t *pp,
87 boolean_t reclaim);
88 static void ehci_remove_async_qh(
89 ehci_state_t *ehcip,
90 ehci_pipe_private_t *pp,
91 boolean_t reclaim);
92 static void ehci_remove_intr_qh(
93 ehci_state_t *ehcip,
94 ehci_pipe_private_t *pp,
95 boolean_t reclaim);
96 static void ehci_insert_qh_on_reclaim_list(
97 ehci_state_t *ehcip,
98 ehci_pipe_private_t *pp);
99 void ehci_deallocate_qh(
100 ehci_state_t *ehcip,
101 ehci_qh_t *old_qh);
102 uint32_t ehci_qh_cpu_to_iommu(
103 ehci_state_t *ehcip,
104 ehci_qh_t *addr);
105 ehci_qh_t *ehci_qh_iommu_to_cpu(
106 ehci_state_t *ehcip,
107 uintptr_t addr);
108
109 /* Transfer Descriptor (QTD) related functions */
110 static int ehci_initialize_dummy(
111 ehci_state_t *ehcip,
112 ehci_qh_t *qh);
113 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
114 ehci_state_t *ehcip,
115 ehci_pipe_private_t *pp,
116 usb_ctrl_req_t *ctrl_reqp,
117 usb_flags_t usb_flags);
118 void ehci_insert_ctrl_req(
119 ehci_state_t *ehcip,
120 usba_pipe_handle_data_t *ph,
121 usb_ctrl_req_t *ctrl_reqp,
122 ehci_trans_wrapper_t *tw,
123 usb_flags_t usb_flags);
124 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
125 ehci_state_t *ehcip,
126 ehci_pipe_private_t *pp,
127 usb_bulk_req_t *bulk_reqp,
128 usb_flags_t usb_flags);
129 void ehci_insert_bulk_req(
130 ehci_state_t *ehcip,
131 usba_pipe_handle_data_t *ph,
132 usb_bulk_req_t *bulk_reqp,
133 ehci_trans_wrapper_t *tw,
134 usb_flags_t flags);
135 int ehci_start_periodic_pipe_polling(
136 ehci_state_t *ehcip,
137 usba_pipe_handle_data_t *ph,
138 usb_opaque_t periodic_in_reqp,
139 usb_flags_t flags);
140 static int ehci_start_pipe_polling(
141 ehci_state_t *ehcip,
142 usba_pipe_handle_data_t *ph,
143 usb_flags_t flags);
144 static int ehci_start_intr_polling(
145 ehci_state_t *ehcip,
146 usba_pipe_handle_data_t *ph,
147 usb_flags_t flags);
148 static void ehci_set_periodic_pipe_polling(
149 ehci_state_t *ehcip,
150 usba_pipe_handle_data_t *ph);
151 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
152 ehci_state_t *ehcip,
153 usba_pipe_handle_data_t *ph,
154 usb_intr_req_t *intr_reqp,
155 usb_flags_t usb_flags);
156 void ehci_insert_intr_req(
157 ehci_state_t *ehcip,
158 ehci_pipe_private_t *pp,
159 ehci_trans_wrapper_t *tw,
160 usb_flags_t flags);
161 int ehci_stop_periodic_pipe_polling(
162 ehci_state_t *ehcip,
163 usba_pipe_handle_data_t *ph,
164 usb_flags_t flags);
165 int ehci_insert_qtd(
166 ehci_state_t *ehcip,
167 uint32_t qtd_ctrl,
168 size_t qtd_dma_offs,
169 size_t qtd_length,
170 uint32_t qtd_ctrl_phase,
171 ehci_pipe_private_t *pp,
172 ehci_trans_wrapper_t *tw);
173 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
174 ehci_state_t *ehcip);
175 static void ehci_fill_in_qtd(
176 ehci_state_t *ehcip,
177 ehci_qtd_t *qtd,
178 uint32_t qtd_ctrl,
179 size_t qtd_dma_offs,
180 size_t qtd_length,
181 uint32_t qtd_ctrl_phase,
182 ehci_pipe_private_t *pp,
183 ehci_trans_wrapper_t *tw);
184 static void ehci_insert_qtd_on_tw(
185 ehci_state_t *ehcip,
186 ehci_trans_wrapper_t *tw,
187 ehci_qtd_t *qtd);
188 static void ehci_insert_qtd_into_active_qtd_list(
189 ehci_state_t *ehcip,
190 ehci_qtd_t *curr_qtd);
191 void ehci_remove_qtd_from_active_qtd_list(
192 ehci_state_t *ehcip,
193 ehci_qtd_t *curr_qtd);
194 static void ehci_traverse_qtds(
195 ehci_state_t *ehcip,
196 usba_pipe_handle_data_t *ph);
197 void ehci_deallocate_qtd(
198 ehci_state_t *ehcip,
199 ehci_qtd_t *old_qtd);
200 uint32_t ehci_qtd_cpu_to_iommu(
201 ehci_state_t *ehcip,
202 ehci_qtd_t *addr);
203 ehci_qtd_t *ehci_qtd_iommu_to_cpu(
204 ehci_state_t *ehcip,
205 uintptr_t addr);
206
207 /* Transfer Wrapper (TW) functions */
208 static ehci_trans_wrapper_t *ehci_create_transfer_wrapper(
209 ehci_state_t *ehcip,
210 ehci_pipe_private_t *pp,
211 size_t length,
212 uint_t usb_flags);
213 int ehci_allocate_tds_for_tw(
214 ehci_state_t *ehcip,
215 ehci_pipe_private_t *pp,
216 ehci_trans_wrapper_t *tw,
217 size_t qtd_count);
218 static ehci_trans_wrapper_t *ehci_allocate_tw_resources(
219 ehci_state_t *ehcip,
220 ehci_pipe_private_t *pp,
221 size_t length,
222 usb_flags_t usb_flags,
223 size_t td_count);
224 static void ehci_free_tw_td_resources(
225 ehci_state_t *ehcip,
226 ehci_trans_wrapper_t *tw);
227 static void ehci_start_xfer_timer(
228 ehci_state_t *ehcip,
229 ehci_pipe_private_t *pp,
230 ehci_trans_wrapper_t *tw);
231 void ehci_stop_xfer_timer(
232 ehci_state_t *ehcip,
233 ehci_trans_wrapper_t *tw,
234 uint_t flag);
235 static void ehci_xfer_timeout_handler(void *arg);
236 static void ehci_remove_tw_from_timeout_list(
237 ehci_state_t *ehcip,
238 ehci_trans_wrapper_t *tw);
239 static void ehci_start_timer(ehci_state_t *ehcip,
240 ehci_pipe_private_t *pp);
241 void ehci_deallocate_tw(
242 ehci_state_t *ehcip,
243 ehci_pipe_private_t *pp,
244 ehci_trans_wrapper_t *tw);
245 void ehci_free_dma_resources(
246 ehci_state_t *ehcip,
247 usba_pipe_handle_data_t *ph);
248 static void ehci_free_tw(
249 ehci_state_t *ehcip,
250 ehci_pipe_private_t *pp,
251 ehci_trans_wrapper_t *tw);
252
253 /* Miscellaneous functions */
254 int ehci_allocate_intr_in_resource(
255 ehci_state_t *ehcip,
256 ehci_pipe_private_t *pp,
257 ehci_trans_wrapper_t *tw,
258 usb_flags_t flags);
259 void ehci_pipe_cleanup(
260 ehci_state_t *ehcip,
261 usba_pipe_handle_data_t *ph);
262 static void ehci_wait_for_transfers_completion(
263 ehci_state_t *ehcip,
264 ehci_pipe_private_t *pp);
265 void ehci_check_for_transfers_completion(
266 ehci_state_t *ehcip,
267 ehci_pipe_private_t *pp);
268 static void ehci_save_data_toggle(
269 ehci_state_t *ehcip,
270 usba_pipe_handle_data_t *ph);
271 void ehci_restore_data_toggle(
272 ehci_state_t *ehcip,
273 usba_pipe_handle_data_t *ph);
274 void ehci_handle_outstanding_requests(
275 ehci_state_t *ehcip,
276 ehci_pipe_private_t *pp);
277 void ehci_deallocate_intr_in_resource(
278 ehci_state_t *ehcip,
279 ehci_pipe_private_t *pp,
280 ehci_trans_wrapper_t *tw);
281 void ehci_do_client_periodic_in_req_callback(
282 ehci_state_t *ehcip,
283 ehci_pipe_private_t *pp,
284 usb_cr_t completion_reason);
285 void ehci_hcdi_callback(
286 usba_pipe_handle_data_t *ph,
287 ehci_trans_wrapper_t *tw,
288 usb_cr_t completion_reason);
289
290
291 /*
292  * Endpoint Descriptor (QH) manipulation functions
293 */
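
/*
 * Rough QH lifecycle as handled by the routines below (a summary sketch
 * only, based on the functions declared in this file):
 *
 *	qh = ehci_alloc_qh(ehcip, ph, flag);	allocate a QH from the pool
 *	ehci_insert_qh(ehcip, ph);		link it onto the async or
 *						interrupt schedule
 *	ehci_remove_qh(ehcip, pp, B_TRUE);	unlink it and queue it on the
 *						reclamation list
 *	ehci_deallocate_qh(ehcip, qh);		free its dummy QTDs and mark
 *						the QH free in the pool
 */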
294
295 /*
296 * ehci_alloc_qh:
297 *
298 * Allocate an endpoint descriptor (QH)
299 *
300 * NOTE: This function is also called from POLLED MODE.
301 */
302 ehci_qh_t *
303 ehci_alloc_qh(
304 ehci_state_t *ehcip,
305 usba_pipe_handle_data_t *ph,
306 uint_t flag)
307 {
308 int i, state;
309 ehci_qh_t *qh;
310
311 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
312 "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
313
314 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
315
316 /*
317 	 * If this is for an ISOC endpoint, return NULL.
318 	 * Isochronous transfers use ITDs placed directly onto the PFL.
319 */
320 if (ph) {
321 if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
322
323 return (NULL);
324 }
325 }
326
327 /*
328 * The first 63 endpoints in the Endpoint Descriptor (QH)
329 	 * buffer pool are reserved for building the interrupt lattice
330 * tree. Search for a blank endpoint descriptor in the QH
331 * buffer pool.
332 */
333 for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
334 state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
335
336 if (state == EHCI_QH_FREE) {
337 break;
338 }
339 }
340
341 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
342 "ehci_alloc_qh: Allocated %d", i);
343
344 if (i == ehci_qh_pool_size) {
345 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
346 "ehci_alloc_qh: QH exhausted");
347
348 return (NULL);
349 } else {
350 qh = &ehcip->ehci_qh_pool_addr[i];
351 bzero((void *)qh, sizeof (ehci_qh_t));
352
353 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
354 "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
355
356 /* Check polled mode flag */
357 if (flag == EHCI_POLLED_MODE_FLAG) {
358 Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
359 Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
360 }
361
362 /* Unpack the endpoint descriptor into a control field */
363 if (ph) {
364 if ((ehci_initialize_dummy(ehcip,
365 qh)) == USB_NO_RESOURCES) {
366
367 Set_QH(qh->qh_state, EHCI_QH_FREE);
368
369 return (NULL);
370 }
371
372 ehci_unpack_endpoint(ehcip, ph, qh);
373
374 Set_QH(qh->qh_curr_qtd, NULL);
375 Set_QH(qh->qh_alt_next_qtd,
376 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
377
378 /* Change QH's state Active */
379 Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
380 } else {
381 Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
382
383 /* Change QH's state Static */
384 Set_QH(qh->qh_state, EHCI_QH_STATIC);
385 }
386
387 ehci_print_qh(ehcip, qh);
388
389 return (qh);
390 }
391 }
392
393
394 /*
395 * ehci_unpack_endpoint:
396 *
397 * Unpack the information in the pipe handle and create the first byte
398 * of the Host Controller's (HC) Endpoint Descriptor (QH).
399 */
400 static void
401 ehci_unpack_endpoint(
402 ehci_state_t *ehcip,
403 usba_pipe_handle_data_t *ph,
404 ehci_qh_t *qh)
405 {
406 usb_ep_descr_t *endpoint = &ph->p_ep;
407 uint_t maxpacketsize, addr, xactions;
408 uint_t ctrl = 0, status = 0, split_ctrl = 0;
409 usb_port_status_t usb_port_status;
410 usba_device_t *usba_device = ph->p_usba_device;
411 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
412
413 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
414 "ehci_unpack_endpoint:");
415
416 mutex_enter(&usba_device->usb_mutex);
417 ctrl = usba_device->usb_addr;
418 usb_port_status = usba_device->usb_port_status;
419 mutex_exit(&usba_device->usb_mutex);
420
421 addr = endpoint->bEndpointAddress;
422
423 /* Assign the endpoint's address */
424 ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
425
426 /* Assign the speed */
427 switch (usb_port_status) {
428 case USBA_LOW_SPEED_DEV:
429 ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
430 break;
431 case USBA_FULL_SPEED_DEV:
432 ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
433 break;
434 case USBA_HIGH_SPEED_DEV:
435 ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
436 break;
437 }
438
439 switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
440 case USB_EP_ATTR_CONTROL:
441 /* Assign data toggle information */
442 ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
443
444 if (usb_port_status != USBA_HIGH_SPEED_DEV) {
445 ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
446 }
447 /* FALLTHRU */
448 case USB_EP_ATTR_BULK:
449 /* Maximum nak counter */
450 ctrl |= EHCI_QH_CTRL_MAX_NC;
451
452 if (usb_port_status == USBA_HIGH_SPEED_DEV) {
453 /*
454 * Perform ping before executing control
455 * and bulk transactions.
456 */
457 status = EHCI_QH_STS_DO_PING;
458 }
459 break;
460 case USB_EP_ATTR_INTR:
461 /* Set start split mask */
462 split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
463
464 /*
465 * Set complete split mask for low/full speed
466 * usb devices.
467 */
468 if (usb_port_status != USBA_HIGH_SPEED_DEV) {
469 split_ctrl |= ((pp->pp_cmask <<
470 EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
471 EHCI_QH_SPLIT_CTRL_COMP_MASK);
472 }
473 break;
474 }
475
476 /* Get the max transactions per microframe */
477 xactions = (endpoint->wMaxPacketSize &
478 USB_EP_MAX_XACTS_MASK) >> USB_EP_MAX_XACTS_SHIFT;
479
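	/*
	 * Bits 12:11 of wMaxPacketSize encode the number of additional
	 * transactions per microframe for high-bandwidth high speed
	 * endpoints (per the USB 2.0 endpoint descriptor definition):
	 * 0 means one transaction, 1 means two and 2 means three, which
	 * is the mapping applied below.
	 */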
480 switch (xactions) {
481 case 0:
482 split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
483 break;
484 case 1:
485 split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
486 break;
487 case 2:
488 split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
489 break;
490 default:
491 split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
492 break;
493 }
494
495 /*
496 * For low/full speed devices, program high speed hub
497 * address and port number.
498 */
499 if (usb_port_status != USBA_HIGH_SPEED_DEV) {
500 mutex_enter(&usba_device->usb_mutex);
501 split_ctrl |= ((usba_device->usb_hs_hub_addr
502 << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
503 EHCI_QH_SPLIT_CTRL_HUB_ADDR);
504
505 split_ctrl |= ((usba_device->usb_hs_hub_port
506 << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
507 EHCI_QH_SPLIT_CTRL_HUB_PORT);
508
509 mutex_exit(&usba_device->usb_mutex);
510
511 /* Set start split transaction state */
512 status = EHCI_QH_STS_DO_START_SPLIT;
513 }
514
515 /* Assign endpoint's maxpacketsize */
516 maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
517 maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
518 ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
519
520 Set_QH(qh->qh_ctrl, ctrl);
521 Set_QH(qh->qh_split_ctrl, split_ctrl);
522 Set_QH(qh->qh_status, status);
523 }
524
525
526 /*
527 * ehci_insert_qh:
528 *
529 * Add the Endpoint Descriptor (QH) into the Host Controller's
530 * (HC) appropriate endpoint list.
531 */
532 void
533 ehci_insert_qh(
534 ehci_state_t *ehcip,
535 usba_pipe_handle_data_t *ph)
536 {
537 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
538
539 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
540 "ehci_insert_qh: qh=0x%p", (void *)pp->pp_qh);
541
542 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
543
544 switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
545 case USB_EP_ATTR_CONTROL:
546 case USB_EP_ATTR_BULK:
547 ehci_insert_async_qh(ehcip, pp);
548 ehcip->ehci_open_async_count++;
549 break;
550 case USB_EP_ATTR_INTR:
551 ehci_insert_intr_qh(ehcip, pp);
552 ehcip->ehci_open_periodic_count++;
553 break;
554 case USB_EP_ATTR_ISOCH:
555 /* ISOCH does not use QH, don't do anything but update count */
556 ehcip->ehci_open_periodic_count++;
557 break;
558 }
559 }
560
561
562 /*
563 * ehci_insert_async_qh:
564 *
565 * Insert a control/bulk endpoint into the Host Controller's (HC)
566 * Asynchronous schedule endpoint list.
567 */
568 static void
569 ehci_insert_async_qh(
570 ehci_state_t *ehcip,
571 ehci_pipe_private_t *pp)
572 {
573 ehci_qh_t *qh = pp->pp_qh;
574 ehci_qh_t *async_head_qh;
575 ehci_qh_t *next_qh;
576 uintptr_t qh_addr;
577
578 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
579 "ehci_insert_async_qh:");
580
581 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
582
583 /* Make sure this QH is not already in the list */
584 ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
585
586 qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
587
588 /* Obtain a ptr to the head of the Async schedule list */
589 async_head_qh = ehcip->ehci_head_of_async_sched_list;
590
591 if (async_head_qh == NULL) {
592 /* Set this QH to be the "head" of the circular list */
593 Set_QH(qh->qh_ctrl,
594 (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
595
596 /* Set new QH's link and previous pointer to itself */
597 Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
598 Set_QH(qh->qh_prev, qh_addr);
599
600 ehcip->ehci_head_of_async_sched_list = qh;
601
602 /* Set the head ptr to the new endpoint */
603 Set_OpReg(ehci_async_list_addr, qh_addr);
604
605 /*
606 * For some reason this register might get nulled out by
607 		 * the Uli M1575 South Bridge. To work around the hardware
608 		 * problem, check the value after the write and retry if the
609 * last write fails.
610 *
611 * If the ASYNCLISTADDR remains "stuck" after
612 * EHCI_MAX_RETRY retries, then the M1575 is broken
613 * and is stuck in an inconsistent state and is about
614 * to crash the machine with a trn_oor panic when it
615 * does a DMA read from 0x0. It is better to panic
616 * now rather than wait for the trn_oor crash; this
617 * way Customer Service will have a clean signature
618 * that indicts the M1575 chip rather than a
619 * mysterious and hard-to-diagnose trn_oor panic.
620 */
621 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
622 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
623 (qh_addr != Get_OpReg(ehci_async_list_addr))) {
624 int retry = 0;
625
626 Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
627 if (retry >= EHCI_MAX_RETRY)
628 cmn_err(CE_PANIC, "ehci_insert_async_qh:"
629 " ASYNCLISTADDR write failed.");
630
631 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
632 "ehci_insert_async_qh: ASYNCLISTADDR "
633 "write failed, retry=%d", retry);
634 }
635 } else {
636 ASSERT(Get_QH(async_head_qh->qh_ctrl) &
637 EHCI_QH_CTRL_RECLAIM_HEAD);
638
639 /* Ensure this QH's "H" bit is not set */
640 Set_QH(qh->qh_ctrl,
641 (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
642
643 next_qh = ehci_qh_iommu_to_cpu(ehcip,
644 Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
645
646 /* Set new QH's link and previous pointers */
647 Set_QH(qh->qh_link_ptr,
648 Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
649 Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
650
651 /* Set next QH's prev pointer */
652 Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
653
654 /* Set QH Head's link pointer points to new QH */
655 Set_QH(async_head_qh->qh_link_ptr,
656 qh_addr | EHCI_QH_LINK_REF_QH);
657 }
658 }
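
/*
 * After ehci_insert_async_qh() the asynchronous schedule is a circular
 * list rooted at the reclamation head, roughly (sketch only):
 *
 *	head_qh ("H" bit set) -> new qh -> older qhs ... -> head_qh
 *
 * with each qh_prev pointer mirroring the forward link, so removal can
 * patch both neighbours without walking the list.
 */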
659
660
661 /*
662 * ehci_insert_intr_qh:
663 *
664  * Insert an interrupt endpoint into the Host Controller's (HC) interrupt
665 * lattice tree.
666 */
667 static void
668 ehci_insert_intr_qh(
669 ehci_state_t *ehcip,
670 ehci_pipe_private_t *pp)
671 {
672 ehci_qh_t *qh = pp->pp_qh;
673 ehci_qh_t *next_lattice_qh, *lattice_qh;
674 uint_t hnode;
675
676 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
677 "ehci_insert_intr_qh:");
678
679 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
680
681 /* Make sure this QH is not already in the list */
682 ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
683
684 /*
685 * The appropriate high speed node was found
686 * during the opening of the pipe.
687 */
688 hnode = pp->pp_pnode;
689
690 /* Find the lattice endpoint */
691 lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
692
693 /* Find the next lattice endpoint */
694 next_lattice_qh = ehci_qh_iommu_to_cpu(
695 ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
696
697 /* Update the previous pointer */
698 Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
699
700 /* Check next_lattice_qh value */
701 if (next_lattice_qh) {
702 /* Update this qh to point to the next one in the lattice */
703 Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
704
705 /* Update the previous pointer of qh->qh_link_ptr */
706 if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
707 Set_QH(next_lattice_qh->qh_prev,
708 ehci_qh_cpu_to_iommu(ehcip, qh));
709 }
710 } else {
711 /* Update qh's link pointer to terminate periodic list */
712 Set_QH(qh->qh_link_ptr,
713 (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
714 }
715
716 /* Insert this endpoint into the lattice */
717 Set_QH(lattice_qh->qh_link_ptr,
718 (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
719 }
720
721
722 /*
723 * ehci_modify_qh_status_bit:
724 *
725 * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
726 *
727 * If several threads try to halt the same pipe, they will need to wait on
728 * a condition variable. Only one thread is allowed to halt or unhalt the
729 * pipe at a time.
730 *
731  * Usually a halt pipe is soon followed by an unhalt pipe. It is assumed
732  * that an unhalt pipe will never occur without a preceding halt pipe.
733 */
734 static void
735 ehci_modify_qh_status_bit(
736 ehci_state_t *ehcip,
737 ehci_pipe_private_t *pp,
738 halt_bit_t action)
739 {
740 ehci_qh_t *qh = pp->pp_qh;
741 uint_t smask, eps, split_intr_qh;
742 uint_t status;
743
744 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
745 "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
746 action, (void *)qh);
747
748 ehci_print_qh(ehcip, qh);
749
750 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
751
752 /*
753 * If this pipe is in the middle of halting don't allow another
754 * thread to come in and modify the same pipe.
755 */
756 while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
757
758 cv_wait(&pp->pp_halt_cmpl_cv,
759 &ehcip->ehci_int_mutex);
760 }
761
762 /* Sync the QH QTD pool to get up to date information */
763 Sync_QH_QTD_Pool(ehcip);
764
765
766 if (action == CLEAR_HALT) {
767 /*
768 * If the halt bit is to be cleared, just clear it.
769 * there shouldn't be any race condition problems.
770 * If the host controller reads the bit before the
771 * driver has a chance to set the bit, the bit will
772 * be reread on the next frame.
773 */
774 Set_QH(qh->qh_ctrl,
775 (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
776 Set_QH(qh->qh_status,
777 Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
778
779 goto success;
780 }
781
782 	/* Halt the QH, but first check to see if it is already halted */
783 status = Get_QH(qh->qh_status);
784 if (!(status & EHCI_QH_STS_HALTED)) {
785 /* Indicate that this pipe is in the middle of halting. */
786 pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
787
788 /*
789 		 * Find out if this is a full/low speed interrupt endpoint.
790 		 * A non-zero Cmask indicates that this QH is an interrupt
791 		 * endpoint. Check the endpoint speed to see if it is either
792 		 * FULL or LOW.
793 */
794 smask = Get_QH(qh->qh_split_ctrl) &
795 EHCI_QH_SPLIT_CTRL_INTR_MASK;
796 eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
797 split_intr_qh = ((smask != 0) &&
798 (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
799
800 if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
801 ehci_halt_hs_qh(ehcip, pp, qh);
802 } else {
803 if (split_intr_qh) {
804 ehci_halt_fls_intr_qh(ehcip, qh);
805 } else {
806 ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
807 }
808 }
809
810 /* Indicate that this pipe is not in the middle of halting. */
811 pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
812 }
813
814 /* Sync the QH QTD pool again to get the most up to date information */
815 Sync_QH_QTD_Pool(ehcip);
816
817 ehci_print_qh(ehcip, qh);
818
819 status = Get_QH(qh->qh_status);
820 if (!(status & EHCI_QH_STS_HALTED)) {
821 USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
822 "ehci_modify_qh_status_bit: Failed to halt qh=0x%p",
823 (void *)qh);
824
825 ehci_print_qh(ehcip, qh);
826
827 /* Set host controller soft state to error */
828 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
829
830 ASSERT(status & EHCI_QH_STS_HALTED);
831 }
832
833 success:
834 /* Wake up threads waiting for this pipe to be halted. */
835 cv_signal(&pp->pp_halt_cmpl_cv);
836 }
837
838
839 /*
840 * ehci_halt_hs_qh:
841 *
842 * Halts all types of HIGH SPEED QHs.
843 */
844 static void
845 ehci_halt_hs_qh(
846 ehci_state_t *ehcip,
847 ehci_pipe_private_t *pp,
848 ehci_qh_t *qh)
849 {
850 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
851
852 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
853 "ehci_halt_hs_qh:");
854
855 /* Remove this qh from the HCD's view, but do not reclaim it */
856 ehci_remove_qh(ehcip, pp, B_FALSE);
857 ehci_toggle_scheduler_on_pipe(ehcip);
858
859 /*
860 	 * Wait for at least one SOF, just in case the HCD is in the
861 	 * middle of accessing this QH.
862 */
863 (void) ehci_wait_for_sof(ehcip);
864
865 /* Sync the QH QTD pool to get up to date information */
866 Sync_QH_QTD_Pool(ehcip);
867
868 /* Modify the status bit and halt this QH. */
869 Set_QH(qh->qh_status,
870 ((Get_QH(qh->qh_status) &
871 ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
872
873 /* Insert this QH back into the HCD's view */
874 ehci_insert_qh(ehcip, ph);
875 ehci_toggle_scheduler_on_pipe(ehcip);
876 }
877
878
879 /*
880 * ehci_halt_fls_ctrl_and_bulk_qh:
881 *
882 * Halts FULL/LOW Ctrl and Bulk QHs only.
883 */
884 static void
885 ehci_halt_fls_ctrl_and_bulk_qh(
886 ehci_state_t *ehcip,
887 ehci_pipe_private_t *pp,
888 ehci_qh_t *qh)
889 {
890 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
891 uint_t status, split_status, bytes_left;
892
893
894 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
895 "ehci_halt_fls_ctrl_and_bulk_qh:");
896
897 /* Remove this qh from the HCD's view, but do not reclaim it */
898 ehci_remove_qh(ehcip, pp, B_FALSE);
899 ehci_toggle_scheduler_on_pipe(ehcip);
900
901 /*
902 	 * Wait for at least one SOF, just in case the HCD is in the
903 	 * middle of accessing this QH.
904 */
905 (void) ehci_wait_for_sof(ehcip);
906
907 /* Sync the QH QTD pool to get up to date information */
908 Sync_QH_QTD_Pool(ehcip);
909
910 /* Modify the status bit and halt this QH. */
911 Set_QH(qh->qh_status,
912 ((Get_QH(qh->qh_status) &
913 ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
914
915 /* Check to see if the QH was in the middle of a transaction */
916 status = Get_QH(qh->qh_status);
917 split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
918 bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
919 if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
920 (bytes_left != 0)) {
921 /* send ClearTTBuffer to this device's parent 2.0 hub */
922 ehci_clear_tt_buffer(ehcip, ph, qh);
923 }
924
925 /* Insert this QH back into the HCD's view */
926 ehci_insert_qh(ehcip, ph);
927 ehci_toggle_scheduler_on_pipe(ehcip);
928 }
929
930
931 /*
932 * ehci_clear_tt_buffer
933 *
934  * This function will send a Clear_TT_Buffer request to the pipe's
935 * parent 2.0 hub.
936 */
937 static void
938 ehci_clear_tt_buffer(
939 ehci_state_t *ehcip,
940 usba_pipe_handle_data_t *ph,
941 ehci_qh_t *qh)
942 {
943 usba_device_t *usba_device;
944 usba_device_t *hub_usba_device;
945 usb_pipe_handle_t hub_def_ph;
946 usb_ep_descr_t *eptd;
947 uchar_t attributes;
948 uint16_t wValue;
949 usb_ctrl_setup_t setup;
950 usb_cr_t completion_reason;
951 usb_cb_flags_t cb_flags;
952 int retry;
953
954 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
955 "ehci_clear_tt_buffer: ");
956
957 /* Get some information about the current pipe */
958 usba_device = ph->p_usba_device;
959 eptd = &ph->p_ep;
960 attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
961
962 /*
963 	 * Create the wValue for this request (usb spec 11.24.2.3)
964 * 3..0 Endpoint Number
965 * 10..4 Device Address
966 * 12..11 Endpoint Type
967 * 14..13 Reserved (must be 0)
968 * 15 Direction 1 = IN, 0 = OUT
969 */
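	/*
	 * Illustration only (hypothetical endpoint): an interrupt IN
	 * endpoint 1 on device address 3 would, following the layout
	 * above, produce
	 *
	 *	wValue = 0x8000 | (3 << 11) | (3 << 4) | 1 = 0x9831
	 *
	 * where 3 is the interrupt endpoint type code.
	 */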
970 wValue = 0;
971 if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
972 wValue |= 0x8000;
973 }
974 wValue |= attributes << 11;
975 wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
976 wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
977 EHCI_QH_CTRL_ED_NUMBER_SHIFT;
978
979 mutex_exit(&ehcip->ehci_int_mutex);
980
981 /* Manually fill in the request. */
982 setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
983 setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
984 setup.wValue = wValue;
985 setup.wIndex = 1;
986 setup.wLength = 0;
987 setup.attrs = USB_ATTRS_NONE;
988
989 /* Get the usba_device of the parent 2.0 hub. */
990 mutex_enter(&usba_device->usb_mutex);
991 hub_usba_device = usba_device->usb_hs_hub_usba_dev;
992 mutex_exit(&usba_device->usb_mutex);
993
994 /* Get the default ctrl pipe for the parent 2.0 hub */
995 mutex_enter(&hub_usba_device->usb_mutex);
996 hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
997 mutex_exit(&hub_usba_device->usb_mutex);
998
999 for (retry = 0; retry < 3; retry++) {
1000
1001 /* sync send the request to the default pipe */
1002 if (usb_pipe_ctrl_xfer_wait(
1003 hub_def_ph,
1004 &setup,
1005 NULL,
1006 &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1007
1008 break;
1009 }
1010
1011 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1012 		    "ehci_clear_tt_buffer: Failed to clear tt buffer, "
1013 "retry = %d, cr = %d, cb_flags = 0x%x\n",
1014 retry, completion_reason, cb_flags);
1015 }
1016
1017 if (retry >= 3) {
1018 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1019 dev_info_t *dip = hub_usba_device->usb_dip;
1020
1021 /*
1022 		 * Ask the user to hotplug the 2.0 hub, to make sure that
1023 		 * all the buffers are back in sync, since this command has failed.
1024 */
1025 USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1026 		    "Error recovery failure: Please hotplug the 2.0 hub at "
1027 "%s", ddi_pathname(dip, path));
1028
1029 kmem_free(path, MAXPATHLEN);
1030 }
1031
1032 mutex_enter(&ehcip->ehci_int_mutex);
1033 }
1034
1035 /*
1036 * ehci_halt_fls_intr_qh:
1037 *
1038 * Halts FULL/LOW speed Intr QHs.
1039 */
1040 static void
1041 ehci_halt_fls_intr_qh(
1042 ehci_state_t *ehcip,
1043 ehci_qh_t *qh)
1044 {
1045 usb_frame_number_t starting_frame;
1046 usb_frame_number_t frames_past;
1047 uint_t status, i;
1048
1049 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1050 "ehci_halt_fls_intr_qh:");
1051
1052 /*
1053 	 * Ask the HC to deactivate this full/low speed
1054 	 * periodic QH.
1055 */
1056 Set_QH(qh->qh_ctrl,
1057 (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1058
1059 starting_frame = ehci_get_current_frame_number(ehcip);
1060
1061 /*
1062 	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames or until
1063 * the QH has been halted.
1064 */
1065 Sync_QH_QTD_Pool(ehcip);
1066 frames_past = 0;
1067 status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1068
1069 while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1070 (status != 0)) {
1071
1072 (void) ehci_wait_for_sof(ehcip);
1073
1074 Sync_QH_QTD_Pool(ehcip);
1075 status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1076 frames_past = ehci_get_current_frame_number(ehcip) -
1077 starting_frame;
1078 }
1079
1080 /* Modify the status bit and halt this QH. */
1081 Sync_QH_QTD_Pool(ehcip);
1082
1083 status = Get_QH(qh->qh_status);
1084
1085 for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1086 Set_QH(qh->qh_status,
1087 ((Get_QH(qh->qh_status) &
1088 ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1089
1090 Sync_QH_QTD_Pool(ehcip);
1091
1092 (void) ehci_wait_for_sof(ehcip);
1093 Sync_QH_QTD_Pool(ehcip);
1094
1095 if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1096
1097 break;
1098 }
1099 }
1100
1101 Sync_QH_QTD_Pool(ehcip);
1102
1103 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1104 "ehci_halt_fls_intr_qh: qh=0x%p frames past=%llu,"
1105 " status=0x%x, 0x%x", (void *)qh,
1106 (unsigned long long)(ehci_get_current_frame_number(ehcip) -
1107 starting_frame), status, Get_QH(qh->qh_status));
1108 }
1109
1110
1111 /*
1112 * ehci_remove_qh:
1113 *
1114 * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1115 * endpoint list.
1116 */
1117 void
1118 ehci_remove_qh(
1119 ehci_state_t *ehcip,
1120 ehci_pipe_private_t *pp,
1121 boolean_t reclaim)
1122 {
1123 uchar_t attributes;
1124
1125 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1126
1127 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1128 "ehci_remove_qh: qh=0x%p", (void *)pp->pp_qh);
1129
1130 attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1131
1132 switch (attributes) {
1133 case USB_EP_ATTR_CONTROL:
1134 case USB_EP_ATTR_BULK:
1135 ehci_remove_async_qh(ehcip, pp, reclaim);
1136 ehcip->ehci_open_async_count--;
1137 break;
1138 case USB_EP_ATTR_INTR:
1139 ehci_remove_intr_qh(ehcip, pp, reclaim);
1140 ehcip->ehci_open_periodic_count--;
1141 break;
1142 case USB_EP_ATTR_ISOCH:
1143 /* ISOCH does not use QH, don't do anything but update count */
1144 ehcip->ehci_open_periodic_count--;
1145 break;
1146 }
1147 }
1148
1149
1150 /*
1151 * ehci_remove_async_qh:
1152 *
1153  * Remove a control/bulk endpoint from the Host Controller's (HC)
1154 * Asynchronous schedule endpoint list.
1155 */
1156 static void
1157 ehci_remove_async_qh(
1158 ehci_state_t *ehcip,
1159 ehci_pipe_private_t *pp,
1160 boolean_t reclaim)
1161 {
1162 ehci_qh_t *qh = pp->pp_qh; /* qh to be removed */
1163 ehci_qh_t *prev_qh, *next_qh;
1164
1165 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1166 "ehci_remove_async_qh:");
1167
1168 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1169
1170 prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1171 Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1172 next_qh = ehci_qh_iommu_to_cpu(ehcip,
1173 Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1174
1175 /* Make sure this QH is in the list */
1176 ASSERT(prev_qh != NULL);
1177
1178 /*
1179 * If next QH and current QH are the same, then this is the last
1180 * QH on the Asynchronous Schedule list.
1181 */
1182 if (qh == next_qh) {
1183 ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1184 /*
1185 * Null our pointer to the async sched list, but do not
1186 * touch the host controller's list_addr.
1187 */
1188 ehcip->ehci_head_of_async_sched_list = NULL;
1189 ASSERT(ehcip->ehci_open_async_count == 1);
1190 } else {
1191 /* If this QH is the HEAD then find another one to replace it */
1192 if (ehcip->ehci_head_of_async_sched_list == qh) {
1193
1194 ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1195 ehcip->ehci_head_of_async_sched_list = next_qh;
1196 Set_QH(next_qh->qh_ctrl,
1197 Get_QH(next_qh->qh_ctrl) |
1198 EHCI_QH_CTRL_RECLAIM_HEAD);
1199 }
1200 Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1201 Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1202 }
1203
1204 /* qh_prev to indicate it is no longer in the circular list */
1205 Set_QH(qh->qh_prev, NULL);
1206
1207 if (reclaim) {
1208 ehci_insert_qh_on_reclaim_list(ehcip, pp);
1209 }
1210 }
1211
1212
1213 /*
1214 * ehci_remove_intr_qh:
1215 *
1216 * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1217 * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1218 * interrupt handler.
1219 */
1220 static void
1221 ehci_remove_intr_qh(
1222 ehci_state_t *ehcip,
1223 ehci_pipe_private_t *pp,
1224 boolean_t reclaim)
1225 {
1226 ehci_qh_t *qh = pp->pp_qh; /* qh to be removed */
1227 ehci_qh_t *prev_qh, *next_qh;
1228
1229 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1230 "ehci_remove_intr_qh:");
1231
1232 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1233
1234 prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1235 next_qh = ehci_qh_iommu_to_cpu(ehcip,
1236 Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1237
1238 /* Make sure this QH is in the list */
1239 ASSERT(prev_qh != NULL);
1240
1241 if (next_qh) {
1242 /* Update previous qh's link pointer */
1243 Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1244
1245 if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1246 /* Set the previous pointer of the next one */
1247 Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1248 }
1249 } else {
1250 /* Update previous qh's link pointer */
1251 Set_QH(prev_qh->qh_link_ptr,
1252 (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1253 }
1254
1255 /* qh_prev to indicate it is no longer in the circular list */
1256 Set_QH(qh->qh_prev, NULL);
1257
1258 if (reclaim) {
1259 ehci_insert_qh_on_reclaim_list(ehcip, pp);
1260 }
1261 }
1262
1263
1264 /*
1265 * ehci_insert_qh_on_reclaim_list:
1266 *
1267 * Insert Endpoint onto the reclaim list
1268 */
1269 static void
1270 ehci_insert_qh_on_reclaim_list(
1271 ehci_state_t *ehcip,
1272 ehci_pipe_private_t *pp)
1273 {
1274 ehci_qh_t *qh = pp->pp_qh; /* qh to be removed */
1275 ehci_qh_t *next_qh, *prev_qh;
1276 usb_frame_number_t frame_number;
1277
1278 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1279
1280 /*
1281 	 * Read the current usb frame number and add the appropriate number
1282 	 * of usb frames to wait before reclaiming the current endpoint.
1283 */
1284 frame_number =
1285 ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1286
1287 /* Store 32-bit ID */
1288 Set_QH(qh->qh_reclaim_frame,
1289 ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1290
1291 /* Insert the endpoint onto the reclamation list */
1292 if (ehcip->ehci_reclaim_list) {
1293 next_qh = ehcip->ehci_reclaim_list;
1294
1295 while (next_qh) {
1296 prev_qh = next_qh;
1297 next_qh = ehci_qh_iommu_to_cpu(ehcip,
1298 Get_QH(next_qh->qh_reclaim_next));
1299 }
1300
1301 Set_QH(prev_qh->qh_reclaim_next,
1302 ehci_qh_cpu_to_iommu(ehcip, qh));
1303 } else {
1304 ehcip->ehci_reclaim_list = qh;
1305 }
1306
1307 ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1308 }
1309
1310
1311 /*
1312 * ehci_deallocate_qh:
1313 *
1314 * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1315 *
1316 * NOTE: This function is also called from POLLED MODE.
1317 */
1318 void
1319 ehci_deallocate_qh(
1320 ehci_state_t *ehcip,
1321 ehci_qh_t *old_qh)
1322 {
1323 ehci_qtd_t *first_dummy_qtd, *second_dummy_qtd;
1324
1325 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1326 "ehci_deallocate_qh:");
1327
1328 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1329
1330 first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1331 (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1332
1333 if (first_dummy_qtd) {
1334 ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1335
1336 second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1337 Get_QTD(first_dummy_qtd->qtd_next_qtd));
1338
1339 if (second_dummy_qtd) {
1340 ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1341 EHCI_QTD_DUMMY);
1342
1343 ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1344 }
1345
1346 ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1347 }
1348
1349 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1350 "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1351
1352 Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1353 }
1354
1355
1356 /*
1357 * ehci_qh_cpu_to_iommu:
1358 *
1359  * This function converts the given Endpoint Descriptor (QH) CPU address
1360  * to an IO address.
1361 *
1362 * NOTE: This function is also called from POLLED MODE.
1363 */
1364 uint32_t
1365 ehci_qh_cpu_to_iommu(
1366 ehci_state_t *ehcip,
1367 ehci_qh_t *addr)
1368 {
1369 uint32_t qh;
1370
1371 qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1372 (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1373
1374 ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1375 ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1376 sizeof (ehci_qh_t) * ehci_qh_pool_size);
1377
1378 return (qh);
1379 }
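
/*
 * Example with illustrative numbers only: if the QH pool begins at
 * kernel virtual address 0xA000 and its DMA cookie address is 0x4000,
 * then a QH at 0xA0C0 converts to 0x4000 + (0xA0C0 - 0xA000) = 0x40C0.
 * ehci_qh_iommu_to_cpu() below performs the inverse calculation.
 */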
1380
1381
1382 /*
1383 * ehci_qh_iommu_to_cpu:
1384 *
1385  * This function converts the given Endpoint Descriptor (QH) IO address
1386  * to a CPU address.
1387 */
1388 ehci_qh_t *
1389 ehci_qh_iommu_to_cpu(
1390 ehci_state_t *ehcip,
1391 uintptr_t addr)
1392 {
1393 ehci_qh_t *qh;
1394
1395 if (addr == NULL) {
1396
1397 return (NULL);
1398 }
1399
1400 qh = (ehci_qh_t *)((uintptr_t)
1401 (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1402 (uintptr_t)ehcip->ehci_qh_pool_addr);
1403
1404 ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1405 ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1406 (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1407
1408 return (qh);
1409 }
1410
1411
1412 /*
1413  * Transfer Descriptor manipulation functions
1414 */
1415
1416 /*
1417 * ehci_initialize_dummy:
1418 *
1419 * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) on the
1420 * end of its QTD list. Initially, both the head and tail pointers of the QH
1421 * point to the dummy QTD.
1422 */
1423 static int
1424 ehci_initialize_dummy(
1425 ehci_state_t *ehcip,
1426 ehci_qh_t *qh)
1427 {
1428 ehci_qtd_t *first_dummy_qtd, *second_dummy_qtd;
1429
1430 /* Allocate first dummy QTD */
1431 first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1432
1433 if (first_dummy_qtd == NULL) {
1434 return (USB_NO_RESOURCES);
1435 }
1436
1437 /* Allocate second dummy QTD */
1438 second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1439
1440 if (second_dummy_qtd == NULL) {
1441 /* Deallocate first dummy QTD */
1442 ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1443
1444 return (USB_NO_RESOURCES);
1445 }
1446
1447 	/* The next QTD pointer of the QH points to this new dummy QTD */
1448 Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1449 first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1450
1451 /* Set qh's dummy qtd field */
1452 Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1453
1454 /* Set first_dummy's next qtd pointer */
1455 Set_QTD(first_dummy_qtd->qtd_next_qtd,
1456 ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
1457
1458 return (USB_SUCCESS);
1459 }
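
/*
 * After ehci_initialize_dummy() succeeds, the QH's QTD list looks
 * roughly like this (sketch of the assignments above):
 *
 *	qh_next_qtd, qh_dummy_qtd --> first dummy QTD
 *	first dummy QTD's qtd_next_qtd --> second dummy QTD
 *
 * (presumably so the hardware always sees a valid next-QTD pointer
 * while real QTDs are queued onto the QH).
 */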
1460
1461 /*
1462 * ehci_allocate_ctrl_resources:
1463 *
1464  * Calculates the number of QTDs necessary for a ctrl transfer, and allocates
1465 * all the resources necessary.
1466 *
1467  * Returns NULL if there are insufficient resources, otherwise the TW.
1468 */
1469 ehci_trans_wrapper_t *
1470 ehci_allocate_ctrl_resources(
1471 ehci_state_t *ehcip,
1472 ehci_pipe_private_t *pp,
1473 usb_ctrl_req_t *ctrl_reqp,
1474 usb_flags_t usb_flags)
1475 {
1476 size_t qtd_count = 2;
1477 size_t ctrl_buf_size;
1478 ehci_trans_wrapper_t *tw;
1479
1480 /* Add one more td for data phase */
1481 if (ctrl_reqp->ctrl_wLength) {
1482 qtd_count += 1;
1483 }
1484
1485 /*
1486 * If we have a control data phase, the data buffer starts
1487 * on the next 4K page boundary. So the TW buffer is allocated
1488 * to be larger than required. The buffer in the range of
1489 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1490 * and not to be transferred.
1491 */
1492 if (ctrl_reqp->ctrl_wLength) {
1493 ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1494 ctrl_reqp->ctrl_wLength;
1495 } else {
1496 ctrl_buf_size = SETUP_SIZE;
1497 }
1498
1499 tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1500 usb_flags, qtd_count);
1501
1502 return (tw);
1503 }
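
/*
 * For example (hypothetical request): a control read with an 18-byte
 * data stage needs qtd_count = 3 (setup, data and status phases) and a
 * TW buffer of EHCI_MAX_QTD_BUF_SIZE + 18 bytes, while a request with
 * no data stage needs only 2 QTDs and a SETUP_SIZE buffer.
 */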
1504
1505 /*
1506 * ehci_insert_ctrl_req:
1507 *
1508 * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1509 */
1510 /* ARGSUSED */
1511 void
1512 ehci_insert_ctrl_req(
1513 ehci_state_t *ehcip,
1514 usba_pipe_handle_data_t *ph,
1515 usb_ctrl_req_t *ctrl_reqp,
1516 ehci_trans_wrapper_t *tw,
1517 usb_flags_t usb_flags)
1518 {
1519 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1520 uchar_t bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1521 uchar_t bRequest = ctrl_reqp->ctrl_bRequest;
1522 uint16_t wValue = ctrl_reqp->ctrl_wValue;
1523 uint16_t wIndex = ctrl_reqp->ctrl_wIndex;
1524 uint16_t wLength = ctrl_reqp->ctrl_wLength;
1525 mblk_t *data = ctrl_reqp->ctrl_data;
1526 uint32_t ctrl = 0;
1527 uint8_t setup_packet[8];
1528
1529 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1530 "ehci_insert_ctrl_req:");
1531
1532 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1533
1534 /*
1535 * Save current control request pointer and timeout values
1536 * in transfer wrapper.
1537 */
1538 tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1539 tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1540 ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1541
1542 /*
1543 * Initialize the callback and any callback data for when
1544 * the qtd completes.
1545 */
1546 tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1547 tw->tw_handle_callback_value = NULL;
1548
1549 /*
1550 * swap the setup bytes where necessary since we specified
1551 * NEVERSWAP
1552 */
1553 setup_packet[0] = bmRequestType;
1554 setup_packet[1] = bRequest;
1555 setup_packet[2] = (uint8_t)wValue;
1556 setup_packet[3] = wValue >> 8;
1557 setup_packet[4] = (uint8_t)wIndex;
1558 setup_packet[5] = wIndex >> 8;
1559 setup_packet[6] = (uint8_t)wLength;
1560 setup_packet[7] = wLength >> 8;
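
	/*
	 * Hypothetical example: a GET_DESCRIPTOR request for the device
	 * descriptor (bmRequestType 0x80, bRequest 6, wValue 0x0100,
	 * wIndex 0, wLength 18) is laid out little-endian as the bytes
	 * 80 06 00 01 00 00 12 00.
	 */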
1561
1562 bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1563
1564 Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1565
1566 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1567
1568 /*
1569 * The QTD's are placed on the QH one at a time.
1570 * Once this QTD is placed on the done list, the
1571 * data or status phase QTD will be enqueued.
1572 */
1573 (void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1574 EHCI_CTRL_SETUP_PHASE, pp, tw);
1575
1576 USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1577 "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1578
1579 /*
1580 * If this control transfer has a data phase, record the
1581 * direction. If the data phase is an OUT transaction,
1582 * copy the data into the buffer of the transfer wrapper.
1583 */
1584 if (wLength != 0) {
1585 /* There is a data stage. Find the direction */
1586 if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1587 tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1588 } else {
1589 tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1590
1591 			/* Copy the data from the message into the TW buffer */
1592 bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1593 wLength);
1594
1595 Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1596 wLength + EHCI_MAX_QTD_BUF_SIZE);
1597 }
1598
1599 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1600
1601 /*
1602 * Create the QTD. If this is an OUT transaction,
1603 * the data is already in the buffer of the TW.
1604 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1605 * which is 4K aligned, though the ctrl phase only
1606 * transfers a length of SETUP_SIZE. The padding data
1607 * in the TW buffer are discarded.
1608 */
1609 (void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1610 tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1611 EHCI_CTRL_DATA_PHASE, pp, tw);
1612
1613 /*
1614 * The direction of the STATUS QTD depends on
1615 * the direction of the transfer.
1616 */
1617 if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1618 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1619 EHCI_QTD_CTRL_OUT_PID |
1620 EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1621 } else {
1622 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1623 EHCI_QTD_CTRL_IN_PID |
1624 EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1625 }
1626 } else {
1627 /*
1628 		 * There is no data stage, so initiate the
1629 		 * status phase from the host.
1630 */
1631 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1632 EHCI_QTD_CTRL_IN_PID |
1633 EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1634 }
1635
1636
1637 (void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1638 EHCI_CTRL_STATUS_PHASE, pp, tw);
1639
1640 /* Start the timer for this control transfer */
1641 ehci_start_xfer_timer(ehcip, pp, tw);
1642 }
1643
1644
1645 /*
1646 * ehci_allocate_bulk_resources:
1647 *
1648  * Calculates the number of QTDs necessary for a bulk transfer, and allocates
1649 * all the resources necessary.
1650 *
1651  * Returns NULL if there are insufficient resources, otherwise the TW.
1652 */
1653 ehci_trans_wrapper_t *
1654 ehci_allocate_bulk_resources(
1655 ehci_state_t *ehcip,
1656 ehci_pipe_private_t *pp,
1657 usb_bulk_req_t *bulk_reqp,
1658 usb_flags_t usb_flags)
1659 {
1660 size_t qtd_count = 0;
1661 ehci_trans_wrapper_t *tw;
1662
1663 /* Check the size of bulk request */
1664 if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1665
1666 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1667 "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1668 "more than 0x%x", bulk_reqp->bulk_len,
1669 EHCI_MAX_BULK_XFER_SIZE);
1670
1671 return (NULL);
1672 }
1673
1674 /* Get the required bulk packet size */
1675 qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1676 if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1677 bulk_reqp->bulk_len == 0) {
1678 qtd_count += 1;
1679 }
1680
1681 tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1682 usb_flags, qtd_count);
1683
1684 return (tw);
1685 }
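
/*
 * For illustration, if EHCI_MAX_QTD_XFER_SIZE were 20K, a 50K bulk
 * request would need 3 QTDs: 50K/20K = 2 full QTDs plus one more for
 * the 10K remainder; a zero-length request still needs a single QTD.
 */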
1686
1687 /*
1688 * ehci_insert_bulk_req:
1689 *
1690 * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1691 * endpoint.
1692 */
1693 /* ARGSUSED */
1694 void
1695 ehci_insert_bulk_req(
1696 ehci_state_t *ehcip,
1697 usba_pipe_handle_data_t *ph,
1698 usb_bulk_req_t *bulk_reqp,
1699 ehci_trans_wrapper_t *tw,
1700 usb_flags_t flags)
1701 {
1702 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1703 uint_t bulk_pkt_size, count;
1704 size_t residue = 0, len = 0;
1705 uint32_t ctrl = 0;
1706 int pipe_dir;
1707
1708 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1709 "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1710 (void *)bulk_reqp, flags);
1711
1712 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1713
1714 /* Get the bulk pipe direction */
1715 pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1716
1717 /* Get the required bulk packet size */
1718 bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1719
1720 if (bulk_pkt_size) {
1721 residue = tw->tw_length % bulk_pkt_size;
1722 }
1723
1724 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1725 "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1726
1727 /*
1728 * Save current bulk request pointer and timeout values
1729 * in transfer wrapper.
1730 */
1731 tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1732 tw->tw_timeout = bulk_reqp->bulk_timeout;
1733
1734 /*
1735 * Initialize the callback and any callback
1736 * data required when the qtd completes.
1737 */
1738 tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1739 tw->tw_handle_callback_value = NULL;
1740
1741 tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1742 EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1743
1744 if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1745
1746 if (bulk_reqp->bulk_len) {
1747 ASSERT(bulk_reqp->bulk_data != NULL);
1748
1749 bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1750 bulk_reqp->bulk_len);
1751
1752 Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1753 bulk_reqp->bulk_len);
1754 }
1755 }
1756
1757 ctrl = tw->tw_direction;
1758
1759 /* Insert all the bulk QTDs */
1760 for (count = 0; count < tw->tw_num_qtds; count++) {
1761
1762 /* Check for last qtd */
1763 if (count == (tw->tw_num_qtds - 1)) {
1764
1765 ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1766
1767 /* Check for inserting residue data */
1768 if (residue) {
1769 bulk_pkt_size = (uint_t)residue;
1770 }
1771 }
1772
1773 /* Insert the QTD onto the endpoint */
1774 (void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1775 0, pp, tw);
1776
1777 len = len + bulk_pkt_size;
1778 }
1779
1780 /* Start the timer for this bulk transfer */
1781 ehci_start_xfer_timer(ehcip, pp, tw);
1782 }
1783
1784
1785 /*
1786 * ehci_start_periodic_pipe_polling:
1787 *
1788 * NOTE: This function is also called from POLLED MODE.
1789 */
1790 int
1791 ehci_start_periodic_pipe_polling(
1792 ehci_state_t *ehcip,
1793 usba_pipe_handle_data_t *ph,
1794 usb_opaque_t periodic_in_reqp,
1795 usb_flags_t flags)
1796 {
1797 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1798 usb_ep_descr_t *eptd = &ph->p_ep;
1799 int error = USB_SUCCESS;
1800
1801 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1802 "ehci_start_periodic_pipe_polling: ep%d",
1803 ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1804
1805 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1806
1807 /*
1808 * Check and handle start polling on root hub interrupt pipe.
1809 */
1810 if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1811 ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1812 USB_EP_ATTR_INTR)) {
1813
1814 error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1815 (usb_intr_req_t *)periodic_in_reqp, flags);
1816
1817 return (error);
1818 }
1819
1820 switch (pp->pp_state) {
1821 case EHCI_PIPE_STATE_IDLE:
1822 /* Save the Original client's Periodic IN request */
1823 pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1824
1825 /*
1826 		 * If this pipe is uninitialized, or a valid QTD is
1827 		 * not found, then insert a QTD on the interrupt IN
1828 		 * endpoint.
1829 */
1830 error = ehci_start_pipe_polling(ehcip, ph, flags);
1831
1832 if (error != USB_SUCCESS) {
1833 USB_DPRINTF_L2(PRINT_MASK_INTR,
1834 ehcip->ehci_log_hdl,
1835 "ehci_start_periodic_pipe_polling: "
1836 "Start polling failed");
1837
1838 pp->pp_client_periodic_in_reqp = NULL;
1839
1840 return (error);
1841 }
1842
1843 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1844 "ehci_start_periodic_pipe_polling: PP = 0x%p", (void *)pp);
1845
1846 #ifdef DEBUG
1847 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1848 case USB_EP_ATTR_INTR:
1849 ASSERT((pp->pp_tw_head != NULL) &&
1850 (pp->pp_tw_tail != NULL));
1851 break;
1852 case USB_EP_ATTR_ISOCH:
1853 ASSERT((pp->pp_itw_head != NULL) &&
1854 (pp->pp_itw_tail != NULL));
1855 break;
1856 }
1857 #endif
1858
1859 break;
1860 case EHCI_PIPE_STATE_ACTIVE:
1861 USB_DPRINTF_L2(PRINT_MASK_INTR,
1862 ehcip->ehci_log_hdl,
1863 "ehci_start_periodic_pipe_polling: "
1864 "Polling is already in progress");
1865
1866 error = USB_FAILURE;
1867 break;
1868 case EHCI_PIPE_STATE_ERROR:
1869 USB_DPRINTF_L2(PRINT_MASK_INTR,
1870 ehcip->ehci_log_hdl,
1871 "ehci_start_periodic_pipe_polling: "
1872 		    "Pipe is halted, perform a reset "
1873 		    "before restarting polling");
1874
1875 error = USB_FAILURE;
1876 break;
1877 default:
1878 USB_DPRINTF_L2(PRINT_MASK_INTR,
1879 ehcip->ehci_log_hdl,
1880 "ehci_start_periodic_pipe_polling: "
1881 "Undefined state");
1882
1883 error = USB_FAILURE;
1884 break;
1885 }
1886
1887 return (error);
1888 }
1889
1890
1891 /*
1892 * ehci_start_pipe_polling:
1893 *
1894 * Insert the number of periodic requests corresponding to polling
1895 * interval as calculated during pipe open.
1896 */
1897 static int
1898 ehci_start_pipe_polling(
1899 ehci_state_t *ehcip,
1900 usba_pipe_handle_data_t *ph,
1901 usb_flags_t flags)
1902 {
1903 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1904 usb_ep_descr_t *eptd = &ph->p_ep;
1905 int error = USB_FAILURE;
1906
1907 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1908 "ehci_start_pipe_polling:");
1909
1910 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1911
1912 /*
1913 * For the start polling, pp_max_periodic_req_cnt will be zero
1914 * and for the restart polling request, it will be non zero.
1915 *
1916 * In case of start polling request, find out number of requests
1917 * required for the Interrupt IN endpoints corresponding to the
1918 * endpoint polling interval. For Isochronous IN endpoints, it is
1919 * always fixed since its polling interval will be one ms.
1920 */
1921 if (pp->pp_max_periodic_req_cnt == 0) {
1922
1923 ehci_set_periodic_pipe_polling(ehcip, ph);
1924 }
1925
1926 ASSERT(pp->pp_max_periodic_req_cnt != 0);
1927
1928 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1929 case USB_EP_ATTR_INTR:
1930 error = ehci_start_intr_polling(ehcip, ph, flags);
1931 break;
1932 case USB_EP_ATTR_ISOCH:
1933 error = ehci_start_isoc_polling(ehcip, ph, flags);
1934 break;
1935 }
1936
1937 return (error);
1938 }
1939
1940 static int
1941 ehci_start_intr_polling(
1942 ehci_state_t *ehcip,
1943 usba_pipe_handle_data_t *ph,
1944 usb_flags_t flags)
1945 {
1946 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1947 ehci_trans_wrapper_t *tw_list, *tw;
1948 int i, total_tws;
1949 int error = USB_SUCCESS;
1950
1951 /* Allocate all the necessary resources for the IN transfer */
1952 tw_list = NULL;
1953 total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1954 for (i = 0; i < total_tws; i += 1) {
1955 tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1956 if (tw == NULL) {
1957 error = USB_NO_RESOURCES;
1958 /* There are not enough resources, deallocate the TWs */
1959 tw = tw_list;
1960 while (tw != NULL) {
1961 tw_list = tw->tw_next;
1962 ehci_deallocate_intr_in_resource(
1963 ehcip, pp, tw);
1964 ehci_deallocate_tw(ehcip, pp, tw);
1965 tw = tw_list;
1966 }
1967
1968 return (error);
1969 } else {
1970 if (tw_list == NULL) {
1971 tw_list = tw;
1972 }
1973 }
1974 }
1975
1976 while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1977
1978 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1979 "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
1980 pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1981 (void *)tw_list);
1982
1983 tw = tw_list;
1984 tw_list = tw->tw_next;
1985
1986 ehci_insert_intr_req(ehcip, pp, tw, flags);
1987
1988 pp->pp_cur_periodic_req_cnt++;
1989 }
1990
1991 return (error);
1992 }
1993
1994
1995 /*
1996 * ehci_set_periodic_pipe_polling:
1997 *
1998 * Calculate the number of periodic requests needed corresponding to the
1999 * interrupt IN endpoints polling interval. Table below gives the number
2000 * of periodic requests needed for the interrupt IN endpoints according
2001 * to endpoint polling interval.
2002 *
2003 * Polling interval Number of periodic requests
2004 *
2005 * 1ms 4
2006 * 2ms 2
2007 * 4ms to 32ms 1
2008 */
2009 static void
2010 ehci_set_periodic_pipe_polling(
2011 ehci_state_t *ehcip,
2012 usba_pipe_handle_data_t *ph)
2013 {
2014 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2015 usb_ep_descr_t *endpoint = &ph->p_ep;
2016 uchar_t ep_attr = endpoint->bmAttributes;
2017 uint_t interval;
2018
2019 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2020 "ehci_set_periodic_pipe_polling:");
2021
2022 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2023
2024 pp->pp_cur_periodic_req_cnt = 0;
2025
2026 /*
2027 	 * Check whether the USB_ATTRS_ONE_XFER attribute is set
2028 	 * and, if so, set pp->pp_max_periodic_req_cnt to one request.
2029 */
2030 if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
2031 (pp->pp_client_periodic_in_reqp)) {
2032 usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
2033 pp->pp_client_periodic_in_reqp;
2034
2035 if (intr_reqp->intr_attributes &
2036 USB_ATTRS_ONE_XFER) {
2037
2038 pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2039
2040 return;
2041 }
2042 }
2043
2044 mutex_enter(&ph->p_usba_device->usb_mutex);
2045
2046 /*
2047 * The ehci_adjust_polling_interval function will not fail
2048 * at this instance since bandwidth allocation is already
2049 * done. Here we are getting only the periodic interval.
2050 */
2051 interval = ehci_adjust_polling_interval(ehcip, endpoint,
2052 ph->p_usba_device->usb_port_status);
2053
2054 mutex_exit(&ph->p_usba_device->usb_mutex);
2055
2056 switch (interval) {
2057 case EHCI_INTR_1MS_POLL:
2058 pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2059 break;
2060 case EHCI_INTR_2MS_POLL:
2061 pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2062 break;
2063 default:
2064 pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2065 break;
2066 }
2067
2068 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2069 "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2070 pp->pp_max_periodic_req_cnt);
2071 }
2072
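/*
 * Illustrative sketch (not part of the driver source): assuming the
 * EHCI_INTR_*_REQS constants follow the table above, a 1ms interrupt
 * IN endpoint ends up with four requests queued concurrently, while
 * an 8ms endpoint keeps a single request outstanding:
 *
 *	ehci_set_periodic_pipe_polling(ehcip, ph);
 *	ASSERT(pp->pp_max_periodic_req_cnt != 0);
 *	total = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
 *	(ehci_start_intr_polling() then allocates and inserts "total" TWs)
 */
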
2073 /*
2074 * ehci_allocate_intr_resources:
2075 *
2076  * Calculates the number of QTDs necessary for an interrupt transfer, and
2077  * allocates all the necessary resources.
2077 * all the necessary resources.
2078 *
2079  * Returns NULL if there are insufficient resources, otherwise the TW.
2080 */
2081 ehci_trans_wrapper_t *
2082 ehci_allocate_intr_resources(
2083 ehci_state_t *ehcip,
2084 usba_pipe_handle_data_t *ph,
2085 usb_intr_req_t *intr_reqp,
2086 usb_flags_t flags)
2087 {
2088 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2089 int pipe_dir;
2090 size_t qtd_count = 1;
2091 size_t tw_length;
2092 ehci_trans_wrapper_t *tw;
2093
2094 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2095 "ehci_allocate_intr_resources:");
2096
2097 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2098
2099 pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2100
2101 /* Get the length of interrupt transfer & alloc data */
2102 if (intr_reqp) {
2103 tw_length = intr_reqp->intr_len;
2104 } else {
2105 ASSERT(pipe_dir == USB_EP_DIR_IN);
2106 tw_length = (pp->pp_client_periodic_in_reqp) ?
2107 (((usb_intr_req_t *)pp->
2108 pp_client_periodic_in_reqp)->intr_len) :
2109 ph->p_ep.wMaxPacketSize;
2110 }
2111
2112 /* Check the size of interrupt request */
2113 if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2114
2115 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2116 "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2117 "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2118
2119 return (NULL);
2120 }
2121
2122 if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2123 qtd_count)) == NULL) {
2124
2125 return (NULL);
2126 }
2127
2128 if (pipe_dir == USB_EP_DIR_IN) {
2129 if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2130 USB_SUCCESS) {
2131 ehci_deallocate_tw(ehcip, pp, tw);
2132 }
2133 tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2134 } else {
2135 if (tw_length) {
2136 ASSERT(intr_reqp->intr_data != NULL);
2137
2138 /* Copy the data into the buffer */
2139 bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2140 intr_reqp->intr_len);
2141
2142 Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2143 intr_reqp->intr_len);
2144 }
2145
2146 tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2147 tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2148 }
2149
2150 if (intr_reqp) {
2151 tw->tw_timeout = intr_reqp->intr_timeout;
2152 }
2153
2154 /*
2155 * Initialize the callback and any callback
2156 * data required when the qtd completes.
2157 */
2158 tw->tw_handle_qtd = ehci_handle_intr_qtd;
2159 tw->tw_handle_callback_value = NULL;
2160
2161 return (tw);
2162 }
2163
2164
2165 /*
2166 * ehci_insert_intr_req:
2167 *
2168 * Insert an Interrupt request into the Host Controller's periodic list.
2169 */
2170 /* ARGSUSED */
2171 void
2172 ehci_insert_intr_req(
2173 ehci_state_t *ehcip,
2174 ehci_pipe_private_t *pp,
2175 ehci_trans_wrapper_t *tw,
2176 usb_flags_t flags)
2177 {
2178 uint_t ctrl = 0;
2179
2180 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2181
2182 ASSERT(tw->tw_curr_xfer_reqp != NULL);
2183
2184 ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2185
2186 /* Insert another interrupt QTD */
2187 (void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2188
2189 /* Start the timer for this Interrupt transfer */
2190 ehci_start_xfer_timer(ehcip, pp, tw);
2191 }
2192
2193
2194 /*
2195 * ehci_stop_periodic_pipe_polling:
2196 */
2197 /* ARGSUSED */
2198 int
2199 ehci_stop_periodic_pipe_polling(
2200 ehci_state_t *ehcip,
2201 usba_pipe_handle_data_t *ph,
2202 usb_flags_t flags)
2203 {
2204 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2205 usb_ep_descr_t *eptd = &ph->p_ep;
2206
2207 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2208 "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2209
2210 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2211
2212 /*
2213 * Check and handle stop polling on root hub interrupt pipe.
2214 */
2215 if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2216 ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2217 USB_EP_ATTR_INTR)) {
2218
2219 ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2220
2221 return (USB_SUCCESS);
2222 }
2223
2224 if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2225
2226 USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2227 "ehci_stop_periodic_pipe_polling: "
2228 "Polling already stopped");
2229
2230 return (USB_SUCCESS);
2231 }
2232
2233 /* Set pipe state to pipe stop polling */
2234 pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2235
2236 ehci_pipe_cleanup(ehcip, ph);
2237
2238 return (USB_SUCCESS);
2239 }
2240
2241
2242 /*
2243 * ehci_insert_qtd:
2244 *
2245 * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2246 * Always returns USB_SUCCESS for now. Once Isoch has been implemented,
2247 * it may return USB_FAILURE.
2248 */
2249 int
2250 ehci_insert_qtd(
2251 ehci_state_t *ehcip,
2252 uint32_t qtd_ctrl,
2253 size_t qtd_dma_offs,
2254 size_t qtd_length,
2255 uint32_t qtd_ctrl_phase,
2256 ehci_pipe_private_t *pp,
2257 ehci_trans_wrapper_t *tw)
2258 {
2259 ehci_qtd_t *curr_dummy_qtd, *next_dummy_qtd;
2260 ehci_qtd_t *new_dummy_qtd;
2261 ehci_qh_t *qh = pp->pp_qh;
2262 int error = USB_SUCCESS;
2263
2264 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2265
2266 /* Allocate new dummy QTD */
2267 new_dummy_qtd = tw->tw_qtd_free_list;
2268
2269 ASSERT(new_dummy_qtd != NULL);
2270 tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2271 Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2272 Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2273
2274 /* Get the current and next dummy QTDs */
2275 curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2276 Get_QH(qh->qh_dummy_qtd));
2277 next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2278 Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2279
2280 /* Update QH's dummy qtd field */
2281 Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2282
2283 /* Update next dummy's next qtd pointer */
2284 Set_QTD(next_dummy_qtd->qtd_next_qtd,
2285 ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2286
2287 /*
2288 * Fill in the current dummy qtd and
2289 * add the new dummy to the end.
2290 */
2291 ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2292 qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2293
2294 /* Insert this qtd onto the tw */
2295 ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2296
2297 /*
2298 * Insert this qtd onto active qtd list.
2299 * Don't insert polled mode qtd here.
2300 */
2301 if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2302 /* Insert this qtd onto active qtd list */
2303 ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2304 }
2305
2306 /* Print qh and qtd */
2307 ehci_print_qh(ehcip, qh);
2308 ehci_print_qtd(ehcip, curr_dummy_qtd);
2309
2310 return (error);
2311 }
2312
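/*
 * Illustrative sketch of the dummy QTD scheme used by ehci_insert_qtd()
 * above (not part of the driver source). The QH always terminates in a
 * chain of dummy QTDs; a transfer becomes live by filling in the current
 * dummy and appending a fresh dummy taken from the TW's QTD free list:
 *
 *	before:	QH.dummy --> curr_dummy --> next_dummy
 *	after:	QH.dummy --> next_dummy --> new_dummy
 *
 * curr_dummy now carries the real transfer; its active bit is set last
 * in ehci_fill_in_qtd(), so the HC never sees a half-built QTD.
 */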
2313
2314 /*
2315 * ehci_allocate_qtd_from_pool:
2316 *
2317 * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2318 */
2319 static ehci_qtd_t *
2320 ehci_allocate_qtd_from_pool(ehci_state_t *ehcip)
2321 {
2322 int i, ctrl;
2323 ehci_qtd_t *qtd;
2324
2325 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2326
2327 /*
2328 * Search for a blank Transfer Descriptor (QTD)
2329 * in the QTD buffer pool.
2330 */
2331 for (i = 0; i < ehci_qtd_pool_size; i ++) {
2332 ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2333 if (ctrl == EHCI_QTD_FREE) {
2334 break;
2335 }
2336 }
2337
2338 if (i >= ehci_qtd_pool_size) {
2339 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2340 "ehci_allocate_qtd_from_pool: QTD exhausted");
2341
2342 return (NULL);
2343 }
2344
2345 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2346 "ehci_allocate_qtd_from_pool: Allocated %d", i);
2347
2348 /* Create a new dummy for the end of the QTD list */
2349 qtd = &ehcip->ehci_qtd_pool_addr[i];
2350
2351 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2352 "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2353
2354 /* Mark the newly allocated QTD as a dummy */
2355 Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2356
2357 /* Mark the status of this new QTD to halted state */
2358 Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2359
2360 /* Disable dummy QTD's next and alternate next pointers */
2361 Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2362 Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2363
2364 return (qtd);
2365 }
2366
2367
2368 /*
2369 * ehci_fill_in_qtd:
2370 *
2371 * Fill in the fields of a Transfer Descriptor (QTD).
2372 * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2373 * it is associated with.
2374 *
2375 * Note:
2376 * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
2377  *		should transfer from. It should be 4K aligned, and when
2378  *		a TW has more than one QTD, the QTDs must be filled in
2379 * increasing order.
2380 * qtd_length - the total bytes to transfer.
2381 */
2382 /*ARGSUSED*/
2383 static void
2384 ehci_fill_in_qtd(
2385 ehci_state_t *ehcip,
2386 ehci_qtd_t *qtd,
2387 uint32_t qtd_ctrl,
2388 size_t qtd_dma_offs,
2389 size_t qtd_length,
2390 uint32_t qtd_ctrl_phase,
2391 ehci_pipe_private_t *pp,
2392 ehci_trans_wrapper_t *tw)
2393 {
2394 uint32_t buf_addr;
2395 size_t buf_len = qtd_length;
2396 uint32_t ctrl = qtd_ctrl;
2397 uint_t i = 0;
2398 int rem_len;
2399
2400 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2401 "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2402 "len 0x%lx", (void *)qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2403
2404 /* Assert that the qtd to be filled in is a dummy */
2405 ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2406
2407 /* Change QTD's state Active */
2408 Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2409
2410 /* Set the total length data transfer */
2411 ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2412 & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2413
2414 /*
2415 * QTDs must be filled in increasing DMA offset order.
2416 * tw_dma_offs is initialized to be 0 at TW creation and
2417 * is only increased in this function.
2418 */
2419 ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2420
2421 /*
2422 * Save the starting dma buffer offset used and
2423 * length of data that will be transfered in
2424 * the current QTD.
2425 */
2426 Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2427 Set_QTD(qtd->qtd_xfer_len, buf_len);
2428
2429 while (buf_len) {
2430 /*
2431 * Advance to the next DMA cookie until finding the cookie
2432 * that qtd_dma_offs falls in.
2433 * It is very likely this loop will never repeat more than
2434 * once. It is here just to accommodate the case qtd_dma_offs
2435 * is increased by multiple cookies during two consecutive
2436 * calls into this function. In that case, the interim DMA
2437 * buffer is allowed to be skipped.
2438 */
2439 while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2440 qtd_dma_offs) {
2441 /*
2442 * tw_dma_offs always points to the starting offset
2443 * of a cookie
2444 */
2445 tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2446 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2447 tw->tw_cookie_idx++;
2448 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2449 }
2450
2451 /*
2452 		 * Count the remaining buffer length to be filled into
2453 		 * the QTD for the current DMA cookie.
2454 */
2455 rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2456 qtd_dma_offs;
2457
2458 /* Update the beginning of the buffer */
2459 buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2460 tw->tw_cookie.dmac_address;
2461 ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2462 Set_QTD(qtd->qtd_buf[i], buf_addr);
2463
2464 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2465 "ehci_fill_in_qtd: dmac_addr 0x%x dmac_size "
2466 "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2467 tw->tw_cookie_idx);
2468
2469 if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2470 ASSERT(buf_len <= rem_len);
2471 break;
2472 } else {
2473 ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2474 buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2475 qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2476 }
2477
2478 i++;
2479 }
2480
2481 /*
2482 * Setup the alternate next qTD pointer if appropriate. The alternate
2483 * qtd is currently pointing to a QTD that is not yet linked, but will
2484 * be in the very near future. If a short_xfer occurs in this
2485 	 * situation, the HC will automatically skip this QH. Eventually
2486 	 * everything will be placed and the alternate_qtd will be a valid QTD.
2487 * For more information on alternate qtds look at section 3.5.2 in the
2488 * EHCI spec.
2489 */
2490 if (tw->tw_alt_qtd != NULL) {
2491 Set_QTD(qtd->qtd_alt_next_qtd,
2492 (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2493 EHCI_QTD_ALT_NEXT_QTD_PTR));
2494 }
2495
2496 /*
2497 * For control, bulk and interrupt QTD, now
2498 * enable current QTD by setting active bit.
2499 */
2500 Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2501
2502 /*
2503 	 * For a Control Xfer, qtd_ctrl_phase is a valid field.
2504 */
2505 if (qtd_ctrl_phase) {
2506 Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2507 }
2508
2509 /* Set the transfer wrapper */
2510 ASSERT(tw != NULL);
2511 ASSERT(tw->tw_id != NULL);
2512
2513 Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2514 }
2515
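/*
 * Worked example for the DMA cookie walk in ehci_fill_in_qtd() above
 * (illustration only; the cookie sizes are hypothetical). Suppose the
 * TW buffer was bound as two 8K cookies and this call is made with
 * qtd_dma_offs = 0x3000 (4K aligned):
 *
 *	tw_dma_offs starts at 0; since 0 + 0x2000 <= 0x3000 the loop
 *	advances to the second cookie (tw_dma_offs = 0x2000, idx = 1).
 *	buf_addr = (0x3000 - 0x2000) + second cookie's dmac_address
 *	rem_len  = (0x2000 + 0x2000) - 0x3000 = 0x1000
 */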
2516
2517 /*
2518 * ehci_insert_qtd_on_tw:
2519 *
2520 * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2521 * are allocated for this transfer. Insert a QTD onto this list. The list
2522 * of QTD's does not include the dummy QTD that is at the end of the list of
2523 * QTD's for the endpoint.
2524 */
2525 static void
2526 ehci_insert_qtd_on_tw(
2527 ehci_state_t *ehcip,
2528 ehci_trans_wrapper_t *tw,
2529 ehci_qtd_t *qtd)
2530 {
2531 /*
2532 * Set the next pointer to NULL because
2533 * this is the last QTD on list.
2534 */
2535 Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2536
2537 if (tw->tw_qtd_head == NULL) {
2538 ASSERT(tw->tw_qtd_tail == NULL);
2539 tw->tw_qtd_head = qtd;
2540 tw->tw_qtd_tail = qtd;
2541 } else {
2542 ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2543
2544 ASSERT(dummy != NULL);
2545 ASSERT(dummy != qtd);
2546 ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2547
2548 /* Add the qtd to the end of the list */
2549 Set_QTD(dummy->qtd_tw_next_qtd,
2550 ehci_qtd_cpu_to_iommu(ehcip, qtd));
2551
2552 tw->tw_qtd_tail = qtd;
2553
2554 ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2555 }
2556 }
2557
2558
2559 /*
2560 * ehci_insert_qtd_into_active_qtd_list:
2561 *
2562 * Insert current QTD into active QTD list.
2563 */
2564 static void
2565 ehci_insert_qtd_into_active_qtd_list(
2566 ehci_state_t *ehcip,
2567 ehci_qtd_t *qtd)
2568 {
2569 ehci_qtd_t *curr_qtd, *next_qtd;
2570
2571 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2572
2573 curr_qtd = ehcip->ehci_active_qtd_list;
2574
2575 /* Insert this QTD into QTD Active List */
2576 if (curr_qtd) {
2577 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2578 Get_QTD(curr_qtd->qtd_active_qtd_next));
2579
2580 while (next_qtd) {
2581 curr_qtd = next_qtd;
2582 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2583 Get_QTD(curr_qtd->qtd_active_qtd_next));
2584 }
2585
2586 Set_QTD(qtd->qtd_active_qtd_prev,
2587 ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2588
2589 Set_QTD(curr_qtd->qtd_active_qtd_next,
2590 ehci_qtd_cpu_to_iommu(ehcip, qtd));
2591 } else {
2592 ehcip->ehci_active_qtd_list = qtd;
2593 Set_QTD(qtd->qtd_active_qtd_next, NULL);
2594 Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2595 }
2596 }
2597
2598
2599 /*
2600 * ehci_remove_qtd_from_active_qtd_list:
2601 *
2602 * Remove current QTD from the active QTD list.
2603 *
2604 * NOTE: This function is also called from POLLED MODE.
2605 */
2606 void
2607 ehci_remove_qtd_from_active_qtd_list(
2608 ehci_state_t *ehcip,
2609 ehci_qtd_t *qtd)
2610 {
2611 ehci_qtd_t *curr_qtd, *prev_qtd, *next_qtd;
2612
2613 ASSERT(qtd != NULL);
2614
2615 curr_qtd = ehcip->ehci_active_qtd_list;
2616
2617 while ((curr_qtd) && (curr_qtd != qtd)) {
2618 curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2619 Get_QTD(curr_qtd->qtd_active_qtd_next));
2620 }
2621
2622 if ((curr_qtd) && (curr_qtd == qtd)) {
2623 prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2624 Get_QTD(curr_qtd->qtd_active_qtd_prev));
2625 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2626 Get_QTD(curr_qtd->qtd_active_qtd_next));
2627
2628 if (prev_qtd) {
2629 Set_QTD(prev_qtd->qtd_active_qtd_next,
2630 Get_QTD(curr_qtd->qtd_active_qtd_next));
2631 } else {
2632 ehcip->ehci_active_qtd_list = next_qtd;
2633 }
2634
2635 if (next_qtd) {
2636 Set_QTD(next_qtd->qtd_active_qtd_prev,
2637 Get_QTD(curr_qtd->qtd_active_qtd_prev));
2638 }
2639 } else {
2640 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2641 "ehci_remove_qtd_from_active_qtd_list: "
2642 "Unable to find QTD in active_qtd_list");
2643 }
2644 }
2645
2646
2647 /*
2648 * ehci_traverse_qtds:
2649 *
2650  * Traverse the list of QTDs for the given pipe using its transfer wrappers.
2651  * Since the endpoint is marked as Halted, the Host Controller (HC) is no
2652  * longer accessing these QTDs. Remove all QTDs attached to the endpoint.
2653 */
2654 static void
2655 ehci_traverse_qtds(
2656 ehci_state_t *ehcip,
2657 usba_pipe_handle_data_t *ph)
2658 {
2659 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2660 ehci_trans_wrapper_t *next_tw;
2661 ehci_qtd_t *qtd;
2662 ehci_qtd_t *next_qtd;
2663
2664 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2665
2666 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2667 "ehci_traverse_qtds:");
2668
2669 /* Process the transfer wrappers for this pipe */
2670 next_tw = pp->pp_tw_head;
2671
2672 while (next_tw) {
2673 		/* Stop the transfer timer */
2674 ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2675
2676 qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2677
2678 /* Walk through each QTD for this transfer wrapper */
2679 while (qtd) {
2680 /* Remove this QTD from active QTD list */
2681 ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2682
2683 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2684 Get_QTD(qtd->qtd_tw_next_qtd));
2685
2686 /* Deallocate this QTD */
2687 ehci_deallocate_qtd(ehcip, qtd);
2688
2689 qtd = next_qtd;
2690 }
2691
2692 next_tw = next_tw->tw_next;
2693 }
2694
2695 /* Clear current qtd pointer */
2696 Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2697
2698 /* Update the next qtd pointer in the QH */
2699 Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2700 }
2701
2702
2703 /*
2704 * ehci_deallocate_qtd:
2705 *
2706 * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2707 *
2708 * NOTE: This function is also called from POLLED MODE.
2709 */
2710 void
2711 ehci_deallocate_qtd(
2712 ehci_state_t *ehcip,
2713 ehci_qtd_t *old_qtd)
2714 {
2715 ehci_trans_wrapper_t *tw = NULL;
2716
2717 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2718 "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2719
2720 /*
2721 * Obtain the transaction wrapper and tw will be
2722 * NULL for the dummy QTDs.
2723 */
2724 if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2725 tw = (ehci_trans_wrapper_t *)
2726 EHCI_LOOKUP_ID((uint32_t)
2727 Get_QTD(old_qtd->qtd_trans_wrapper));
2728
2729 ASSERT(tw != NULL);
2730 }
2731
2732 /*
2733 * If QTD's transfer wrapper is NULL, don't access its TW.
2734 * Just free the QTD.
2735 */
2736 if (tw) {
2737 ehci_qtd_t *qtd, *next_qtd;
2738
2739 qtd = tw->tw_qtd_head;
2740
2741 if (old_qtd != qtd) {
2742 next_qtd = ehci_qtd_iommu_to_cpu(
2743 ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2744
2745 while (next_qtd != old_qtd) {
2746 qtd = next_qtd;
2747 next_qtd = ehci_qtd_iommu_to_cpu(
2748 ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2749 }
2750
2751 Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2752
2753 if (qtd->qtd_tw_next_qtd == NULL) {
2754 tw->tw_qtd_tail = qtd;
2755 }
2756 } else {
2757 tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2758 ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2759
2760 if (tw->tw_qtd_head == NULL) {
2761 tw->tw_qtd_tail = NULL;
2762 }
2763 }
2764 }
2765
2766 bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2767 Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2768
2769 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2770 "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2771 }
2772
2773
2774 /*
2775 * ehci_qtd_cpu_to_iommu:
2776 *
2777  * This function converts the given Transfer Descriptor (QTD) CPU address
2778  * to an IO address.
2779 *
2780 * NOTE: This function is also called from POLLED MODE.
2781 */
2782 uint32_t
2783 ehci_qtd_cpu_to_iommu(
2784 ehci_state_t *ehcip,
2785 ehci_qtd_t *addr)
2786 {
2787 uint32_t td;
2788
2789 td = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2790 (uint32_t)((uintptr_t)addr -
2791 (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2792
2793 ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2794 (uint32_t) (sizeof (ehci_qtd_t) *
2795 (addr - ehcip->ehci_qtd_pool_addr))) ==
2796 (ehcip->ehci_qtd_pool_cookie.dmac_address +
2797 (uint32_t)((uintptr_t)addr - (uintptr_t)
2798 (ehcip->ehci_qtd_pool_addr))));
2799
2800 ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2801 ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2802 sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2803
2804 return (td);
2805 }
2806
2807
2808 /*
2809 * ehci_qtd_iommu_to_cpu:
2810 *
2811  * This function converts the given Transfer Descriptor (QTD) IO address
2812  * to a CPU address.
2813 *
2814 * NOTE: This function is also called from POLLED MODE.
2815 */
2816 ehci_qtd_t *
2817 ehci_qtd_iommu_to_cpu(
2818 ehci_state_t *ehcip,
2819 uintptr_t addr)
2820 {
2821 ehci_qtd_t *qtd;
2822
2823 if (addr == NULL) {
2824
2825 return (NULL);
2826 }
2827
2828 qtd = (ehci_qtd_t *)((uintptr_t)
2829 (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2830 (uintptr_t)ehcip->ehci_qtd_pool_addr);
2831
2832 ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2833 ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2834 (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2835
2836 return (qtd);
2837 }
2838
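/*
 * Minimal sketch of the QTD address translation above (illustration
 * only). The QTD pool is a single physically contiguous DMA allocation,
 * so both directions are plain byte-offset arithmetic:
 *
 *	iommu_addr = pool_cookie.dmac_address + (qtd - pool_base)
 *	cpu_addr   = pool_base + (iommu_addr - pool_cookie.dmac_address)
 *
 * For example, the QTD at pool index 3 maps to
 * dmac_address + 3 * sizeof (ehci_qtd_t).
 */
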
2839 /*
2840  * ehci_allocate_tds_for_tw:
2841  *
2842  * Allocate n Transfer Descriptors (QTD) from the QTD buffer pool and place
2843  * them into the TW. Also chooses the correct alternate qtd when required.
2844  * It is used for hardware short transfer support. For more information on
2845  * alternate qtds look at section 3.5.2 in the EHCI spec.
2846  * Here is how the alternate qtds are used:
2847  *
2848  * Bulk: used fully.
2849  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2850  * Ctrl: should not use an alternate QTD.
2851  * Isoch: doesn't support short_xfer, nor does it use QTDs.
2852  *
2853  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested
2854  * QTDs, otherwise USB_SUCCESS.
2855 */
2856 int
2857 ehci_allocate_tds_for_tw(
2858 ehci_state_t *ehcip,
2859 ehci_pipe_private_t *pp,
2860 ehci_trans_wrapper_t *tw,
2861 size_t qtd_count)
2862 {
2863 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
2864 uchar_t attributes;
2865 ehci_qtd_t *qtd;
2866 uint32_t qtd_addr;
2867 int i;
2868 int error = USB_SUCCESS;
2869
2870 attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2871
2872 for (i = 0; i < qtd_count; i += 1) {
2873 qtd = ehci_allocate_qtd_from_pool(ehcip);
2874 if (qtd == NULL) {
2875 error = USB_NO_RESOURCES;
2876 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2877 "ehci_allocate_qtds_for_tw: "
2878 "Unable to allocate %lu QTDs",
2879 qtd_count);
2880 break;
2881 }
2882 if (i > 0) {
2883 qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2884 tw->tw_qtd_free_list);
2885 Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2886 }
2887 tw->tw_qtd_free_list = qtd;
2888
2889 /*
2890 * Save the second one as a pointer to the new dummy 1.
2891 * It is used later for the alt_qtd_ptr. Xfers with only
2892 * one qtd do not need alt_qtd_ptr.
2893 		 * The qtds are allocated and pushed onto a stack, which is
2894 * why the second qtd allocated will turn out to be the
2895 * new dummy 1.
2896 */
2897 if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2898 tw->tw_alt_qtd = qtd;
2899 }
2900 }
2901
2902 return (error);
2903 }
2904
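/*
 * Illustrative note (not part of the driver source): the loop above
 * pushes each newly allocated QTD onto tw_qtd_free_list like a stack,
 * so after three allocations the list reads qtd3 -> qtd2 -> qtd1. The
 * second allocation (qtd2) is the one remembered in tw_alt_qtd for bulk
 * pipes; it later becomes the "new dummy 1" that a short transfer jumps
 * to through the alternate next qTD pointer (EHCI spec section 3.5.2).
 */
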
2905 /*
2906 * ehci_allocate_tw_resources:
2907 *
2908 * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2909  * from the QTD buffer pool and places them into the TW. It is an all-
2910  * or-nothing transaction.
2911  *
2912  * Returns NULL if there are insufficient resources, otherwise the TW.
2913 */
2914 static ehci_trans_wrapper_t *
2915 ehci_allocate_tw_resources(
2916 ehci_state_t *ehcip,
2917 ehci_pipe_private_t *pp,
2918 size_t tw_length,
2919 usb_flags_t usb_flags,
2920 size_t qtd_count)
2921 {
2922 ehci_trans_wrapper_t *tw;
2923
2924 tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2925
2926 if (tw == NULL) {
2927 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2928 "ehci_allocate_tw_resources: Unable to allocate TW");
2929 } else {
2930 if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2931 USB_SUCCESS) {
2932 tw->tw_num_qtds = (uint_t)qtd_count;
2933 } else {
2934 ehci_deallocate_tw(ehcip, pp, tw);
2935 tw = NULL;
2936 }
2937 }
2938
2939 return (tw);
2940 }
2941
2942
2943 /*
2944 * ehci_free_tw_td_resources:
2945 *
2946  * Free all QTD resources allocated for a Transaction Wrapper (TW).
2947  * Does not free the TW itself.
2950 */
2951 static void
2952 ehci_free_tw_td_resources(
2953 ehci_state_t *ehcip,
2954 ehci_trans_wrapper_t *tw)
2955 {
2956 ehci_qtd_t *qtd = NULL;
2957 ehci_qtd_t *temp_qtd = NULL;
2958
2959 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2960 "ehci_free_tw_td_resources: tw = 0x%p", (void *)tw);
2961
2962 qtd = tw->tw_qtd_free_list;
2963 while (qtd != NULL) {
2964 /* Save the pointer to the next qtd before destroying it */
2965 temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2966 Get_QTD(qtd->qtd_tw_next_qtd));
2967 ehci_deallocate_qtd(ehcip, qtd);
2968 qtd = temp_qtd;
2969 }
2970 tw->tw_qtd_free_list = NULL;
2971 }
2972
2973 /*
2974 * Transfer Wrapper functions
2975 *
2976 * ehci_create_transfer_wrapper:
2977 *
2978 * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2979 * resources.
2980 */
2981 static ehci_trans_wrapper_t *
2982 ehci_create_transfer_wrapper(
2983 ehci_state_t *ehcip,
2984 ehci_pipe_private_t *pp,
2985 size_t length,
2986 uint_t usb_flags)
2987 {
2988 ddi_device_acc_attr_t dev_attr;
2989 ddi_dma_attr_t dma_attr;
2990 int result;
2991 size_t real_length;
2992 ehci_trans_wrapper_t *tw;
2993 int kmem_flag;
2994 int (*dmamem_wait)(caddr_t);
2995 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
2996
2997 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2998 "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2999 length, usb_flags);
3000
3001 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3002
3003 /* SLEEP flag should not be used while holding mutex */
3004 kmem_flag = KM_NOSLEEP;
3005 dmamem_wait = DDI_DMA_DONTWAIT;
3006
3007 /* Allocate space for the transfer wrapper */
3008 tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3009
3010 if (tw == NULL) {
3011 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3012 "ehci_create_transfer_wrapper: kmem_zalloc failed");
3013
3014 return (NULL);
3015 }
3016
3017 /* zero-length packet doesn't need to allocate dma memory */
3018 if (length == 0) {
3019
3020 goto dmadone;
3021 }
3022
3023 /* allow sg lists for transfer wrapper dma memory */
3024 bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3025 dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3026 dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3027
3028 /* Allocate the DMA handle */
3029 result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3030 &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3031
3032 if (result != DDI_SUCCESS) {
3033 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3034 "ehci_create_transfer_wrapper: Alloc handle failed");
3035
3036 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3037
3038 return (NULL);
3039 }
3040
3041 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3042
3043 /* no need for swapping the raw data */
3044 dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
3045 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3046
3047 /* Allocate the memory */
3048 result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3049 &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3050 (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3051
3052 if (result != DDI_SUCCESS) {
3053 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3054 "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3055
3056 ddi_dma_free_handle(&tw->tw_dmahandle);
3057 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3058
3059 return (NULL);
3060 }
3061
3062 ASSERT(real_length >= length);
3063
3064 /* Bind the handle */
3065 result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3066 (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3067 dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3068
3069 if (result != DDI_DMA_MAPPED) {
3070 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3071
3072 ddi_dma_mem_free(&tw->tw_accesshandle);
3073 ddi_dma_free_handle(&tw->tw_dmahandle);
3074 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3075
3076 return (NULL);
3077 }
3078
3079 tw->tw_cookie_idx = 0;
3080 tw->tw_dma_offs = 0;
3081
3082 dmadone:
3083 /*
3084 * Only allow one wrapper to be added at a time. Insert the
3085 * new transaction wrapper into the list for this pipe.
3086 */
3087 if (pp->pp_tw_head == NULL) {
3088 pp->pp_tw_head = tw;
3089 pp->pp_tw_tail = tw;
3090 } else {
3091 pp->pp_tw_tail->tw_next = tw;
3092 pp->pp_tw_tail = tw;
3093 }
3094
3095 /* Store the transfer length */
3096 tw->tw_length = length;
3097
3098 /* Store a back pointer to the pipe private structure */
3099 tw->tw_pipe_private = pp;
3100
3101 /* Store the transfer type - synchronous or asynchronous */
3102 tw->tw_flags = usb_flags;
3103
3104 /* Get and Store 32bit ID */
3105 tw->tw_id = EHCI_GET_ID((void *)tw);
3106
3107 ASSERT(tw->tw_id != NULL);
3108
3109 /* isoc ep will not come here */
3110 if (EHCI_INTR_ENDPOINT(eptd)) {
3111 ehcip->ehci_periodic_req_count++;
3112 } else {
3113 ehcip->ehci_async_req_count++;
3114 }
3115 ehci_toggle_scheduler(ehcip);
3116
3117 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3118 "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3119 (void *)tw, tw->tw_ncookies);
3120
3121 return (tw);
3122 }
3123
3124
3125 /*
3126 * ehci_start_xfer_timer:
3127 *
3128 * Start the timer for the control, bulk and for one time interrupt
3129 * transfers.
3130 */
3131 /* ARGSUSED */
3132 static void
3133 ehci_start_xfer_timer(
3134 ehci_state_t *ehcip,
3135 ehci_pipe_private_t *pp,
3136 ehci_trans_wrapper_t *tw)
3137 {
3138 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3139 "ehci_start_xfer_timer: tw = 0x%p", (void *)tw);
3140
3141 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3142
3143 /*
3144 * The timeout handling is done only for control, bulk and for
3145 * one time Interrupt transfers.
3146 *
3147 	 * NOTE: If the timeout is zero, assume an infinite timeout and
3148 	 * don't insert this transfer on the timeout list.
3149 */
3150 if (tw->tw_timeout) {
3151 /*
3152 * Add this transfer wrapper to the head of the pipe's
3153 * tw timeout list.
3154 */
3155 if (pp->pp_timeout_list) {
3156 tw->tw_timeout_next = pp->pp_timeout_list;
3157 }
3158
3159 pp->pp_timeout_list = tw;
3160 ehci_start_timer(ehcip, pp);
3161 }
3162 }
3163
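/*
 * Illustrative timeline (not part of the driver source), assuming the
 * per-pipe timer below fires once a second: a bulk request submitted
 * with a 5 second timeout is queued on pp_timeout_list with
 * tw_timeout = 5; ehci_xfer_timeout_handler() decrements it on every
 * tick, and on the fifth tick the TW is removed from the timeout list,
 * its QTDs are pulled off the active QTD list, and the transfer is
 * failed with USB_CR_TIMEOUT. A tw_timeout of zero is never queued,
 * i.e. it means an infinite timeout.
 */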
3164
3165 /*
3166 * ehci_stop_xfer_timer:
3167 *
3168  * Stop the timer for the control, bulk and for one time interrupt
3169 * transfers.
3170 */
3171 void
3172 ehci_stop_xfer_timer(
3173 ehci_state_t *ehcip,
3174 ehci_trans_wrapper_t *tw,
3175 uint_t flag)
3176 {
3177 ehci_pipe_private_t *pp;
3178 timeout_id_t timer_id;
3179
3180 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3181 "ehci_stop_xfer_timer: tw = 0x%p", (void *)tw);
3182
3183 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3184
3185 /* Obtain the pipe private structure */
3186 pp = tw->tw_pipe_private;
3187
3188 /* check if the timeout tw list is empty */
3189 if (pp->pp_timeout_list == NULL) {
3190
3191 return;
3192 }
3193
3194 switch (flag) {
3195 case EHCI_REMOVE_XFER_IFLAST:
3196 if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3197 break;
3198 }
3199
3200 /* FALLTHRU */
3201 case EHCI_REMOVE_XFER_ALWAYS:
3202 ehci_remove_tw_from_timeout_list(ehcip, tw);
3203
3204 if ((pp->pp_timeout_list == NULL) &&
3205 (pp->pp_timer_id)) {
3206
3207 timer_id = pp->pp_timer_id;
3208
3209 /* Reset the timer id to zero */
3210 pp->pp_timer_id = 0;
3211
3212 mutex_exit(&ehcip->ehci_int_mutex);
3213
3214 (void) untimeout(timer_id);
3215
3216 mutex_enter(&ehcip->ehci_int_mutex);
3217 }
3218 break;
3219 default:
3220 break;
3221 }
3222 }
3223
3224
3225 /*
3226 * ehci_xfer_timeout_handler:
3227 *
3228 * Control or bulk transfer timeout handler.
3229 */
3230 static void
3231 ehci_xfer_timeout_handler(void *arg)
3232 {
3233 usba_pipe_handle_data_t *ph = (usba_pipe_handle_data_t *)arg;
3234 ehci_state_t *ehcip = ehci_obtain_state(
3235 ph->p_usba_device->usb_root_hub_dip);
3236 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3237 ehci_trans_wrapper_t *tw, *next;
3238 ehci_trans_wrapper_t *expire_xfer_list = NULL;
3239 ehci_qtd_t *qtd;
3240
3241 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3242 "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p",
3243 (void *)ehcip, (void *)ph);
3244
3245 mutex_enter(&ehcip->ehci_int_mutex);
3246
3247 /*
3248 	 * Check whether the timeout handler is still valid.
3249 */
3250 if (pp->pp_timer_id != 0) {
3251
3252 /* Reset the timer id to zero */
3253 pp->pp_timer_id = 0;
3254 } else {
3255 mutex_exit(&ehcip->ehci_int_mutex);
3256
3257 return;
3258 }
3259
3260 /* Get the transfer timeout list head */
3261 tw = pp->pp_timeout_list;
3262
3263 while (tw) {
3264
3265 /* Get the transfer on the timeout list */
3266 next = tw->tw_timeout_next;
3267
3268 tw->tw_timeout--;
3269
3270 if (tw->tw_timeout <= 0) {
3271
3272 /* remove the tw from the timeout list */
3273 ehci_remove_tw_from_timeout_list(ehcip, tw);
3274
3275 /* remove QTDs from active QTD list */
3276 qtd = tw->tw_qtd_head;
3277 while (qtd) {
3278 ehci_remove_qtd_from_active_qtd_list(
3279 ehcip, qtd);
3280
3281 /* Get the next QTD from the wrapper */
3282 qtd = ehci_qtd_iommu_to_cpu(ehcip,
3283 Get_QTD(qtd->qtd_tw_next_qtd));
3284 }
3285
3286 /*
3287 			 * Preserve the order of the requests'
3288 			 * start time sequence.
3289 */
3290 tw->tw_timeout_next = expire_xfer_list;
3291 expire_xfer_list = tw;
3292 }
3293
3294 tw = next;
3295 }
3296
3297 /*
3298 * The timer should be started before the callbacks.
3299 * There is always a chance that ehci interrupts come
3300 * in when we release the mutex while calling the tw back.
3301 * To keep an accurate timeout it should be restarted
3302 * as soon as possible.
3303 */
3304 ehci_start_timer(ehcip, pp);
3305
3306 /* Get the expired transfer timeout list head */
3307 tw = expire_xfer_list;
3308
3309 while (tw) {
3310
3311 /* Get the next tw on the expired transfer timeout list */
3312 next = tw->tw_timeout_next;
3313
3314 /*
3315 * The error handle routine will release the mutex when
3316 * calling back to USBA. But this will not cause any race.
3317 * We do the callback and are relying on ehci_pipe_cleanup()
3318 * to halt the queue head and clean up since we should not
3319 * block in timeout context.
3320 */
3321 ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3322
3323 tw = next;
3324 }
3325 mutex_exit(&ehcip->ehci_int_mutex);
3326 }
3327
3328
3329 /*
3330 * ehci_remove_tw_from_timeout_list:
3331 *
3332 * Remove Control or bulk transfer from the timeout list.
3333 */
3334 static void
3335 ehci_remove_tw_from_timeout_list(
3336 ehci_state_t *ehcip,
3337 ehci_trans_wrapper_t *tw)
3338 {
3339 ehci_pipe_private_t *pp;
3340 ehci_trans_wrapper_t *prev, *next;
3341
3342 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3343 "ehci_remove_tw_from_timeout_list: tw = 0x%p", (void *)tw);
3344
3345 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3346
3347 /* Obtain the pipe private structure */
3348 pp = tw->tw_pipe_private;
3349
3350 if (pp->pp_timeout_list) {
3351 if (pp->pp_timeout_list == tw) {
3352 pp->pp_timeout_list = tw->tw_timeout_next;
3353
3354 tw->tw_timeout_next = NULL;
3355 } else {
3356 prev = pp->pp_timeout_list;
3357 next = prev->tw_timeout_next;
3358
3359 while (next && (next != tw)) {
3360 prev = next;
3361 next = next->tw_timeout_next;
3362 }
3363
3364 if (next == tw) {
3365 prev->tw_timeout_next =
3366 next->tw_timeout_next;
3367 tw->tw_timeout_next = NULL;
3368 }
3369 }
3370 }
3371 }
3372
3373
3374 /*
3375 * ehci_start_timer:
3376 *
3377 * Start the pipe's timer
3378 */
3379 static void
3380 ehci_start_timer(
3381 ehci_state_t *ehcip,
3382 ehci_pipe_private_t *pp)
3383 {
3384 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3385 "ehci_start_timer: ehcip = 0x%p, pp = 0x%p",
3386 (void *)ehcip, (void *)pp);
3387
3388 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3389
3390 /*
3391 * Start the pipe's timer only if currently timer is not
3392 * running and if there are any transfers on the timeout
3393 * list. This timer will be per pipe.
3394 */
3395 if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3396 pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3397 (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3398 }
3399 }
3400
3401 /*
3402 * ehci_deallocate_tw:
3403 *
3404  * Deallocate a Transaction Wrapper (TW); this involves the freeing
3405  * of DMA resources.
3406 */
3407 void
3408 ehci_deallocate_tw(
3409 ehci_state_t *ehcip,
3410 ehci_pipe_private_t *pp,
3411 ehci_trans_wrapper_t *tw)
3412 {
3413 ehci_trans_wrapper_t *prev, *next;
3414
3415 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3416 "ehci_deallocate_tw: tw = 0x%p", (void *)tw);
3417
3418 /*
3419 	 * If the transfer wrapper still has Host Controller (HC)
3420 	 * Transfer Descriptors (QTD) associated with it, it cannot
3421 	 * be removed yet; only an empty TW is torn down below.
3422 */
3423 if (tw->tw_qtd_head) {
3424 ASSERT(tw->tw_qtd_tail != NULL);
3425
3426 return;
3427 }
3428
3429 ASSERT(tw->tw_qtd_tail == NULL);
3430
3431 /* Make sure we return all the unused qtd's to the pool as well */
3432 ehci_free_tw_td_resources(ehcip, tw);
3433
3434 /*
3435 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
3436 * given TW then set the head and tail equal to NULL.
3437 * Otherwise search for this TW in the linked TW's list
3438 * and then remove this TW from the list.
3439 */
3440 if (pp->pp_tw_head == tw) {
3441 if (pp->pp_tw_tail == tw) {
3442 pp->pp_tw_head = NULL;
3443 pp->pp_tw_tail = NULL;
3444 } else {
3445 pp->pp_tw_head = tw->tw_next;
3446 }
3447 } else {
3448 prev = pp->pp_tw_head;
3449 next = prev->tw_next;
3450
3451 while (next && (next != tw)) {
3452 prev = next;
3453 next = next->tw_next;
3454 }
3455
3456 if (next == tw) {
3457 prev->tw_next = next->tw_next;
3458
3459 if (pp->pp_tw_tail == tw) {
3460 pp->pp_tw_tail = prev;
3461 }
3462 }
3463 }
3464
3465 /*
3466 	 * Make sure that this TW has been removed
3467 * from the timeout list.
3468 */
3469 ehci_remove_tw_from_timeout_list(ehcip, tw);
3470
3471 /* Deallocate this TW */
3472 ehci_free_tw(ehcip, pp, tw);
3473 }
3474
3475
3476 /*
3477 * ehci_free_dma_resources:
3478 *
3479 * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3480 *
3481 * NOTE: This function is also called from POLLED MODE.
3482 */
3483 void
3484 ehci_free_dma_resources(
3485 ehci_state_t *ehcip,
3486 usba_pipe_handle_data_t *ph)
3487 {
3488 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3489 ehci_trans_wrapper_t *head_tw = pp->pp_tw_head;
3490 ehci_trans_wrapper_t *next_tw, *tw;
3491
3492 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3493 "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3494
3495 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3496
3497 /* Process the Transfer Wrappers */
3498 next_tw = head_tw;
3499 while (next_tw) {
3500 tw = next_tw;
3501 next_tw = tw->tw_next;
3502
3503 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3504 "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3505
3506 ehci_free_tw(ehcip, pp, tw);
3507 }
3508
3509 /* Adjust the head and tail pointers */
3510 pp->pp_tw_head = NULL;
3511 pp->pp_tw_tail = NULL;
3512 }
3513
3514
3515 /*
3516 * ehci_free_tw:
3517 *
3518 * Free the Transfer Wrapper (TW).
3519 */
3520 /*ARGSUSED*/
3521 static void
3522 ehci_free_tw(
3523 ehci_state_t *ehcip,
3524 ehci_pipe_private_t *pp,
3525 ehci_trans_wrapper_t *tw)
3526 {
3527 int rval;
3528 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3529
3530 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3531 "ehci_free_tw: tw = 0x%p", (void *)tw);
3532
3533 ASSERT(tw != NULL);
3534 ASSERT(tw->tw_id != NULL);
3535
3536 /* Free 32bit ID */
3537 EHCI_FREE_ID((uint32_t)tw->tw_id);
3538
3539 if (tw->tw_dmahandle != NULL) {
3540 rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3541 ASSERT(rval == DDI_SUCCESS);
3542
3543 ddi_dma_mem_free(&tw->tw_accesshandle);
3544 ddi_dma_free_handle(&tw->tw_dmahandle);
3545 }
3546
3547 /* interrupt ep will come to this point */
3548 if (EHCI_INTR_ENDPOINT(eptd)) {
3549 ehcip->ehci_periodic_req_count--;
3550 } else {
3551 ehcip->ehci_async_req_count--;
3552 }
3553 ehci_toggle_scheduler(ehcip);
3554
3555 /* Free transfer wrapper */
3556 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3557 }
3558
3559
3560 /*
3561 * Miscellaneous functions
3562 */
3563
3564 /*
3565 * ehci_allocate_intr_in_resource
3566 *
3567 * Allocate interrupt request structure for the interrupt IN transfer.
3568 */
3569 /*ARGSUSED*/
3570 int
3571 ehci_allocate_intr_in_resource(
3572 ehci_state_t *ehcip,
3573 ehci_pipe_private_t *pp,
3574 ehci_trans_wrapper_t *tw,
3575 usb_flags_t flags)
3576 {
3577 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
3578 usb_intr_req_t *curr_intr_reqp;
3579 usb_opaque_t client_periodic_in_reqp;
3580 size_t length = 0;
3581
3582 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3583 	    "ehci_allocate_intr_in_resource: "
3584 "pp = 0x%p tw = 0x%p flags = 0x%x", (void *)pp, (void *)tw, flags);
3585
3586 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3587 ASSERT(tw->tw_curr_xfer_reqp == NULL);
3588
3589 /* Get the client periodic in request pointer */
3590 client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3591
3592 /*
3593 	 * If a client periodic IN request has been saved, allocate a
3594 	 * corresponding usb periodic IN request for the current
3595 	 * periodic polling request and copy the information from the
3596 	 * saved periodic request structure.
3597 */
3598 if (client_periodic_in_reqp) {
3599
3600 /* Get the interrupt transfer length */
3601 length = ((usb_intr_req_t *)
3602 client_periodic_in_reqp)->intr_len;
3603
3604 curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3605 (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3606 } else {
3607 curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3608 }
3609
3610 if (curr_intr_reqp == NULL) {
3611
3612 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3613 		    "ehci_allocate_intr_in_resource: Interrupt "
3614 "request structure allocation failed");
3615
3616 return (USB_NO_RESOURCES);
3617 }
3618
3619 /* For polled mode */
3620 if (client_periodic_in_reqp == NULL) {
3621 curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3622 curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3623 } else {
3624 /* Check and save the timeout value */
3625 tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3626 USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3627 }
3628
3629 tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3630 tw->tw_length = curr_intr_reqp->intr_len;
3631
3632 mutex_enter(&ph->p_mutex);
3633 ph->p_req_count++;
3634 mutex_exit(&ph->p_mutex);
3635
3636 pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3637
3638 return (USB_SUCCESS);
3639 }
3640
3641 /*
3642 * ehci_pipe_cleanup
3643 *
3644 * Cleanup ehci pipe.
3645 */
3646 void
3647 ehci_pipe_cleanup(
3648 ehci_state_t *ehcip,
3649 usba_pipe_handle_data_t *ph)
3650 {
3651 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3652 uint_t pipe_state = pp->pp_state;
3653 usb_cr_t completion_reason;
3654 usb_ep_descr_t *eptd = &ph->p_ep;
3655
3656 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3657 "ehci_pipe_cleanup: ph = 0x%p", (void *)ph);
3658
3659 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3660
3661 if (EHCI_ISOC_ENDPOINT(eptd)) {
3662 ehci_isoc_pipe_cleanup(ehcip, ph);
3663
3664 return;
3665 }
3666
3667 ASSERT(!servicing_interrupt());
3668
3669 /*
3670 * Set the QH's status to Halt condition.
3671 	 * If another thread is halting, this function will automatically
3672 	 * wait. If a pipe close happens at this time,
3673 	 * we will be in lots of trouble.
3674 * If we are in an interrupt thread, don't halt, because it may
3675 * do a wait_for_sof.
3676 */
3677 ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3678
3679 /*
3680 	 * Wait for all completed transfers to be processed and
3681 	 * their results sent upstream.
3682 */
3683 ehci_wait_for_transfers_completion(ehcip, pp);
3684
3685 /* Save the data toggle information */
3686 ehci_save_data_toggle(ehcip, ph);
3687
3688 /*
3689 * Traverse the list of QTDs for this pipe using transfer
3690 * wrapper. Process these QTDs depending on their status.
3691 * And stop the timer of this pipe.
3692 */
3693 ehci_traverse_qtds(ehcip, ph);
3694
3695 /* Make sure the timer is not running */
3696 ASSERT(pp->pp_timer_id == 0);
3697
3698 /* Do callbacks for all unfinished requests */
3699 ehci_handle_outstanding_requests(ehcip, pp);
3700
3701 /* Free DMA resources */
3702 ehci_free_dma_resources(ehcip, ph);
3703
3704 switch (pipe_state) {
3705 case EHCI_PIPE_STATE_CLOSE:
3706 completion_reason = USB_CR_PIPE_CLOSING;
3707 break;
3708 case EHCI_PIPE_STATE_RESET:
3709 case EHCI_PIPE_STATE_STOP_POLLING:
3710 /* Set completion reason */
3711 completion_reason = (pipe_state ==
3712 EHCI_PIPE_STATE_RESET) ?
3713 USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3714
3715 /* Restore the data toggle information */
3716 ehci_restore_data_toggle(ehcip, ph);
3717
3718 /*
3719 * Clear the halt bit to restart all the
3720 * transactions on this pipe.
3721 */
3722 ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3723
3724 /* Set pipe state to idle */
3725 pp->pp_state = EHCI_PIPE_STATE_IDLE;
3726
3727 break;
3728 }
3729
3730 /*
3731 * Do the callback for the original client
3732 * periodic IN request.
3733 */
3734 if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3735 ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3736 USB_EP_DIR_IN)) {
3737
3738 ehci_do_client_periodic_in_req_callback(
3739 ehcip, pp, completion_reason);
3740 }
3741 }
3742
3743
3744 /*
3745 * ehci_wait_for_transfers_completion:
3746 *
3747  * Wait for all completed transfers to be processed and their results
3748  * sent upstream.
3749 */
3750 static void
3751 ehci_wait_for_transfers_completion(
3752 ehci_state_t *ehcip,
3753 ehci_pipe_private_t *pp)
3754 {
3755 ehci_trans_wrapper_t *next_tw = pp->pp_tw_head;
3756 ehci_qtd_t *qtd;
3757
3758 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3759 ehcip->ehci_log_hdl,
3760 "ehci_wait_for_transfers_completion: pp = 0x%p", (void *)pp);
3761
3762 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3763
3764 if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3765
3766 return;
3767 }
3768
3769 pp->pp_count_done_qtds = 0;
3770
3771 /* Process the transfer wrappers for this pipe */
3772 while (next_tw) {
3773 qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3774
3775 /*
3776 * Walk through each QTD for this transfer wrapper.
3777 		 * If a QTD still exists, then it is either on the done
3778 		 * list or on the QH's list.
3779 */
3780 while (qtd) {
3781 if (!(Get_QTD(qtd->qtd_ctrl) &
3782 EHCI_QTD_CTRL_ACTIVE_XACT)) {
3783 pp->pp_count_done_qtds++;
3784 }
3785
3786 qtd = ehci_qtd_iommu_to_cpu(ehcip,
3787 Get_QTD(qtd->qtd_tw_next_qtd));
3788 }
3789
3790 next_tw = next_tw->tw_next;
3791 }
3792
3793 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3794 "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3795 pp->pp_count_done_qtds);
3796
3797 if (!pp->pp_count_done_qtds) {
3798
3799 return;
3800 }
3801
3802 (void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &ehcip->ehci_int_mutex,
3803 drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000), TR_CLOCK_TICK);
3804
3805 if (pp->pp_count_done_qtds) {
3806
3807 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3808 		    "ehci_wait_for_transfers_completion: "
3809 "No transfers completion confirmation received");
3810 }
3811 }
3812
3813 /*
3814 * ehci_check_for_transfers_completion:
3815 *
3816 * Check whether anybody is waiting for transfers completion event. If so, send
3817 * this event and also stop initiating any new transfers on this pipe.
3818 */
3819 void
3820 ehci_check_for_transfers_completion(
3821 ehci_state_t *ehcip,
3822 ehci_pipe_private_t *pp)
3823 {
3824 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3825 ehcip->ehci_log_hdl,
3826 "ehci_check_for_transfers_completion: pp = 0x%p", (void *)pp);
3827
3828 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3829
3830 if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3831 (pp->pp_error == USB_CR_NO_RESOURCES) &&
3832 (pp->pp_cur_periodic_req_cnt == 0)) {
3833
3834 /* Reset pipe error to zero */
3835 pp->pp_error = 0;
3836
3837 /* Do callback for original request */
3838 ehci_do_client_periodic_in_req_callback(
3839 ehcip, pp, USB_CR_NO_RESOURCES);
3840 }
3841
3842 if (pp->pp_count_done_qtds) {
3843
3844 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3845 "ehci_check_for_transfers_completion:"
3846 "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3847
3848 /* Decrement the done qtd count */
3849 pp->pp_count_done_qtds--;
3850
3851 if (!pp->pp_count_done_qtds) {
3852
3853 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3854 "ehci_check_for_transfers_completion:"
3855 "Sent transfers completion event pp = 0x%p",
3856 (void *)pp);
3857
3858 /* Send the transfer completion signal */
3859 cv_signal(&pp->pp_xfer_cmpl_cv);
3860 }
3861 }
3862 }
3863
3864
3865 /*
3866 * ehci_save_data_toggle:
3867 *
3868 * Save the data toggle information.
3869 */
3870 static void
3871 ehci_save_data_toggle(
3872 ehci_state_t *ehcip,
3873 usba_pipe_handle_data_t *ph)
3874 {
3875 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3876 usb_ep_descr_t *eptd = &ph->p_ep;
3877 uint_t data_toggle;
3878 usb_cr_t error = pp->pp_error;
3879 ehci_qh_t *qh = pp->pp_qh;
3880
3881 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3882 ehcip->ehci_log_hdl,
3883 "ehci_save_data_toggle: ph = 0x%p", (void *)ph);
3884
3885 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3886
3887 /* Reset the pipe error value */
3888 pp->pp_error = USB_CR_OK;
3889
3890 /* Return immediately if it is a control pipe */
3891 if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3892 USB_EP_ATTR_CONTROL) {
3893
3894 return;
3895 }
3896
3897 /* Get the data toggle information from the endpoint (QH) */
3898 data_toggle = (Get_QH(qh->qh_status) &
3899 EHCI_QH_STS_DATA_TOGGLE)? DATA1:DATA0;
3900
3901 /*
3902 	 * If the error is STALL, then set
3903 	 * the data toggle to DATA0.
3904 */
3905 if (error == USB_CR_STALL) {
3906 data_toggle = DATA0;
3907 }
3908
3909 /*
3910 * Save the data toggle information
3911 * in the usb device structure.
3912 */
3913 mutex_enter(&ph->p_mutex);
3914 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3915 data_toggle);
3916 mutex_exit(&ph->p_mutex);
3917 }
3918
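/*
 * Data toggle note (explanatory): for bulk and interrupt endpoints the
 * current toggle is carried in the EHCI_QH_STS_DATA_TOGGLE bit of the QH
 * status word, so saving it reduces to the test used above:
 *
 *	data_toggle = (Get_QH(qh->qh_status) & EHCI_QH_STS_DATA_TOGGLE) ?
 *	    DATA1 : DATA0;
 *
 * The saved value is forced to DATA0 after a STALL because clearing an
 * endpoint halt resets the device side toggle to DATA0, and control pipes
 * are skipped entirely since their toggle sequence restarts with every
 * SETUP stage rather than persisting across transfers.
 */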
3919
3920 /*
3921 * ehci_restore_data_toggle:
3922 *
3923 * Restore the data toggle information.
3924 */
3925 void
3926 ehci_restore_data_toggle(
3927 ehci_state_t *ehcip,
3928 usba_pipe_handle_data_t *ph)
3929 {
3930 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3931 usb_ep_descr_t *eptd = &ph->p_ep;
3932 uint_t data_toggle = 0;
3933
3934 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3935 ehcip->ehci_log_hdl,
3936 "ehci_restore_data_toggle: ph = 0x%p", (void *)ph);
3937
3938 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3939
3940 /* Return immediately if it is a control pipe */
3941 if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3942 USB_EP_ATTR_CONTROL) {
3943
3944 return;
3945 }
3946
3947 mutex_enter(&ph->p_mutex);
3948
3949 data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3950 ph->p_ep.bEndpointAddress);
3951 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3952 0);
3953
3954 mutex_exit(&ph->p_mutex);
3955
3956 /*
3957 * Restore the data toggle bit depending on the
3958 * previous data toggle information.
3959 */
3960 if (data_toggle) {
3961 Set_QH(pp->pp_qh->qh_status,
3962 Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3963 } else {
3964 Set_QH(pp->pp_qh->qh_status,
3965 Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3966 }
3967 }
3968
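/*
 * The restore routine above is the mirror of ehci_save_data_toggle(): it
 * is called from the pipe reset / stop-polling handling, reads the toggle
 * previously stashed in the usba device structure, clears that saved copy,
 * and rewrites the EHCI_QH_STS_DATA_TOGGLE bit in the QH so that the next
 * transaction on the pipe uses the expected DATA0/DATA1 PID.
 */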
3969
3970 /*
3971 * ehci_handle_outstanding_requests
3972 *
3973 * Deallocate interrupt request structure for the interrupt IN transfer.
3974 * Do the callbacks for all unfinished requests.
3975 *
3976 * NOTE: This function is also called from POLLED MODE.
3977 */
3978 void
3979 ehci_handle_outstanding_requests(
3980 ehci_state_t *ehcip,
3981 ehci_pipe_private_t *pp)
3982 {
3983 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
3984 usb_ep_descr_t *eptd = &ph->p_ep;
3985 ehci_trans_wrapper_t *curr_tw;
3986 ehci_trans_wrapper_t *next_tw;
3987 usb_opaque_t curr_xfer_reqp;
3988
3989 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3990 ehcip->ehci_log_hdl,
3991 "ehci_handle_outstanding_requests: pp = 0x%p", (void *)pp);
3992
3993 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3994
3995 /* Deallocate all pre-allocated interrupt requests */
3996 next_tw = pp->pp_tw_head;
3997
3998 while (next_tw) {
3999 curr_tw = next_tw;
4000 next_tw = curr_tw->tw_next;
4001
4002 curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
4003
4004 /* Deallocate current interrupt request */
4005 if (curr_xfer_reqp) {
4006
4007 if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
4008 (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
4009
4010 /* Decrement periodic in request count */
4011 pp->pp_cur_periodic_req_cnt--;
4012
4013 ehci_deallocate_intr_in_resource(
4014 ehcip, pp, curr_tw);
4015 } else {
4016 ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
4017 }
4018 }
4019 }
4020 }
4021
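/*
 * Flush note (explanatory): in the loop above, a transfer wrapper on a
 * periodic IN endpoint still owns a pre-allocated request that was never
 * delivered to the client, so the outstanding periodic request count is
 * dropped and the request is freed via ehci_deallocate_intr_in_resource().
 * Every other outstanding request is completed back to the client with
 * USB_CR_FLUSHED.
 */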
4022
4023 /*
4024 * ehci_deallocate_intr_in_resource
4025 *
4026 * Deallocate interrupt request structure for the interrupt IN transfer.
4027 */
4028 void
4029 ehci_deallocate_intr_in_resource(
4030 ehci_state_t *ehcip,
4031 ehci_pipe_private_t *pp,
4032 ehci_trans_wrapper_t *tw)
4033 {
4034 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
4035 uchar_t ep_attr = ph->p_ep.bmAttributes;
4036 usb_opaque_t curr_xfer_reqp;
4037
4038 USB_DPRINTF_L4(PRINT_MASK_LISTS,
4039 ehcip->ehci_log_hdl,
4040 "ehci_deallocate_intr_in_resource: "
4041 "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4042
4043 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4044 ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
4045
4046 curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4047
4048 /* Check the current periodic in request pointer */
4049 if (curr_xfer_reqp) {
4050
4051 tw->tw_curr_xfer_reqp = NULL;
4052
4053 mutex_enter(&ph->p_mutex);
4054 ph->p_req_count--;
4055 mutex_exit(&ph->p_mutex);
4056
4057 /* Free pre-allocated interrupt requests */
4058 usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4059
4060 /* Set periodic in pipe state to idle */
4061 pp->pp_state = EHCI_PIPE_STATE_IDLE;
4062 }
4063 }
4064
4065
4066 /*
4067 * ehci_do_client_periodic_in_req_callback
4068 *
4069 * Do callback for the original client periodic IN request.
4070 */
4071 void
4072 ehci_do_client_periodic_in_req_callback(
4073 ehci_state_t *ehcip,
4074 ehci_pipe_private_t *pp,
4075 usb_cr_t completion_reason)
4076 {
4077 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
4078 usb_ep_descr_t *eptd = &ph->p_ep;
4079
4080 USB_DPRINTF_L4(PRINT_MASK_LISTS,
4081 ehcip->ehci_log_hdl,
4082 "ehci_do_client_periodic_in_req_callback: "
4083 "pp = 0x%p cc = 0x%x", (void *)pp, completion_reason);
4084
4085 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4086
4087 /*
4088 * Check for Interrupt/Isochronous IN, whether we need to do
4089 * callback for the original client's periodic IN request.
4090 */
4091 if (pp->pp_client_periodic_in_reqp) {
4092 ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4093 if (EHCI_ISOC_ENDPOINT(eptd)) {
4094 ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4095 } else {
4096 ehci_hcdi_callback(ph, NULL, completion_reason);
4097 }
4098 }
4099 }
4100
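/*
 * Note: pp_client_periodic_in_reqp is the client's original periodic IN
 * request, saved when polling is started; it is distinct from the
 * individual requests completed while polling is active.  The callback
 * above therefore only fires while that original request is still
 * outstanding, and the ASSERT documents that no periodic requests may be
 * in flight at that point (pp_cur_periodic_req_cnt == 0).
 */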
4101
4102 /*
4103 * ehci_hcdi_callback()
4104 *
4105  * Convenience wrapper around usba_hcdi_cb() for pipes other than the root hub.
4106 */
4107 void
4108 ehci_hcdi_callback(
4109 usba_pipe_handle_data_t *ph,
4110 ehci_trans_wrapper_t *tw,
4111 usb_cr_t completion_reason)
4112 {
4113 ehci_state_t *ehcip = ehci_obtain_state(
4114 ph->p_usba_device->usb_root_hub_dip);
4115 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4116 usb_opaque_t curr_xfer_reqp;
4117 uint_t pipe_state = 0;
4118
4119 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4120 "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4121 (void *)ph, (void *)tw, completion_reason);
4122
4123 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4124
4125 /* Set the pipe state as per completion reason */
4126 switch (completion_reason) {
4127 case USB_CR_OK:
4128 pipe_state = pp->pp_state;
4129 break;
4130 case USB_CR_NO_RESOURCES:
4131 case USB_CR_NOT_SUPPORTED:
4132 case USB_CR_PIPE_RESET:
4133 case USB_CR_STOPPED_POLLING:
4134 pipe_state = EHCI_PIPE_STATE_IDLE;
4135 break;
4136 case USB_CR_PIPE_CLOSING:
4137 break;
4138 default:
4139 /* Set the pipe state to error */
4140 pipe_state = EHCI_PIPE_STATE_ERROR;
4141 pp->pp_error = completion_reason;
4142 break;
4143
4144 }
4145
4146 pp->pp_state = pipe_state;
4147
4148 if (tw && tw->tw_curr_xfer_reqp) {
4149 curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4150 tw->tw_curr_xfer_reqp = NULL;
4151 } else {
4152 ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4153
4154 curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4155 pp->pp_client_periodic_in_reqp = NULL;
4156 }
4157
4158 ASSERT(curr_xfer_reqp != NULL);
4159
4160 mutex_exit(&ehcip->ehci_int_mutex);
4161
4162 usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4163
4164 mutex_enter(&ehcip->ehci_int_mutex);
4165 }
4166
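/*
 * Note on ehci_hcdi_callback() above: the completion reason is first
 * folded into a new pipe state (USB_CR_OK keeps the current state,
 * reasons such as reset or stopped polling return the pipe to idle, and
 * an unexpected reason marks the pipe in error and records it in
 * pp_error).  The request to complete is taken either from the transfer
 * wrapper or, when no wrapper is supplied, from the saved
 * pp_client_periodic_in_reqp.  ehci_int_mutex is dropped around
 * usba_hcdi_cb(), presumably so that a client callback which immediately
 * submits a new request can re-enter this driver without deadlocking on
 * the interrupt mutex.  A minimal usage sketch:
 *
 *	ehci_hcdi_callback(ph, tw, USB_CR_OK);
 *	ehci_hcdi_callback(ph, NULL, USB_CR_FLUSHED);
 *
 * where the first form completes the request attached to the transfer
 * wrapper and the second completes the saved periodic IN request.
 */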