1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * EHCI Host Controller Driver (EHCI)
28 *
29 * The EHCI driver is a software driver which interfaces to the Universal
30 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
31 * the Host Controller is defined by the EHCI Host Controller Interface.
32 *
33 * This module contains the main EHCI driver code which handles all USB
34 * transfers, bandwidth allocations and other general functionalities.
35 */
36
37 #include <sys/usb/hcd/ehci/ehcid.h>
38 #include <sys/usb/hcd/ehci/ehci_intr.h>
39 #include <sys/usb/hcd/ehci/ehci_util.h>
40 #include <sys/usb/hcd/ehci/ehci_isoch.h>
41
42 /* Adjustable variables for the size of the pools */
43 extern int ehci_qh_pool_size;
44 extern int ehci_qtd_pool_size;
45
46
47 /* Endpoint Descriptor (QH) related functions */
48 ehci_qh_t *ehci_alloc_qh(
49 ehci_state_t *ehcip,
50 usba_pipe_handle_data_t *ph,
51 uint_t flag);
52 static void ehci_unpack_endpoint(
53 ehci_state_t *ehcip,
54 usba_pipe_handle_data_t *ph,
55 ehci_qh_t *qh);
56 void ehci_insert_qh(
57 ehci_state_t *ehcip,
58 usba_pipe_handle_data_t *ph);
59 static void ehci_insert_async_qh(
60 ehci_state_t *ehcip,
61 ehci_pipe_private_t *pp);
62 static void ehci_insert_intr_qh(
63 ehci_state_t *ehcip,
64 ehci_pipe_private_t *pp);
65 static void ehci_modify_qh_status_bit(
66 ehci_state_t *ehcip,
67 ehci_pipe_private_t *pp,
68 halt_bit_t action);
69 static void ehci_halt_hs_qh(
70 ehci_state_t *ehcip,
71 ehci_pipe_private_t *pp,
72 ehci_qh_t *qh);
73 static void ehci_halt_fls_ctrl_and_bulk_qh(
74 ehci_state_t *ehcip,
75 ehci_pipe_private_t *pp,
76 ehci_qh_t *qh);
77 static void ehci_clear_tt_buffer(
78 ehci_state_t *ehcip,
79 usba_pipe_handle_data_t *ph,
80 ehci_qh_t *qh);
81 static void ehci_halt_fls_intr_qh(
82 ehci_state_t *ehcip,
83 ehci_qh_t *qh);
84 void ehci_remove_qh(
85 ehci_state_t *ehcip,
86 ehci_pipe_private_t *pp,
87 boolean_t reclaim);
88 static void ehci_remove_async_qh(
89 ehci_state_t *ehcip,
90 ehci_pipe_private_t *pp,
91 boolean_t reclaim);
92 static void ehci_remove_intr_qh(
93 ehci_state_t *ehcip,
94 ehci_pipe_private_t *pp,
95 boolean_t reclaim);
96 static void ehci_insert_qh_on_reclaim_list(
97 ehci_state_t *ehcip,
98 ehci_pipe_private_t *pp);
99 void ehci_deallocate_qh(
100 ehci_state_t *ehcip,
101 ehci_qh_t *old_qh);
102 uint32_t ehci_qh_cpu_to_iommu(
103 ehci_state_t *ehcip,
104 ehci_qh_t *addr);
105 ehci_qh_t *ehci_qh_iommu_to_cpu(
106 ehci_state_t *ehcip,
107 uintptr_t addr);
108
109 /* Transfer Descriptor (QTD) related functions */
110 static int ehci_initialize_dummy(
111 ehci_state_t *ehcip,
112 ehci_qh_t *qh);
113 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
114 ehci_state_t *ehcip,
115 ehci_pipe_private_t *pp,
116 usb_ctrl_req_t *ctrl_reqp,
117 usb_flags_t usb_flags);
118 void ehci_insert_ctrl_req(
119 ehci_state_t *ehcip,
120 usba_pipe_handle_data_t *ph,
121 usb_ctrl_req_t *ctrl_reqp,
122 ehci_trans_wrapper_t *tw,
123 usb_flags_t usb_flags);
124 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
125 ehci_state_t *ehcip,
126 ehci_pipe_private_t *pp,
127 usb_bulk_req_t *bulk_reqp,
128 usb_flags_t usb_flags);
129 void ehci_insert_bulk_req(
130 ehci_state_t *ehcip,
131 usba_pipe_handle_data_t *ph,
132 usb_bulk_req_t *bulk_reqp,
133 ehci_trans_wrapper_t *tw,
134 usb_flags_t flags);
135 int ehci_start_periodic_pipe_polling(
136 ehci_state_t *ehcip,
137 usba_pipe_handle_data_t *ph,
138 usb_opaque_t periodic_in_reqp,
139 usb_flags_t flags);
140 static int ehci_start_pipe_polling(
141 ehci_state_t *ehcip,
142 usba_pipe_handle_data_t *ph,
143 usb_flags_t flags);
144 static int ehci_start_intr_polling(
145 ehci_state_t *ehcip,
146 usba_pipe_handle_data_t *ph,
147 usb_flags_t flags);
148 static void ehci_set_periodic_pipe_polling(
149 ehci_state_t *ehcip,
150 usba_pipe_handle_data_t *ph);
151 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
152 ehci_state_t *ehcip,
153 usba_pipe_handle_data_t *ph,
154 usb_intr_req_t *intr_reqp,
155 usb_flags_t usb_flags);
156 void ehci_insert_intr_req(
157 ehci_state_t *ehcip,
158 ehci_pipe_private_t *pp,
159 ehci_trans_wrapper_t *tw,
160 usb_flags_t flags);
161 int ehci_stop_periodic_pipe_polling(
162 ehci_state_t *ehcip,
163 usba_pipe_handle_data_t *ph,
164 usb_flags_t flags);
165 int ehci_insert_qtd(
166 ehci_state_t *ehcip,
167 uint32_t qtd_ctrl,
168 size_t qtd_dma_offs,
169 size_t qtd_length,
170 uint32_t qtd_ctrl_phase,
171 ehci_pipe_private_t *pp,
172 ehci_trans_wrapper_t *tw);
173 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
174 ehci_state_t *ehcip);
175 static void ehci_fill_in_qtd(
176 ehci_state_t *ehcip,
177 ehci_qtd_t *qtd,
178 uint32_t qtd_ctrl,
179 size_t qtd_dma_offs,
180 size_t qtd_length,
181 uint32_t qtd_ctrl_phase,
182 ehci_pipe_private_t *pp,
183 ehci_trans_wrapper_t *tw);
184 static void ehci_insert_qtd_on_tw(
185 ehci_state_t *ehcip,
186 ehci_trans_wrapper_t *tw,
187 ehci_qtd_t *qtd);
188 static void ehci_insert_qtd_into_active_qtd_list(
189 ehci_state_t *ehcip,
190 ehci_qtd_t *curr_qtd);
191 void ehci_remove_qtd_from_active_qtd_list(
192 ehci_state_t *ehcip,
193 ehci_qtd_t *curr_qtd);
194 static void ehci_traverse_qtds(
195 ehci_state_t *ehcip,
196 usba_pipe_handle_data_t *ph);
197 void ehci_deallocate_qtd(
198 ehci_state_t *ehcip,
199 ehci_qtd_t *old_qtd);
200 uint32_t ehci_qtd_cpu_to_iommu(
201 ehci_state_t *ehcip,
202 ehci_qtd_t *addr);
203 ehci_qtd_t *ehci_qtd_iommu_to_cpu(
204 ehci_state_t *ehcip,
205 uintptr_t addr);
206
207 /* Transfer Wrapper (TW) functions */
208 static ehci_trans_wrapper_t *ehci_create_transfer_wrapper(
209 ehci_state_t *ehcip,
210 ehci_pipe_private_t *pp,
211 size_t length,
212 uint_t usb_flags);
213 int ehci_allocate_tds_for_tw(
214 ehci_state_t *ehcip,
215 ehci_pipe_private_t *pp,
216 ehci_trans_wrapper_t *tw,
217 size_t qtd_count);
218 static ehci_trans_wrapper_t *ehci_allocate_tw_resources(
219 ehci_state_t *ehcip,
220 ehci_pipe_private_t *pp,
221 size_t length,
222 usb_flags_t usb_flags,
223 size_t td_count);
224 static void ehci_free_tw_td_resources(
225 ehci_state_t *ehcip,
226 ehci_trans_wrapper_t *tw);
227 static void ehci_start_xfer_timer(
228 ehci_state_t *ehcip,
229 ehci_pipe_private_t *pp,
230 ehci_trans_wrapper_t *tw);
231 void ehci_stop_xfer_timer(
232 ehci_state_t *ehcip,
233 ehci_trans_wrapper_t *tw,
234 uint_t flag);
235 static void ehci_xfer_timeout_handler(void *arg);
236 static void ehci_remove_tw_from_timeout_list(
237 ehci_state_t *ehcip,
238 ehci_trans_wrapper_t *tw);
239 static void ehci_start_timer(ehci_state_t *ehcip,
240 ehci_pipe_private_t *pp);
241 void ehci_deallocate_tw(
242 ehci_state_t *ehcip,
243 ehci_pipe_private_t *pp,
244 ehci_trans_wrapper_t *tw);
245 void ehci_free_dma_resources(
246 ehci_state_t *ehcip,
247 usba_pipe_handle_data_t *ph);
248 static void ehci_free_tw(
249 ehci_state_t *ehcip,
250 ehci_pipe_private_t *pp,
251 ehci_trans_wrapper_t *tw);
252
253 /* Miscellaneous functions */
254 int ehci_allocate_intr_in_resource(
255 ehci_state_t *ehcip,
256 ehci_pipe_private_t *pp,
257 ehci_trans_wrapper_t *tw,
258 usb_flags_t flags);
259 void ehci_pipe_cleanup(
260 ehci_state_t *ehcip,
261 usba_pipe_handle_data_t *ph);
262 static void ehci_wait_for_transfers_completion(
263 ehci_state_t *ehcip,
264 ehci_pipe_private_t *pp);
265 void ehci_check_for_transfers_completion(
266 ehci_state_t *ehcip,
267 ehci_pipe_private_t *pp);
268 static void ehci_save_data_toggle(
269 ehci_state_t *ehcip,
270 usba_pipe_handle_data_t *ph);
271 void ehci_restore_data_toggle(
272 ehci_state_t *ehcip,
273 usba_pipe_handle_data_t *ph);
274 void ehci_handle_outstanding_requests(
275 ehci_state_t *ehcip,
276 ehci_pipe_private_t *pp);
277 void ehci_deallocate_intr_in_resource(
278 ehci_state_t *ehcip,
279 ehci_pipe_private_t *pp,
280 ehci_trans_wrapper_t *tw);
281 void ehci_do_client_periodic_in_req_callback(
282 ehci_state_t *ehcip,
283 ehci_pipe_private_t *pp,
284 usb_cr_t completion_reason);
285 void ehci_hcdi_callback(
286 usba_pipe_handle_data_t *ph,
287 ehci_trans_wrapper_t *tw,
288 usb_cr_t completion_reason);
289
290
291 /*
292  * Endpoint Descriptor (QH) manipulation functions
293 */
294
295 /*
296 * ehci_alloc_qh:
297 *
298 * Allocate an endpoint descriptor (QH)
299 *
300 * NOTE: This function is also called from POLLED MODE.
301 */
302 ehci_qh_t *
303 ehci_alloc_qh(
304 ehci_state_t *ehcip,
305 usba_pipe_handle_data_t *ph,
306 uint_t flag)
307 {
308 int i, state;
309 ehci_qh_t *qh;
310
311 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
312 "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
313
314 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
315
316 /*
317 	 * If this is for an ISOC endpoint, return NULL. Isochronous
318 	 * transfers use ITDs that are put directly onto the PFL.
319 */
320 if (ph) {
321 if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
322
323 return (NULL);
324 }
325 }
326
327 /*
328 * The first 63 endpoints in the Endpoint Descriptor (QH)
329 	 * buffer pool are reserved for building the interrupt lattice
330 * tree. Search for a blank endpoint descriptor in the QH
331 * buffer pool.
332 */
333 for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
334 state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
335
336 if (state == EHCI_QH_FREE) {
337 break;
338 }
339 }
340
341 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
342 "ehci_alloc_qh: Allocated %d", i);
343
344 if (i == ehci_qh_pool_size) {
345 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
346 "ehci_alloc_qh: QH exhausted");
347
348 return (NULL);
349 } else {
350 qh = &ehcip->ehci_qh_pool_addr[i];
351 bzero((void *)qh, sizeof (ehci_qh_t));
352
353 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
354 "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
355
356 /* Check polled mode flag */
357 if (flag == EHCI_POLLED_MODE_FLAG) {
358 Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
359 Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
360 }
361
362 /* Unpack the endpoint descriptor into a control field */
363 if (ph) {
364 if ((ehci_initialize_dummy(ehcip,
365 qh)) == USB_NO_RESOURCES) {
366
367 Set_QH(qh->qh_state, EHCI_QH_FREE);
368
369 return (NULL);
370 }
371
372 ehci_unpack_endpoint(ehcip, ph, qh);
373
374 Set_QH(qh->qh_curr_qtd, 0);
375 Set_QH(qh->qh_alt_next_qtd,
376 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
377
378 			/* Change the QH's state to Active */
379 Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
380 } else {
381 Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
382
383 			/* Change the QH's state to Static */
384 Set_QH(qh->qh_state, EHCI_QH_STATIC);
385 }
386
387 ehci_print_qh(ehcip, qh);
388
389 return (qh);
390 }
391 }
392
393
394 /*
395 * ehci_unpack_endpoint:
396 *
397 * Unpack the information in the pipe handle and create the first byte
398 * of the Host Controller's (HC) Endpoint Descriptor (QH).
399 */
400 static void
401 ehci_unpack_endpoint(
402 ehci_state_t *ehcip,
403 usba_pipe_handle_data_t *ph,
404 ehci_qh_t *qh)
405 {
406 usb_ep_descr_t *endpoint = &ph->p_ep;
407 uint_t maxpacketsize, addr, xactions;
408 uint_t ctrl = 0, status = 0, split_ctrl = 0;
409 usb_port_status_t usb_port_status;
410 usba_device_t *usba_device = ph->p_usba_device;
411 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
412
413 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
414 "ehci_unpack_endpoint:");
415
416 mutex_enter(&usba_device->usb_mutex);
417 ctrl = usba_device->usb_addr;
418 usb_port_status = usba_device->usb_port_status;
419 mutex_exit(&usba_device->usb_mutex);
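	/* The device address just read occupies the low bits of qh_ctrl */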
420
421 addr = endpoint->bEndpointAddress;
422
423 /* Assign the endpoint's address */
424 ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
425
426 /* Assign the speed */
427 switch (usb_port_status) {
428 case USBA_LOW_SPEED_DEV:
429 ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
430 break;
431 case USBA_FULL_SPEED_DEV:
432 ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
433 break;
434 case USBA_HIGH_SPEED_DEV:
435 ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
436 break;
437 }
438
439 switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
440 case USB_EP_ATTR_CONTROL:
441 /* Assign data toggle information */
442 ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
443
444 if (usb_port_status != USBA_HIGH_SPEED_DEV) {
445 ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
446 }
447 /* FALLTHRU */
448 case USB_EP_ATTR_BULK:
449 /* Maximum nak counter */
450 ctrl |= EHCI_QH_CTRL_MAX_NC;
451
452 if (usb_port_status == USBA_HIGH_SPEED_DEV) {
453 /*
454 * Perform ping before executing control
455 * and bulk transactions.
456 */
457 status = EHCI_QH_STS_DO_PING;
458 }
459 break;
460 case USB_EP_ATTR_INTR:
461 /* Set start split mask */
462 split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
463
464 /*
465 * Set complete split mask for low/full speed
466 * usb devices.
467 */
468 if (usb_port_status != USBA_HIGH_SPEED_DEV) {
469 split_ctrl |= ((pp->pp_cmask <<
470 EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
471 EHCI_QH_SPLIT_CTRL_COMP_MASK);
472 }
473 break;
474 }
475
476 /* Get the max transactions per microframe */
477 xactions = (endpoint->wMaxPacketSize &
478 USB_EP_MAX_XACTS_MASK) >> USB_EP_MAX_XACTS_SHIFT;
479
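	/*
	 * wMaxPacketSize bits 12:11 encode the number of additional
	 * transactions per microframe (0, 1 or 2) for high-bandwidth
	 * endpoints; map that to 1, 2 or 3 transactions per microframe
	 * in the QH's split control field below.
	 */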
480 switch (xactions) {
481 case 0:
482 split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
483 break;
484 case 1:
485 split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
486 break;
487 case 2:
488 split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
489 break;
490 default:
491 split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
492 break;
493 }
494
495 /*
496 * For low/full speed devices, program high speed hub
497 * address and port number.
498 */
499 if (usb_port_status != USBA_HIGH_SPEED_DEV) {
500 mutex_enter(&usba_device->usb_mutex);
501 split_ctrl |= ((usba_device->usb_hs_hub_addr
502 << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
503 EHCI_QH_SPLIT_CTRL_HUB_ADDR);
504
505 split_ctrl |= ((usba_device->usb_hs_hub_port
506 << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
507 EHCI_QH_SPLIT_CTRL_HUB_PORT);
508
509 mutex_exit(&usba_device->usb_mutex);
510
511 /* Set start split transaction state */
512 status = EHCI_QH_STS_DO_START_SPLIT;
513 }
514
515 /* Assign endpoint's maxpacketsize */
516 maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
517 maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
518 ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
519
520 Set_QH(qh->qh_ctrl, ctrl);
521 Set_QH(qh->qh_split_ctrl, split_ctrl);
522 Set_QH(qh->qh_status, status);
523 }
524
525
526 /*
527 * ehci_insert_qh:
528 *
529 * Add the Endpoint Descriptor (QH) into the Host Controller's
530 * (HC) appropriate endpoint list.
531 */
532 void
533 ehci_insert_qh(
534 ehci_state_t *ehcip,
535 usba_pipe_handle_data_t *ph)
536 {
537 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
538
539 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
540 "ehci_insert_qh: qh=0x%p", (void *)pp->pp_qh);
541
542 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
543
544 switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
545 case USB_EP_ATTR_CONTROL:
546 case USB_EP_ATTR_BULK:
547 ehci_insert_async_qh(ehcip, pp);
548 ehcip->ehci_open_async_count++;
549 break;
550 case USB_EP_ATTR_INTR:
551 ehci_insert_intr_qh(ehcip, pp);
552 ehcip->ehci_open_periodic_count++;
553 break;
554 case USB_EP_ATTR_ISOCH:
555 /* ISOCH does not use QH, don't do anything but update count */
556 ehcip->ehci_open_periodic_count++;
557 break;
558 }
559 }
560
561
562 /*
563 * ehci_insert_async_qh:
564 *
565 * Insert a control/bulk endpoint into the Host Controller's (HC)
566 * Asynchronous schedule endpoint list.
567 */
568 static void
569 ehci_insert_async_qh(
570 ehci_state_t *ehcip,
571 ehci_pipe_private_t *pp)
572 {
573 ehci_qh_t *qh = pp->pp_qh;
574 ehci_qh_t *async_head_qh;
575 ehci_qh_t *next_qh;
576 uintptr_t qh_addr;
577
578 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
579 "ehci_insert_async_qh:");
580
581 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
582
583 /* Make sure this QH is not already in the list */
584 ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == 0);
585
586 qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
587
588 /* Obtain a ptr to the head of the Async schedule list */
589 async_head_qh = ehcip->ehci_head_of_async_sched_list;
590
591 if (async_head_qh == NULL) {
592 /* Set this QH to be the "head" of the circular list */
593 Set_QH(qh->qh_ctrl,
594 (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
595
596 /* Set new QH's link and previous pointer to itself */
597 Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
598 Set_QH(qh->qh_prev, qh_addr);
599
600 ehcip->ehci_head_of_async_sched_list = qh;
601
602 /* Set the head ptr to the new endpoint */
603 Set_OpReg(ehci_async_list_addr, qh_addr);
604
605 /*
606 * For some reason this register might get nulled out by
607 * the Uli M1575 South Bridge. To workaround the hardware
608 * problem, check the value after write and retry if the
609 * last write fails.
610 *
611 * If the ASYNCLISTADDR remains "stuck" after
612 * EHCI_MAX_RETRY retries, then the M1575 is broken
613 * and is stuck in an inconsistent state and is about
614 * to crash the machine with a trn_oor panic when it
615 * does a DMA read from 0x0. It is better to panic
616 * now rather than wait for the trn_oor crash; this
617 * way Customer Service will have a clean signature
618 * that indicts the M1575 chip rather than a
619 * mysterious and hard-to-diagnose trn_oor panic.
620 */
621 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
622 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
623 (qh_addr != Get_OpReg(ehci_async_list_addr))) {
624 int retry = 0;
625
626 Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
627 if (retry >= EHCI_MAX_RETRY)
628 cmn_err(CE_PANIC, "ehci_insert_async_qh:"
629 " ASYNCLISTADDR write failed.");
630
631 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
632 "ehci_insert_async_qh: ASYNCLISTADDR "
633 "write failed, retry=%d", retry);
634 }
635 } else {
636 ASSERT(Get_QH(async_head_qh->qh_ctrl) &
637 EHCI_QH_CTRL_RECLAIM_HEAD);
638
639 /* Ensure this QH's "H" bit is not set */
640 Set_QH(qh->qh_ctrl,
641 (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
642
643 next_qh = ehci_qh_iommu_to_cpu(ehcip,
644 Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
645
646 /* Set new QH's link and previous pointers */
647 Set_QH(qh->qh_link_ptr,
648 Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
649 Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
650
651 /* Set next QH's prev pointer */
652 Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
653
654 		/* Point the QH Head's link pointer at the new QH */
655 Set_QH(async_head_qh->qh_link_ptr,
656 qh_addr | EHCI_QH_LINK_REF_QH);
657 }
658 }
659
660
661 /*
662 * ehci_insert_intr_qh:
663 *
664  * Insert an interrupt endpoint into the Host Controller's (HC) interrupt
665 * lattice tree.
666 */
667 static void
668 ehci_insert_intr_qh(
669 ehci_state_t *ehcip,
670 ehci_pipe_private_t *pp)
671 {
672 ehci_qh_t *qh = pp->pp_qh;
673 ehci_qh_t *next_lattice_qh, *lattice_qh;
674 uint_t hnode;
675
676 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
677 "ehci_insert_intr_qh:");
678
679 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
680
681 /* Make sure this QH is not already in the list */
682 ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == 0);
683
684 /*
685 * The appropriate high speed node was found
686 * during the opening of the pipe.
687 */
688 hnode = pp->pp_pnode;
689
690 /* Find the lattice endpoint */
691 lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
692
693 /* Find the next lattice endpoint */
694 next_lattice_qh = ehci_qh_iommu_to_cpu(
695 ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
696
697 /* Update the previous pointer */
698 Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
699
700 /* Check next_lattice_qh value */
701 if (next_lattice_qh) {
702 /* Update this qh to point to the next one in the lattice */
703 Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
704
705 /* Update the previous pointer of qh->qh_link_ptr */
706 if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
707 Set_QH(next_lattice_qh->qh_prev,
708 ehci_qh_cpu_to_iommu(ehcip, qh));
709 }
710 } else {
711 /* Update qh's link pointer to terminate periodic list */
712 Set_QH(qh->qh_link_ptr,
713 (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
714 }
715
716 /* Insert this endpoint into the lattice */
717 Set_QH(lattice_qh->qh_link_ptr,
718 (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
719 }
720
721
722 /*
723 * ehci_modify_qh_status_bit:
724 *
725 * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
726 *
727 * If several threads try to halt the same pipe, they will need to wait on
728 * a condition variable. Only one thread is allowed to halt or unhalt the
729 * pipe at a time.
730 *
731 * Usually after a halt pipe, an unhalt pipe will follow soon after. There
732 * is an assumption that an Unhalt pipe will never occur without a halt pipe.
733 */
734 static void
735 ehci_modify_qh_status_bit(
736 ehci_state_t *ehcip,
737 ehci_pipe_private_t *pp,
738 halt_bit_t action)
739 {
740 ehci_qh_t *qh = pp->pp_qh;
741 uint_t smask, eps, split_intr_qh;
742 uint_t status;
743
744 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
745 "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
746 action, (void *)qh);
747
748 ehci_print_qh(ehcip, qh);
749
750 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
751
752 /*
753 * If this pipe is in the middle of halting don't allow another
754 * thread to come in and modify the same pipe.
755 */
756 while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
757
758 cv_wait(&pp->pp_halt_cmpl_cv,
759 &ehcip->ehci_int_mutex);
760 }
761
762 /* Sync the QH QTD pool to get up to date information */
763 Sync_QH_QTD_Pool(ehcip);
764
765
766 if (action == CLEAR_HALT) {
767 /*
768 * If the halt bit is to be cleared, just clear it.
769 		 * There shouldn't be any race condition problems.
770 * If the host controller reads the bit before the
771 * driver has a chance to set the bit, the bit will
772 * be reread on the next frame.
773 */
774 Set_QH(qh->qh_ctrl,
775 (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
776 Set_QH(qh->qh_status,
777 Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
778
779 goto success;
780 }
781
782 	/* Halt the QH, but first check to see if it is already halted */
783 status = Get_QH(qh->qh_status);
784 if (!(status & EHCI_QH_STS_HALTED)) {
785 /* Indicate that this pipe is in the middle of halting. */
786 pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
787
788 /*
789 		 * Find out if this is a full/low speed interrupt endpoint.
790 		 * A non-zero Cmask indicates that this QH is an interrupt
791 		 * endpoint. Check the endpoint speed to see if it is either
792 		 * FULL or LOW.
793 */
794 smask = Get_QH(qh->qh_split_ctrl) &
795 EHCI_QH_SPLIT_CTRL_INTR_MASK;
796 eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
797 split_intr_qh = ((smask != 0) &&
798 (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
799
800 if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
801 ehci_halt_hs_qh(ehcip, pp, qh);
802 } else {
803 if (split_intr_qh) {
804 ehci_halt_fls_intr_qh(ehcip, qh);
805 } else {
806 ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
807 }
808 }
809
810 /* Indicate that this pipe is not in the middle of halting. */
811 pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
812 }
813
814 /* Sync the QH QTD pool again to get the most up to date information */
815 Sync_QH_QTD_Pool(ehcip);
816
817 ehci_print_qh(ehcip, qh);
818
819 status = Get_QH(qh->qh_status);
820 if (!(status & EHCI_QH_STS_HALTED)) {
821 USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
822 "ehci_modify_qh_status_bit: Failed to halt qh=0x%p",
823 (void *)qh);
824
825 ehci_print_qh(ehcip, qh);
826
827 /* Set host controller soft state to error */
828 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
829
830 ASSERT(status & EHCI_QH_STS_HALTED);
831 }
832
833 success:
834 /* Wake up threads waiting for this pipe to be halted. */
835 cv_signal(&pp->pp_halt_cmpl_cv);
836 }
837
838
839 /*
840 * ehci_halt_hs_qh:
841 *
842 * Halts all types of HIGH SPEED QHs.
843 */
844 static void
845 ehci_halt_hs_qh(
846 ehci_state_t *ehcip,
847 ehci_pipe_private_t *pp,
848 ehci_qh_t *qh)
849 {
850 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
851
852 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
853 "ehci_halt_hs_qh:");
854
855 /* Remove this qh from the HCD's view, but do not reclaim it */
856 ehci_remove_qh(ehcip, pp, B_FALSE);
857 ehci_toggle_scheduler_on_pipe(ehcip);
858
859 /*
860 	 * Wait for at least one SOF, just in case the HCD is in the
861 	 * middle of accessing this QH.
862 */
863 (void) ehci_wait_for_sof(ehcip);
864
865 /* Sync the QH QTD pool to get up to date information */
866 Sync_QH_QTD_Pool(ehcip);
867
868 /* Modify the status bit and halt this QH. */
869 Set_QH(qh->qh_status,
870 ((Get_QH(qh->qh_status) &
871 ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
872
873 /* Insert this QH back into the HCD's view */
874 ehci_insert_qh(ehcip, ph);
875 ehci_toggle_scheduler_on_pipe(ehcip);
876 }
877
878
879 /*
880 * ehci_halt_fls_ctrl_and_bulk_qh:
881 *
882 * Halts FULL/LOW Ctrl and Bulk QHs only.
883 */
884 static void
885 ehci_halt_fls_ctrl_and_bulk_qh(
886 ehci_state_t *ehcip,
887 ehci_pipe_private_t *pp,
888 ehci_qh_t *qh)
889 {
890 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
891 uint_t status, split_status, bytes_left;
892
893
894 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
895 "ehci_halt_fls_ctrl_and_bulk_qh:");
896
897 /* Remove this qh from the HCD's view, but do not reclaim it */
898 ehci_remove_qh(ehcip, pp, B_FALSE);
899 ehci_toggle_scheduler_on_pipe(ehcip);
900
901 /*
902 	 * Wait for at least one SOF, just in case the HCD is in the
903 	 * middle of accessing this QH.
904 */
905 (void) ehci_wait_for_sof(ehcip);
906
907 /* Sync the QH QTD pool to get up to date information */
908 Sync_QH_QTD_Pool(ehcip);
909
910 /* Modify the status bit and halt this QH. */
911 Set_QH(qh->qh_status,
912 ((Get_QH(qh->qh_status) &
913 ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
914
915 /* Check to see if the QH was in the middle of a transaction */
916 status = Get_QH(qh->qh_status);
917 split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
918 bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
919 if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
920 (bytes_left != 0)) {
921 /* send ClearTTBuffer to this device's parent 2.0 hub */
922 ehci_clear_tt_buffer(ehcip, ph, qh);
923 }
924
925 /* Insert this QH back into the HCD's view */
926 ehci_insert_qh(ehcip, ph);
927 ehci_toggle_scheduler_on_pipe(ehcip);
928 }
929
930
931 /*
932 * ehci_clear_tt_buffer
933 *
934  * This function will send a Clear_TT_Buffer request to the pipe's
935 * parent 2.0 hub.
936 */
937 static void
938 ehci_clear_tt_buffer(
939 ehci_state_t *ehcip,
940 usba_pipe_handle_data_t *ph,
941 ehci_qh_t *qh)
942 {
943 usba_device_t *usba_device;
944 usba_device_t *hub_usba_device;
945 usb_pipe_handle_t hub_def_ph;
946 usb_ep_descr_t *eptd;
947 uchar_t attributes;
948 uint16_t wValue;
949 usb_ctrl_setup_t setup;
950 usb_cr_t completion_reason;
951 usb_cb_flags_t cb_flags;
952 int retry;
953
954 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
955 "ehci_clear_tt_buffer: ");
956
957 /* Get some information about the current pipe */
958 usba_device = ph->p_usba_device;
959 eptd = &ph->p_ep;
960 attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
961
962 /*
963 	 * Create the wValue for this request (usb spec 11.24.2.3)
964 * 3..0 Endpoint Number
965 * 10..4 Device Address
966 * 12..11 Endpoint Type
967 * 14..13 Reserved (must be 0)
968 * 15 Direction 1 = IN, 0 = OUT
969 */
970 wValue = 0;
971 if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
972 wValue |= 0x8000;
973 }
974 wValue |= attributes << 11;
975 wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
976 wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
977 EHCI_QH_CTRL_ED_NUMBER_SHIFT;
978
979 mutex_exit(&ehcip->ehci_int_mutex);
980
981 /* Manually fill in the request. */
982 setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
983 setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
984 setup.wValue = wValue;
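	/*
	 * wIndex carries the TT port number; per USB 2.0 section 11.24.2.3
	 * a value of 1 is used for single-TT hubs.
	 */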
985 setup.wIndex = 1;
986 setup.wLength = 0;
987 setup.attrs = USB_ATTRS_NONE;
988
989 /* Get the usba_device of the parent 2.0 hub. */
990 mutex_enter(&usba_device->usb_mutex);
991 hub_usba_device = usba_device->usb_hs_hub_usba_dev;
992 mutex_exit(&usba_device->usb_mutex);
993
994 /* Get the default ctrl pipe for the parent 2.0 hub */
995 mutex_enter(&hub_usba_device->usb_mutex);
996 hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
997 mutex_exit(&hub_usba_device->usb_mutex);
998
999 for (retry = 0; retry < 3; retry++) {
1000
1001 /* sync send the request to the default pipe */
1002 if (usb_pipe_ctrl_xfer_wait(
1003 hub_def_ph,
1004 &setup,
1005 NULL,
1006 &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1007
1008 break;
1009 }
1010
1011 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1012 		    "ehci_clear_tt_buffer: Failed to clear tt buffer, "
1013 "retry = %d, cr = %d, cb_flags = 0x%x\n",
1014 retry, completion_reason, cb_flags);
1015 }
1016
1017 if (retry >= 3) {
1018 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1019 dev_info_t *dip = hub_usba_device->usb_dip;
1020
1021 /*
1022 		 * Ask the user to hotplug the 2.0 hub, to make sure that
1023 		 * the TT buffers are back in sync, since this command has failed.
1024 		 */
1025 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1026 		    "Error recovery failure: Please hotplug the 2.0 hub at "
1027 "%s", ddi_pathname(dip, path));
1028
1029 kmem_free(path, MAXPATHLEN);
1030 }
1031
1032 mutex_enter(&ehcip->ehci_int_mutex);
1033 }
1034
1035 /*
1036 * ehci_halt_fls_intr_qh:
1037 *
1038 * Halts FULL/LOW speed Intr QHs.
1039 */
1040 static void
1041 ehci_halt_fls_intr_qh(
1042 ehci_state_t *ehcip,
1043 ehci_qh_t *qh)
1044 {
1045 usb_frame_number_t starting_frame;
1046 usb_frame_number_t frames_past;
1047 uint_t status, i;
1048
1049 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1050 "ehci_halt_fls_intr_qh:");
1051
1052 /*
1053 	 * Ask the HC to deactivate this full/low speed
1054 	 * periodic QH.
1055 */
1056 Set_QH(qh->qh_ctrl,
1057 (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1058
1059 starting_frame = ehci_get_current_frame_number(ehcip);
1060
1061 /*
1062 	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames or until
1063 * the QH has been halted.
1064 */
1065 Sync_QH_QTD_Pool(ehcip);
1066 frames_past = 0;
1067 status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1068
1069 while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1070 (status != 0)) {
1071
1072 (void) ehci_wait_for_sof(ehcip);
1073
1074 Sync_QH_QTD_Pool(ehcip);
1075 status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1076 frames_past = ehci_get_current_frame_number(ehcip) -
1077 starting_frame;
1078 }
1079
1080 /* Modify the status bit and halt this QH. */
1081 Sync_QH_QTD_Pool(ehcip);
1082
1083 status = Get_QH(qh->qh_status);
1084
1085 for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1086 Set_QH(qh->qh_status,
1087 ((Get_QH(qh->qh_status) &
1088 ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1089
1090 Sync_QH_QTD_Pool(ehcip);
1091
1092 (void) ehci_wait_for_sof(ehcip);
1093 Sync_QH_QTD_Pool(ehcip);
1094
1095 if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1096
1097 break;
1098 }
1099 }
1100
1101 Sync_QH_QTD_Pool(ehcip);
1102
1103 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1104 "ehci_halt_fls_intr_qh: qh=0x%p frames past=%llu,"
1105 " status=0x%x, 0x%x", (void *)qh,
1106 (unsigned long long)(ehci_get_current_frame_number(ehcip) -
1107 starting_frame), status, Get_QH(qh->qh_status));
1108 }
1109
1110
1111 /*
1112 * ehci_remove_qh:
1113 *
1114 * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1115 * endpoint list.
1116 */
1117 void
1118 ehci_remove_qh(
1119 ehci_state_t *ehcip,
1120 ehci_pipe_private_t *pp,
1121 boolean_t reclaim)
1122 {
1123 uchar_t attributes;
1124
1125 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1126
1127 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1128 "ehci_remove_qh: qh=0x%p", (void *)pp->pp_qh);
1129
1130 attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1131
1132 switch (attributes) {
1133 case USB_EP_ATTR_CONTROL:
1134 case USB_EP_ATTR_BULK:
1135 ehci_remove_async_qh(ehcip, pp, reclaim);
1136 ehcip->ehci_open_async_count--;
1137 break;
1138 case USB_EP_ATTR_INTR:
1139 ehci_remove_intr_qh(ehcip, pp, reclaim);
1140 ehcip->ehci_open_periodic_count--;
1141 break;
1142 case USB_EP_ATTR_ISOCH:
1143 /* ISOCH does not use QH, don't do anything but update count */
1144 ehcip->ehci_open_periodic_count--;
1145 break;
1146 }
1147 }
1148
1149
1150 /*
1151 * ehci_remove_async_qh:
1152 *
1153  * Remove a control/bulk endpoint from the Host Controller's (HC)
1154 * Asynchronous schedule endpoint list.
1155 */
1156 static void
1157 ehci_remove_async_qh(
1158 ehci_state_t *ehcip,
1159 ehci_pipe_private_t *pp,
1160 boolean_t reclaim)
1161 {
1162 ehci_qh_t *qh = pp->pp_qh; /* qh to be removed */
1163 ehci_qh_t *prev_qh, *next_qh;
1164
1165 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1166 "ehci_remove_async_qh:");
1167
1168 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1169
1170 prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1171 Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1172 next_qh = ehci_qh_iommu_to_cpu(ehcip,
1173 Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1174
1175 /* Make sure this QH is in the list */
1176 ASSERT(prev_qh != NULL);
1177
1178 /*
1179 * If next QH and current QH are the same, then this is the last
1180 * QH on the Asynchronous Schedule list.
1181 */
1182 if (qh == next_qh) {
1183 ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1184 /*
1185 * Null our pointer to the async sched list, but do not
1186 * touch the host controller's list_addr.
1187 */
1188 ehcip->ehci_head_of_async_sched_list = NULL;
1189 ASSERT(ehcip->ehci_open_async_count == 1);
1190 } else {
1191 /* If this QH is the HEAD then find another one to replace it */
1192 if (ehcip->ehci_head_of_async_sched_list == qh) {
1193
1194 ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1195 ehcip->ehci_head_of_async_sched_list = next_qh;
1196 Set_QH(next_qh->qh_ctrl,
1197 Get_QH(next_qh->qh_ctrl) |
1198 EHCI_QH_CTRL_RECLAIM_HEAD);
1199 }
1200 Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1201 Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1202 }
1203
1204 	/* Clear qh_prev to indicate it is no longer in the circular list */
1205 Set_QH(qh->qh_prev, 0);
1206
1207 if (reclaim) {
1208 ehci_insert_qh_on_reclaim_list(ehcip, pp);
1209 }
1210 }
1211
1212
1213 /*
1214 * ehci_remove_intr_qh:
1215 *
1216 * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1217 * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1218 * interrupt handler.
1219 */
1220 static void
1221 ehci_remove_intr_qh(
1222 ehci_state_t *ehcip,
1223 ehci_pipe_private_t *pp,
1224 boolean_t reclaim)
1225 {
1226 ehci_qh_t *qh = pp->pp_qh; /* qh to be removed */
1227 ehci_qh_t *prev_qh, *next_qh;
1228
1229 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1230 "ehci_remove_intr_qh:");
1231
1232 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1233
1234 prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1235 next_qh = ehci_qh_iommu_to_cpu(ehcip,
1236 Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1237
1238 /* Make sure this QH is in the list */
1239 ASSERT(prev_qh != NULL);
1240
1241 if (next_qh) {
1242 /* Update previous qh's link pointer */
1243 Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1244
1245 if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1246 /* Set the previous pointer of the next one */
1247 Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1248 }
1249 } else {
1250 /* Update previous qh's link pointer */
1251 Set_QH(prev_qh->qh_link_ptr,
1252 (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1253 }
1254
1255 	/* Clear qh_prev to indicate it is no longer in the circular list */
1256 Set_QH(qh->qh_prev, 0);
1257
1258 if (reclaim) {
1259 ehci_insert_qh_on_reclaim_list(ehcip, pp);
1260 }
1261 }
1262
1263
1264 /*
1265 * ehci_insert_qh_on_reclaim_list:
1266 *
1267 * Insert Endpoint onto the reclaim list
1268 */
1269 static void
1270 ehci_insert_qh_on_reclaim_list(
1271 ehci_state_t *ehcip,
1272 ehci_pipe_private_t *pp)
1273 {
1274 ehci_qh_t *qh = pp->pp_qh; /* qh to be removed */
1275 ehci_qh_t *next_qh, *prev_qh;
1276 usb_frame_number_t frame_number;
1277
1278 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1279
1280 /*
1281 	 * Read the current usb frame number and add the appropriate number
1282 	 * of usb frames to wait before reclaiming the current endpoint.
1283 */
1284 frame_number =
1285 ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1286
1287 /* Store 32-bit ID */
1288 Set_QH(qh->qh_reclaim_frame,
1289 ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1290
1291 /* Insert the endpoint onto the reclamation list */
1292 if (ehcip->ehci_reclaim_list) {
1293 next_qh = ehcip->ehci_reclaim_list;
1294
1295 while (next_qh) {
1296 prev_qh = next_qh;
1297 next_qh = ehci_qh_iommu_to_cpu(ehcip,
1298 Get_QH(next_qh->qh_reclaim_next));
1299 }
1300
1301 Set_QH(prev_qh->qh_reclaim_next,
1302 ehci_qh_cpu_to_iommu(ehcip, qh));
1303 } else {
1304 ehcip->ehci_reclaim_list = qh;
1305 }
1306
1307 ASSERT(Get_QH(qh->qh_reclaim_next) == 0);
1308 }
1309
1310
1311 /*
1312 * ehci_deallocate_qh:
1313 *
1314 * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1315 *
1316 * NOTE: This function is also called from POLLED MODE.
1317 */
1318 void
1319 ehci_deallocate_qh(
1320 ehci_state_t *ehcip,
1321 ehci_qh_t *old_qh)
1322 {
1323 ehci_qtd_t *first_dummy_qtd, *second_dummy_qtd;
1324
1325 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1326 "ehci_deallocate_qh:");
1327
1328 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1329
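	/*
	 * A QH keeps one or two dummy QTDs queued on it (see
	 * ehci_initialize_dummy()); release them along with the QH.
	 */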
1330 first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1331 (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1332
1333 if (first_dummy_qtd) {
1334 ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1335
1336 second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1337 Get_QTD(first_dummy_qtd->qtd_next_qtd));
1338
1339 if (second_dummy_qtd) {
1340 ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1341 EHCI_QTD_DUMMY);
1342
1343 ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1344 }
1345
1346 ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1347 }
1348
1349 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1350 "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1351
1352 Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1353 }
1354
1355
1356 /*
1357 * ehci_qh_cpu_to_iommu:
1358 *
1359  * This function converts the given Endpoint Descriptor (QH) CPU address
1360  * to an IO address.
1361 *
1362 * NOTE: This function is also called from POLLED MODE.
1363 */
1364 uint32_t
1365 ehci_qh_cpu_to_iommu(
1366 ehci_state_t *ehcip,
1367 ehci_qh_t *addr)
1368 {
1369 uint32_t qh;
1370
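	/*
	 * The QH pool is mapped with a single DMA cookie, so the IO address
	 * is the cookie's base address plus the QH's byte offset within
	 * the pool.
	 */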
1371 qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1372 (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1373
1374 ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1375 ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1376 sizeof (ehci_qh_t) * ehci_qh_pool_size);
1377
1378 return (qh);
1379 }
1380
1381
1382 /*
1383 * ehci_qh_iommu_to_cpu:
1384 *
1385  * This function converts the given Endpoint Descriptor (QH) IO address
1386  * to a CPU address.
1387 */
1388 ehci_qh_t *
1389 ehci_qh_iommu_to_cpu(
1390 ehci_state_t *ehcip,
1391 uintptr_t addr)
1392 {
1393 ehci_qh_t *qh;
1394
1395 if (addr == 0)
1396 return (NULL);
1397
1398 qh = (ehci_qh_t *)((uintptr_t)
1399 (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1400 (uintptr_t)ehcip->ehci_qh_pool_addr);
1401
1402 ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1403 ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1404 (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1405
1406 return (qh);
1407 }
1408
1409
1410 /*
1411  * Transfer Descriptor manipulation functions
1412 */
1413
1414 /*
1415 * ehci_initialize_dummy:
1416 *
1417 * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) on the
1418 * end of its QTD list. Initially, both the head and tail pointers of the QH
1419 * point to the dummy QTD.
1420 */
1421 static int
1422 ehci_initialize_dummy(
1423 ehci_state_t *ehcip,
1424 ehci_qh_t *qh)
1425 {
1426 ehci_qtd_t *first_dummy_qtd, *second_dummy_qtd;
1427
1428 /* Allocate first dummy QTD */
1429 first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1430
1431 if (first_dummy_qtd == NULL) {
1432 return (USB_NO_RESOURCES);
1433 }
1434
1435 /* Allocate second dummy QTD */
1436 second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1437
1438 if (second_dummy_qtd == NULL) {
1439 /* Deallocate first dummy QTD */
1440 ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1441
1442 return (USB_NO_RESOURCES);
1443 }
1444
1445 	/* Make the QH's next QTD pointer point to this new dummy QTD */
1446 Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1447 first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1448
1449 /* Set qh's dummy qtd field */
1450 Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1451
1452 /* Set first_dummy's next qtd pointer */
1453 Set_QTD(first_dummy_qtd->qtd_next_qtd,
1454 ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
1455
1456 return (USB_SUCCESS);
1457 }
1458
1459 /*
1460 * ehci_allocate_ctrl_resources:
1461 *
1462  * Calculates the number of QTDs necessary for a ctrl transfer, and allocates
1463  * all the necessary resources.
1464  *
1465  * Returns NULL if there are insufficient resources, otherwise the TW.
1466 */
1467 ehci_trans_wrapper_t *
1468 ehci_allocate_ctrl_resources(
1469 ehci_state_t *ehcip,
1470 ehci_pipe_private_t *pp,
1471 usb_ctrl_req_t *ctrl_reqp,
1472 usb_flags_t usb_flags)
1473 {
1474 size_t qtd_count = 2;
1475 size_t ctrl_buf_size;
1476 ehci_trans_wrapper_t *tw;
1477
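	/*
	 * A control transfer always needs at least two QTDs: one for the
	 * setup phase and one for the status phase.
	 */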
1478 /* Add one more td for data phase */
1479 if (ctrl_reqp->ctrl_wLength) {
1480 qtd_count += 1;
1481 }
1482
1483 /*
1484 * If we have a control data phase, the data buffer starts
1485 * on the next 4K page boundary. So the TW buffer is allocated
1486 * to be larger than required. The buffer in the range of
1487 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1488 * and not to be transferred.
1489 */
1490 if (ctrl_reqp->ctrl_wLength) {
1491 ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1492 ctrl_reqp->ctrl_wLength;
1493 } else {
1494 ctrl_buf_size = SETUP_SIZE;
1495 }
1496
1497 tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1498 usb_flags, qtd_count);
1499
1500 return (tw);
1501 }
1502
1503 /*
1504 * ehci_insert_ctrl_req:
1505 *
1506 * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1507 */
1508 /* ARGSUSED */
1509 void
1510 ehci_insert_ctrl_req(
1511 ehci_state_t *ehcip,
1512 usba_pipe_handle_data_t *ph,
1513 usb_ctrl_req_t *ctrl_reqp,
1514 ehci_trans_wrapper_t *tw,
1515 usb_flags_t usb_flags)
1516 {
1517 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1518 uchar_t bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1519 uchar_t bRequest = ctrl_reqp->ctrl_bRequest;
1520 uint16_t wValue = ctrl_reqp->ctrl_wValue;
1521 uint16_t wIndex = ctrl_reqp->ctrl_wIndex;
1522 uint16_t wLength = ctrl_reqp->ctrl_wLength;
1523 mblk_t *data = ctrl_reqp->ctrl_data;
1524 uint32_t ctrl = 0;
1525 uint8_t setup_packet[8];
1526
1527 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1528 "ehci_insert_ctrl_req:");
1529
1530 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1531
1532 /*
1533 * Save current control request pointer and timeout values
1534 * in transfer wrapper.
1535 */
1536 tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1537 tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1538 ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1539
1540 /*
1541 * Initialize the callback and any callback data for when
1542 * the qtd completes.
1543 */
1544 tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1545 tw->tw_handle_callback_value = NULL;
1546
1547 /*
1548 * swap the setup bytes where necessary since we specified
1549 * NEVERSWAP
1550 */
1551 setup_packet[0] = bmRequestType;
1552 setup_packet[1] = bRequest;
1553 setup_packet[2] = (uint8_t)wValue;
1554 setup_packet[3] = wValue >> 8;
1555 setup_packet[4] = (uint8_t)wIndex;
1556 setup_packet[5] = wIndex >> 8;
1557 setup_packet[6] = (uint8_t)wLength;
1558 setup_packet[7] = wLength >> 8;
1559
1560 bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1561
1562 Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1563
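	/*
	 * The SETUP packet always uses DATA0; the data and status phases
	 * that follow use DATA1.
	 */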
1564 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1565
1566 /*
1567 	 * The QTDs are placed on the QH one at a time.
1568 * Once this QTD is placed on the done list, the
1569 * data or status phase QTD will be enqueued.
1570 */
1571 (void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1572 EHCI_CTRL_SETUP_PHASE, pp, tw);
1573
1574 USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1575 "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1576
1577 /*
1578 * If this control transfer has a data phase, record the
1579 * direction. If the data phase is an OUT transaction,
1580 * copy the data into the buffer of the transfer wrapper.
1581 */
1582 if (wLength != 0) {
1583 /* There is a data stage. Find the direction */
1584 if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1585 tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1586 } else {
1587 tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1588
1589 /* Copy the data into the message */
1590 bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1591 wLength);
1592
1593 Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1594 wLength + EHCI_MAX_QTD_BUF_SIZE);
1595 }
1596
1597 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1598
1599 /*
1600 * Create the QTD. If this is an OUT transaction,
1601 * the data is already in the buffer of the TW.
1602 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1603 * which is 4K aligned, though the ctrl phase only
1604 * transfers a length of SETUP_SIZE. The padding data
1605 * in the TW buffer are discarded.
1606 */
1607 (void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1608 tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1609 EHCI_CTRL_DATA_PHASE, pp, tw);
1610
1611 /*
1612 * The direction of the STATUS QTD depends on
1613 * the direction of the transfer.
1614 */
1615 if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1616 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1617 EHCI_QTD_CTRL_OUT_PID |
1618 EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1619 } else {
1620 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1621 EHCI_QTD_CTRL_IN_PID |
1622 EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1623 }
1624 } else {
1625 /*
1626 		 * There is no data stage, so initiate the
1627 		 * status phase from the host.
1628 */
1629 ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1630 EHCI_QTD_CTRL_IN_PID |
1631 EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1632 }
1633
1634
1635 (void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1636 EHCI_CTRL_STATUS_PHASE, pp, tw);
1637
1638 /* Start the timer for this control transfer */
1639 ehci_start_xfer_timer(ehcip, pp, tw);
1640 }
1641
1642
1643 /*
1644 * ehci_allocate_bulk_resources:
1645 *
1646  * Calculates the number of QTDs necessary for a bulk transfer, and allocates
1647  * all the necessary resources.
1648  *
1649  * Returns NULL if there are insufficient resources, otherwise the TW.
1650 */
1651 ehci_trans_wrapper_t *
1652 ehci_allocate_bulk_resources(
1653 ehci_state_t *ehcip,
1654 ehci_pipe_private_t *pp,
1655 usb_bulk_req_t *bulk_reqp,
1656 usb_flags_t usb_flags)
1657 {
1658 size_t qtd_count = 0;
1659 ehci_trans_wrapper_t *tw;
1660
1661 /* Check the size of bulk request */
1662 if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1663
1664 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1665 "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1666 "more than 0x%x", bulk_reqp->bulk_len,
1667 EHCI_MAX_BULK_XFER_SIZE);
1668
1669 return (NULL);
1670 }
1671
1672 	/* Calculate the number of QTDs required for this bulk request */
1673 qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1674 if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1675 bulk_reqp->bulk_len == 0) {
1676 qtd_count += 1;
1677 }
1678
1679 tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1680 usb_flags, qtd_count);
1681
1682 return (tw);
1683 }
1684
1685 /*
1686 * ehci_insert_bulk_req:
1687 *
1688 * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1689 * endpoint.
1690 */
1691 /* ARGSUSED */
1692 void
1693 ehci_insert_bulk_req(
1694 ehci_state_t *ehcip,
1695 usba_pipe_handle_data_t *ph,
1696 usb_bulk_req_t *bulk_reqp,
1697 ehci_trans_wrapper_t *tw,
1698 usb_flags_t flags)
1699 {
1700 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1701 uint_t bulk_pkt_size, count;
1702 size_t residue = 0, len = 0;
1703 uint32_t ctrl = 0;
1704 int pipe_dir;
1705
1706 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1707 "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1708 (void *)bulk_reqp, flags);
1709
1710 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1711
1712 /* Get the bulk pipe direction */
1713 pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1714
1715 /* Get the required bulk packet size */
1716 bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1717
1718 if (bulk_pkt_size) {
1719 residue = tw->tw_length % bulk_pkt_size;
1720 }
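	/*
	 * residue is the length of the last, partially filled QTD and is
	 * zero when the transfer length is an exact multiple of the
	 * maximum QTD transfer size.
	 */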
1721
1722 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1723 "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1724
1725 /*
1726 * Save current bulk request pointer and timeout values
1727 * in transfer wrapper.
1728 */
1729 tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1730 tw->tw_timeout = bulk_reqp->bulk_timeout;
1731
1732 /*
1733 * Initialize the callback and any callback
1734 * data required when the qtd completes.
1735 */
1736 tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1737 tw->tw_handle_callback_value = NULL;
1738
1739 tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1740 EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1741
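	/* For OUT transfers, copy the client's data into the TW buffer now */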
1742 if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1743
1744 if (bulk_reqp->bulk_len) {
1745 ASSERT(bulk_reqp->bulk_data != NULL);
1746
1747 bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1748 bulk_reqp->bulk_len);
1749
1750 Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1751 bulk_reqp->bulk_len);
1752 }
1753 }
1754
1755 ctrl = tw->tw_direction;
1756
1757 /* Insert all the bulk QTDs */
1758 for (count = 0; count < tw->tw_num_qtds; count++) {
1759
1760 /* Check for last qtd */
1761 if (count == (tw->tw_num_qtds - 1)) {
1762
1763 ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1764
1765 /* Check for inserting residue data */
1766 if (residue) {
1767 bulk_pkt_size = (uint_t)residue;
1768 }
1769 }
1770
1771 /* Insert the QTD onto the endpoint */
1772 (void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1773 0, pp, tw);
1774
1775 len = len + bulk_pkt_size;
1776 }
1777
1778 /* Start the timer for this bulk transfer */
1779 ehci_start_xfer_timer(ehcip, pp, tw);
1780 }
1781
1782
1783 /*
1784 * ehci_start_periodic_pipe_polling:
1785 *
1786 * NOTE: This function is also called from POLLED MODE.
1787 */
1788 int
1789 ehci_start_periodic_pipe_polling(
1790 ehci_state_t *ehcip,
1791 usba_pipe_handle_data_t *ph,
1792 usb_opaque_t periodic_in_reqp,
1793 usb_flags_t flags)
1794 {
1795 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1796 usb_ep_descr_t *eptd = &ph->p_ep;
1797 int error = USB_SUCCESS;
1798
1799 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1800 "ehci_start_periodic_pipe_polling: ep%d",
1801 ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1802
1803 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1804
1805 /*
1806 * Check and handle start polling on root hub interrupt pipe.
1807 */
1808 if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1809 ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1810 USB_EP_ATTR_INTR)) {
1811
1812 error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1813 (usb_intr_req_t *)periodic_in_reqp, flags);
1814
1815 return (error);
1816 }
1817
1818 switch (pp->pp_state) {
1819 case EHCI_PIPE_STATE_IDLE:
1820 /* Save the Original client's Periodic IN request */
1821 pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1822
1823 /*
1824 		 * If this pipe is uninitialized, or if a valid QTD is
1825 		 * not found, then insert a QTD on the interrupt IN
1826 		 * endpoint.
1827 */
1828 error = ehci_start_pipe_polling(ehcip, ph, flags);
1829
1830 if (error != USB_SUCCESS) {
1831 USB_DPRINTF_L2(PRINT_MASK_INTR,
1832 ehcip->ehci_log_hdl,
1833 "ehci_start_periodic_pipe_polling: "
1834 "Start polling failed");
1835
1836 pp->pp_client_periodic_in_reqp = NULL;
1837
1838 return (error);
1839 }
1840
1841 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1842 "ehci_start_periodic_pipe_polling: PP = 0x%p", (void *)pp);
1843
1844 #ifdef DEBUG
1845 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1846 case USB_EP_ATTR_INTR:
1847 ASSERT((pp->pp_tw_head != NULL) &&
1848 (pp->pp_tw_tail != NULL));
1849 break;
1850 case USB_EP_ATTR_ISOCH:
1851 ASSERT((pp->pp_itw_head != NULL) &&
1852 (pp->pp_itw_tail != NULL));
1853 break;
1854 }
1855 #endif
1856
1857 break;
1858 case EHCI_PIPE_STATE_ACTIVE:
1859 USB_DPRINTF_L2(PRINT_MASK_INTR,
1860 ehcip->ehci_log_hdl,
1861 "ehci_start_periodic_pipe_polling: "
1862 "Polling is already in progress");
1863
1864 error = USB_FAILURE;
1865 break;
1866 case EHCI_PIPE_STATE_ERROR:
1867 USB_DPRINTF_L2(PRINT_MASK_INTR,
1868 ehcip->ehci_log_hdl,
1869 "ehci_start_periodic_pipe_polling: "
1870 		    "Pipe is halted, perform a reset "
1871 		    "before restarting polling");
1872
1873 error = USB_FAILURE;
1874 break;
1875 default:
1876 USB_DPRINTF_L2(PRINT_MASK_INTR,
1877 ehcip->ehci_log_hdl,
1878 "ehci_start_periodic_pipe_polling: "
1879 "Undefined state");
1880
1881 error = USB_FAILURE;
1882 break;
1883 }
1884
1885 return (error);
1886 }
1887
1888
1889 /*
1890 * ehci_start_pipe_polling:
1891 *
1892  * Insert the number of periodic requests corresponding to the polling
1893 * interval as calculated during pipe open.
1894 */
1895 static int
1896 ehci_start_pipe_polling(
1897 ehci_state_t *ehcip,
1898 usba_pipe_handle_data_t *ph,
1899 usb_flags_t flags)
1900 {
1901 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1902 usb_ep_descr_t *eptd = &ph->p_ep;
1903 int error = USB_FAILURE;
1904
1905 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1906 "ehci_start_pipe_polling:");
1907
1908 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1909
1910 /*
1911 * For the start polling, pp_max_periodic_req_cnt will be zero
1912 * and for the restart polling request, it will be non zero.
1913 *
1914 	 * In case of a start polling request, find out the number of
1915 	 * requests required for the Interrupt IN endpoints corresponding
1916 	 * to the endpoint polling interval. For Isochronous IN endpoints,
1917 	 * the count is always fixed since the polling interval is one ms.
1918 */
1919 if (pp->pp_max_periodic_req_cnt == 0) {
1920
1921 ehci_set_periodic_pipe_polling(ehcip, ph);
1922 }
1923
1924 ASSERT(pp->pp_max_periodic_req_cnt != 0);
1925
1926 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1927 case USB_EP_ATTR_INTR:
1928 error = ehci_start_intr_polling(ehcip, ph, flags);
1929 break;
1930 case USB_EP_ATTR_ISOCH:
1931 error = ehci_start_isoc_polling(ehcip, ph, flags);
1932 break;
1933 }
1934
1935 return (error);
1936 }
1937
1938 static int
1939 ehci_start_intr_polling(
1940 ehci_state_t *ehcip,
1941 usba_pipe_handle_data_t *ph,
1942 usb_flags_t flags)
1943 {
1944 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1945 ehci_trans_wrapper_t *tw_list, *tw;
1946 int i, total_tws;
1947 int error = USB_SUCCESS;
1948
1949 /* Allocate all the necessary resources for the IN transfer */
1950 tw_list = NULL;
1951 total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1952 for (i = 0; i < total_tws; i += 1) {
1953 tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1954 if (tw == NULL) {
1955 error = USB_NO_RESOURCES;
1956 /* There are not enough resources, deallocate the TWs */
1957 tw = tw_list;
1958 while (tw != NULL) {
1959 tw_list = tw->tw_next;
1960 ehci_deallocate_intr_in_resource(
1961 ehcip, pp, tw);
1962 ehci_deallocate_tw(ehcip, pp, tw);
1963 tw = tw_list;
1964 }
1965
1966 return (error);
1967 } else {
1968 if (tw_list == NULL) {
1969 tw_list = tw;
1970 }
1971 }
1972 }
1973
1974 while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1975
1976 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1977 		    "ehci_start_intr_polling: max = %d curr = %d tw = %p:",
1978 pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1979 (void *)tw_list);
1980
1981 tw = tw_list;
1982 tw_list = tw->tw_next;
1983
1984 ehci_insert_intr_req(ehcip, pp, tw, flags);
1985
1986 pp->pp_cur_periodic_req_cnt++;
1987 }
1988
1989 return (error);
1990 }
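
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * all-or-nothing pre-allocation pattern used by ehci_start_intr_polling()
 * above, shown with a hypothetical node type. Every element is allocated
 * before any request is started; if one allocation fails, the partial
 * list is unwound and the whole operation fails. The EHCI_EXAMPLE_SKETCH
 * guard and the example_* names are assumptions for illustration only.
 */
#ifdef EHCI_EXAMPLE_SKETCH
typedef struct example_node {
	struct example_node	*next;
} example_node_t;

static example_node_t *
example_alloc_all(int count)
{
	example_node_t	*head = NULL;
	example_node_t	*node;
	int		i;

	for (i = 0; i < count; i++) {
		node = kmem_zalloc(sizeof (example_node_t), KM_NOSLEEP);
		if (node == NULL) {
			/* Unwind everything allocated so far */
			while (head != NULL) {
				node = head;
				head = head->next;
				kmem_free(node, sizeof (example_node_t));
			}

			return (NULL);
		}
		node->next = head;
		head = node;
	}

	return (head);
}
#endif	/* EHCI_EXAMPLE_SKETCH */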
1991
1992
1993 /*
1994 * ehci_set_periodic_pipe_polling:
1995 *
1996 * Calculate the number of periodic requests needed corresponding to the
1997 * interrupt IN endpoints polling interval. Table below gives the number
1998 * of periodic requests needed for the interrupt IN endpoints according
1999 * to endpoint polling interval.
2000 *
2001 * Polling interval Number of periodic requests
2002 *
2003 * 1ms 4
2004 * 2ms 2
2005 * 4ms to 32ms 1
2006 */
2007 static void
2008 ehci_set_periodic_pipe_polling(
2009 ehci_state_t *ehcip,
2010 usba_pipe_handle_data_t *ph)
2011 {
2012 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2013 usb_ep_descr_t *endpoint = &ph->p_ep;
2014 uchar_t ep_attr = endpoint->bmAttributes;
2015 uint_t interval;
2016
2017 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2018 "ehci_set_periodic_pipe_polling:");
2019
2020 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2021
2022 pp->pp_cur_periodic_req_cnt = 0;
2023
2024 /*
2025 	 * Check whether the USB_ATTRS_ONE_XFER attribute is set
2026 	 * and if so, set pp->pp_max_periodic_req_cnt to one request.
2027 */
2028 if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
2029 (pp->pp_client_periodic_in_reqp)) {
2030 usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
2031 pp->pp_client_periodic_in_reqp;
2032
2033 if (intr_reqp->intr_attributes &
2034 USB_ATTRS_ONE_XFER) {
2035
2036 pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2037
2038 return;
2039 }
2040 }
2041
2042 mutex_enter(&ph->p_usba_device->usb_mutex);
2043
2044 /*
2045 * The ehci_adjust_polling_interval function will not fail
2046 * at this instance since bandwidth allocation is already
2047 * done. Here we are getting only the periodic interval.
2048 */
2049 interval = ehci_adjust_polling_interval(ehcip, endpoint,
2050 ph->p_usba_device->usb_port_status);
2051
2052 mutex_exit(&ph->p_usba_device->usb_mutex);
2053
2054 switch (interval) {
2055 case EHCI_INTR_1MS_POLL:
2056 pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2057 break;
2058 case EHCI_INTR_2MS_POLL:
2059 pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2060 break;
2061 default:
2062 pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2063 break;
2064 }
2065
2066 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2067 "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2068 pp->pp_max_periodic_req_cnt);
2069 }
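
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * mapping from an adjusted interrupt polling interval to the number of
 * outstanding periodic requests, mirroring the table above
 * ehci_set_periodic_pipe_polling(). The EHCI_EXAMPLE_SKETCH guard is an
 * assumption for illustration only.
 */
#ifdef EHCI_EXAMPLE_SKETCH
static uint_t
example_periodic_req_cnt(uint_t interval)
{
	switch (interval) {
	case EHCI_INTR_1MS_POLL:
		return (EHCI_INTR_1MS_REQS);	/* 1ms: 4 requests */
	case EHCI_INTR_2MS_POLL:
		return (EHCI_INTR_2MS_REQS);	/* 2ms: 2 requests */
	default:
		return (EHCI_INTR_XMS_REQS);	/* 4ms to 32ms: 1 request */
	}
}
#endif	/* EHCI_EXAMPLE_SKETCH */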
2070
2071 /*
2072 * ehci_allocate_intr_resources:
2073 *
2074  * Calculates the number of QTDs necessary for an interrupt transfer, and
2075  * allocates all the necessary resources.
2076  *
2077  * Returns NULL if there are insufficient resources, otherwise the TW.
2078 */
2079 ehci_trans_wrapper_t *
2080 ehci_allocate_intr_resources(
2081 ehci_state_t *ehcip,
2082 usba_pipe_handle_data_t *ph,
2083 usb_intr_req_t *intr_reqp,
2084 usb_flags_t flags)
2085 {
2086 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2087 int pipe_dir;
2088 size_t qtd_count = 1;
2089 size_t tw_length;
2090 ehci_trans_wrapper_t *tw;
2091
2092 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2093 "ehci_allocate_intr_resources:");
2094
2095 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2096
2097 pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2098
2099 /* Get the length of interrupt transfer & alloc data */
2100 if (intr_reqp) {
2101 tw_length = intr_reqp->intr_len;
2102 } else {
2103 ASSERT(pipe_dir == USB_EP_DIR_IN);
2104 tw_length = (pp->pp_client_periodic_in_reqp) ?
2105 (((usb_intr_req_t *)pp->
2106 pp_client_periodic_in_reqp)->intr_len) :
2107 ph->p_ep.wMaxPacketSize;
2108 }
2109
2110 /* Check the size of interrupt request */
2111 if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2112
2113 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2114 "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2115 "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2116
2117 return (NULL);
2118 }
2119
2120 if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2121 qtd_count)) == NULL) {
2122
2123 return (NULL);
2124 }
2125
2126 if (pipe_dir == USB_EP_DIR_IN) {
2127 if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2128 USB_SUCCESS) {
2129 			ehci_deallocate_tw(ehcip, pp, tw);

			return (NULL);
2130 		}
2131 tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2132 } else {
2133 if (tw_length) {
2134 ASSERT(intr_reqp->intr_data != NULL);
2135
2136 /* Copy the data into the buffer */
2137 bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2138 intr_reqp->intr_len);
2139
2140 Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2141 intr_reqp->intr_len);
2142 }
2143
2144 tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2145 tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2146 }
2147
2148 if (intr_reqp) {
2149 tw->tw_timeout = intr_reqp->intr_timeout;
2150 }
2151
2152 /*
2153 * Initialize the callback and any callback
2154 * data required when the qtd completes.
2155 */
2156 tw->tw_handle_qtd = ehci_handle_intr_qtd;
2157 tw->tw_handle_callback_value = NULL;
2158
2159 return (tw);
2160 }
2161
2162
2163 /*
2164 * ehci_insert_intr_req:
2165 *
2166 * Insert an Interrupt request into the Host Controller's periodic list.
2167 */
2168 /* ARGSUSED */
2169 void
2170 ehci_insert_intr_req(
2171 ehci_state_t *ehcip,
2172 ehci_pipe_private_t *pp,
2173 ehci_trans_wrapper_t *tw,
2174 usb_flags_t flags)
2175 {
2176 uint_t ctrl = 0;
2177
2178 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2179
2180 ASSERT(tw->tw_curr_xfer_reqp != NULL);
2181
2182 ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2183
2184 /* Insert another interrupt QTD */
2185 (void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2186
2187 /* Start the timer for this Interrupt transfer */
2188 ehci_start_xfer_timer(ehcip, pp, tw);
2189 }
2190
2191
2192 /*
2193 * ehci_stop_periodic_pipe_polling:
2194 */
2195 /* ARGSUSED */
2196 int
2197 ehci_stop_periodic_pipe_polling(
2198 ehci_state_t *ehcip,
2199 usba_pipe_handle_data_t *ph,
2200 usb_flags_t flags)
2201 {
2202 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2203 usb_ep_descr_t *eptd = &ph->p_ep;
2204
2205 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2206 "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2207
2208 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2209
2210 /*
2211 * Check and handle stop polling on root hub interrupt pipe.
2212 */
2213 if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2214 ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2215 USB_EP_ATTR_INTR)) {
2216
2217 ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2218
2219 return (USB_SUCCESS);
2220 }
2221
2222 if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2223
2224 USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2225 "ehci_stop_periodic_pipe_polling: "
2226 "Polling already stopped");
2227
2228 return (USB_SUCCESS);
2229 }
2230
2231 /* Set pipe state to pipe stop polling */
2232 pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2233
2234 ehci_pipe_cleanup(ehcip, ph);
2235
2236 return (USB_SUCCESS);
2237 }
2238
2239
2240 /*
2241 * ehci_insert_qtd:
2242 *
2243 * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2244 * Always returns USB_SUCCESS for now. Once Isoch has been implemented,
2245 * it may return USB_FAILURE.
2246 */
2247 int
2248 ehci_insert_qtd(
2249 ehci_state_t *ehcip,
2250 uint32_t qtd_ctrl,
2251 size_t qtd_dma_offs,
2252 size_t qtd_length,
2253 uint32_t qtd_ctrl_phase,
2254 ehci_pipe_private_t *pp,
2255 ehci_trans_wrapper_t *tw)
2256 {
2257 ehci_qtd_t *curr_dummy_qtd, *next_dummy_qtd;
2258 ehci_qtd_t *new_dummy_qtd;
2259 ehci_qh_t *qh = pp->pp_qh;
2260 int error = USB_SUCCESS;
2261
2262 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2263
2264 /* Allocate new dummy QTD */
2265 new_dummy_qtd = tw->tw_qtd_free_list;
2266
2267 ASSERT(new_dummy_qtd != NULL);
2268 tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2269 Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2270 Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, 0);
2271
2272 /* Get the current and next dummy QTDs */
2273 curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2274 Get_QH(qh->qh_dummy_qtd));
2275 next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2276 Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2277
2278 /* Update QH's dummy qtd field */
2279 Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2280
2281 /* Update next dummy's next qtd pointer */
2282 Set_QTD(next_dummy_qtd->qtd_next_qtd,
2283 ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2284
2285 /*
2286 * Fill in the current dummy qtd and
2287 * add the new dummy to the end.
2288 */
2289 ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2290 qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2291
2292 /* Insert this qtd onto the tw */
2293 ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2294
2295 /*
2296 * Insert this qtd onto active qtd list.
2297 * Don't insert polled mode qtd here.
2298 */
2299 if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2300 /* Insert this qtd onto active qtd list */
2301 ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2302 }
2303
2304 /* Print qh and qtd */
2305 ehci_print_qh(ehcip, qh);
2306 ehci_print_qtd(ehcip, curr_dummy_qtd);
2307
2308 return (error);
2309 }
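
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * dummy QTD insertion scheme used by ehci_insert_qtd() above, reduced to
 * a hypothetical descriptor type. The queue always ends in an inactive
 * dummy; the transfer is written into the current dummy and a fresh
 * inactive dummy is linked behind it before the filled descriptor is
 * armed, so the host controller never fetches a half-built QTD. The
 * example_qtd type and field names are assumptions.
 */
#ifdef EHCI_EXAMPLE_SKETCH
typedef struct example_qtd {
	struct example_qtd	*next;
	int			active;		/* 0 = inactive dummy */
} example_qtd_t;

static void
example_insert_qtd(example_qtd_t **dummyp, example_qtd_t *new_dummy)
{
	example_qtd_t	*curr_dummy = *dummyp;

	/* The fresh tail stays inactive until it is filled in later */
	new_dummy->active = 0;
	new_dummy->next = NULL;

	/* Link the fresh dummy behind the descriptor being filled */
	curr_dummy->next = new_dummy;

	/* Only now arm the filled descriptor for the controller */
	curr_dummy->active = 1;

	/* The queue head's dummy is now the freshly linked one */
	*dummyp = new_dummy;
}
#endif	/* EHCI_EXAMPLE_SKETCH */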
2310
2311
2312 /*
2313 * ehci_allocate_qtd_from_pool:
2314 *
2315 * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2316 */
2317 static ehci_qtd_t *
2318 ehci_allocate_qtd_from_pool(ehci_state_t *ehcip)
2319 {
2320 int i, ctrl;
2321 ehci_qtd_t *qtd;
2322
2323 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2324
2325 /*
2326 * Search for a blank Transfer Descriptor (QTD)
2327 * in the QTD buffer pool.
2328 */
2329 for (i = 0; i < ehci_qtd_pool_size; i ++) {
2330 ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2331 if (ctrl == EHCI_QTD_FREE) {
2332 break;
2333 }
2334 }
2335
2336 if (i >= ehci_qtd_pool_size) {
2337 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2338 "ehci_allocate_qtd_from_pool: QTD exhausted");
2339
2340 return (NULL);
2341 }
2342
2343 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2344 "ehci_allocate_qtd_from_pool: Allocated %d", i);
2345
2346 /* Create a new dummy for the end of the QTD list */
2347 qtd = &ehcip->ehci_qtd_pool_addr[i];
2348
2349 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2350 "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2351
2352 /* Mark the newly allocated QTD as a dummy */
2353 Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2354
2355 /* Mark the status of this new QTD to halted state */
2356 Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2357
2358 /* Disable dummy QTD's next and alternate next pointers */
2359 Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2360 Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2361
2362 return (qtd);
2363 }
2364
2365
2366 /*
2367 * ehci_fill_in_qtd:
2368 *
2369 * Fill in the fields of a Transfer Descriptor (QTD).
2370 * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2371 * it is associated with.
2372 *
2373 * Note:
2374 * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
2375  *		  should transfer from. It should be 4K aligned. When a
2376  *		  TW has more than one QTD, the QTDs must be filled in
2377 * increasing order.
2378 * qtd_length - the total bytes to transfer.
2379 */
2380 /*ARGSUSED*/
2381 static void
2382 ehci_fill_in_qtd(
2383 ehci_state_t *ehcip,
2384 ehci_qtd_t *qtd,
2385 uint32_t qtd_ctrl,
2386 size_t qtd_dma_offs,
2387 size_t qtd_length,
2388 uint32_t qtd_ctrl_phase,
2389 ehci_pipe_private_t *pp,
2390 ehci_trans_wrapper_t *tw)
2391 {
2392 uint32_t buf_addr;
2393 size_t buf_len = qtd_length;
2394 uint32_t ctrl = qtd_ctrl;
2395 uint_t i = 0;
2396 int rem_len;
2397
2398 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2399 "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2400 "len 0x%lx", (void *)qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2401
2402 /* Assert that the qtd to be filled in is a dummy */
2403 ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2404
2405 /* Change QTD's state Active */
2406 Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2407
2408 /* Set the total length data transfer */
2409 ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2410 & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2411
2412 /*
2413 * QTDs must be filled in increasing DMA offset order.
2414 * tw_dma_offs is initialized to be 0 at TW creation and
2415 * is only increased in this function.
2416 */
2417 ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2418
2419 /*
2420 * Save the starting dma buffer offset used and
2421 	 * length of data that will be transferred in
2422 * the current QTD.
2423 */
2424 Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2425 Set_QTD(qtd->qtd_xfer_len, buf_len);
2426
2427 while (buf_len) {
2428 /*
2429 * Advance to the next DMA cookie until finding the cookie
2430 * that qtd_dma_offs falls in.
2431 * It is very likely this loop will never repeat more than
2432 * once. It is here just to accommodate the case qtd_dma_offs
2433 * is increased by multiple cookies during two consecutive
2434 * calls into this function. In that case, the interim DMA
2435 * buffer is allowed to be skipped.
2436 */
2437 while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2438 qtd_dma_offs) {
2439 /*
2440 * tw_dma_offs always points to the starting offset
2441 * of a cookie
2442 */
2443 tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2444 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2445 tw->tw_cookie_idx++;
2446 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2447 }
2448
2449 /*
2450 		 * Count the remaining buffer length to be filled into
2451 		 * the QTD for the current DMA cookie.
2452 */
2453 rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2454 qtd_dma_offs;
2455
2456 /* Update the beginning of the buffer */
2457 buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2458 tw->tw_cookie.dmac_address;
2459 ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2460 Set_QTD(qtd->qtd_buf[i], buf_addr);
2461
2462 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2463 "ehci_fill_in_qtd: dmac_addr 0x%x dmac_size "
2464 "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2465 tw->tw_cookie_idx);
2466
2467 if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2468 ASSERT(buf_len <= rem_len);
2469 break;
2470 } else {
2471 ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2472 buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2473 qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2474 }
2475
2476 i++;
2477 }
2478
2479 /*
2480 * Setup the alternate next qTD pointer if appropriate. The alternate
2481 * qtd is currently pointing to a QTD that is not yet linked, but will
2482 * be in the very near future. If a short_xfer occurs in this
2483 	 * situation, the HC will automatically skip this QH. Eventually
2484 	 * everything will be placed and the alternate_qtd will be a valid QTD.
2485 * For more information on alternate qtds look at section 3.5.2 in the
2486 * EHCI spec.
2487 */
2488 if (tw->tw_alt_qtd != NULL) {
2489 Set_QTD(qtd->qtd_alt_next_qtd,
2490 (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2491 EHCI_QTD_ALT_NEXT_QTD_PTR));
2492 }
2493
2494 /*
2495 * For control, bulk and interrupt QTD, now
2496 * enable current QTD by setting active bit.
2497 */
2498 Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2499
2500 /*
2501 	 * For Control Xfer, qtd_ctrl_phase is a valid field.
2502 */
2503 if (qtd_ctrl_phase) {
2504 Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2505 }
2506
2507 /* Set the transfer wrapper */
2508 ASSERT(tw != NULL);
2509 ASSERT(tw->tw_id != 0);
2510
2511 Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2512 }
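
/*
 * A minimal illustrative sketch (not part of the original driver): how
 * ehci_fill_in_qtd() above walks the transfer wrapper's DMA cookies to
 * locate the cookie containing a given DMA offset and derive the
 * physical buffer address from it. The function and parameter names are
 * assumptions; the real code keeps the running cookie state in the TW.
 */
#ifdef EHCI_EXAMPLE_SKETCH
static uint32_t
example_offs_to_dma_addr(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookie,
    size_t *cookie_start, size_t offs)
{
	/* Advance until the cookie covering 'offs' is reached */
	while (*cookie_start + cookie->dmac_size <= offs) {
		*cookie_start += cookie->dmac_size;
		ddi_dma_nextcookie(handle, cookie);
	}

	/* Address is the cookie base plus the offset within the cookie */
	return ((uint32_t)cookie->dmac_address +
	    (uint32_t)(offs - *cookie_start));
}
#endif	/* EHCI_EXAMPLE_SKETCH */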
2513
2514
2515 /*
2516 * ehci_insert_qtd_on_tw:
2517 *
2518 * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2519 * are allocated for this transfer. Insert a QTD onto this list. The list
2520 * of QTD's does not include the dummy QTD that is at the end of the list of
2521 * QTD's for the endpoint.
2522 */
2523 static void
2524 ehci_insert_qtd_on_tw(
2525 ehci_state_t *ehcip,
2526 ehci_trans_wrapper_t *tw,
2527 ehci_qtd_t *qtd)
2528 {
2529 /*
2530 * Set the next pointer to NULL because
2531 * this is the last QTD on list.
2532 */
2533 Set_QTD(qtd->qtd_tw_next_qtd, 0);
2534
2535 if (tw->tw_qtd_head == NULL) {
2536 ASSERT(tw->tw_qtd_tail == NULL);
2537 tw->tw_qtd_head = qtd;
2538 tw->tw_qtd_tail = qtd;
2539 } else {
2540 ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2541
2542 ASSERT(dummy != NULL);
2543 ASSERT(dummy != qtd);
2544 ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2545
2546 /* Add the qtd to the end of the list */
2547 Set_QTD(dummy->qtd_tw_next_qtd,
2548 ehci_qtd_cpu_to_iommu(ehcip, qtd));
2549
2550 tw->tw_qtd_tail = qtd;
2551
2552 ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == 0);
2553 }
2554 }
2555
2556
2557 /*
2558 * ehci_insert_qtd_into_active_qtd_list:
2559 *
2560 * Insert current QTD into active QTD list.
2561 */
2562 static void
2563 ehci_insert_qtd_into_active_qtd_list(
2564 ehci_state_t *ehcip,
2565 ehci_qtd_t *qtd)
2566 {
2567 ehci_qtd_t *curr_qtd, *next_qtd;
2568
2569 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2570
2571 curr_qtd = ehcip->ehci_active_qtd_list;
2572
2573 /* Insert this QTD into QTD Active List */
2574 if (curr_qtd) {
2575 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2576 Get_QTD(curr_qtd->qtd_active_qtd_next));
2577
2578 while (next_qtd) {
2579 curr_qtd = next_qtd;
2580 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2581 Get_QTD(curr_qtd->qtd_active_qtd_next));
2582 }
2583
2584 Set_QTD(qtd->qtd_active_qtd_prev,
2585 ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2586
2587 Set_QTD(curr_qtd->qtd_active_qtd_next,
2588 ehci_qtd_cpu_to_iommu(ehcip, qtd));
2589 } else {
2590 ehcip->ehci_active_qtd_list = qtd;
2591 Set_QTD(qtd->qtd_active_qtd_next, 0);
2592 Set_QTD(qtd->qtd_active_qtd_prev, 0);
2593 }
2594 }
2595
2596
2597 /*
2598 * ehci_remove_qtd_from_active_qtd_list:
2599 *
2600 * Remove current QTD from the active QTD list.
2601 *
2602 * NOTE: This function is also called from POLLED MODE.
2603 */
2604 void
2605 ehci_remove_qtd_from_active_qtd_list(
2606 ehci_state_t *ehcip,
2607 ehci_qtd_t *qtd)
2608 {
2609 ehci_qtd_t *curr_qtd, *prev_qtd, *next_qtd;
2610
2611 ASSERT(qtd != NULL);
2612
2613 curr_qtd = ehcip->ehci_active_qtd_list;
2614
2615 while ((curr_qtd) && (curr_qtd != qtd)) {
2616 curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2617 Get_QTD(curr_qtd->qtd_active_qtd_next));
2618 }
2619
2620 if ((curr_qtd) && (curr_qtd == qtd)) {
2621 prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2622 Get_QTD(curr_qtd->qtd_active_qtd_prev));
2623 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2624 Get_QTD(curr_qtd->qtd_active_qtd_next));
2625
2626 if (prev_qtd) {
2627 Set_QTD(prev_qtd->qtd_active_qtd_next,
2628 Get_QTD(curr_qtd->qtd_active_qtd_next));
2629 } else {
2630 ehcip->ehci_active_qtd_list = next_qtd;
2631 }
2632
2633 if (next_qtd) {
2634 Set_QTD(next_qtd->qtd_active_qtd_prev,
2635 Get_QTD(curr_qtd->qtd_active_qtd_prev));
2636 }
2637 } else {
2638 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2639 "ehci_remove_qtd_from_active_qtd_list: "
2640 "Unable to find QTD in active_qtd_list");
2641 }
2642 }
2643
2644
2645 /*
2646 * ehci_traverse_qtds:
2647 *
2648  * Traverse the list of QTDs for the given pipe using the transfer wrapper.
2649  * Since the endpoint is marked as Halted, the Host Controller (HC) is no
2650  * longer accessing these QTDs. Remove all the QTDs attached to the endpoint.
2651 */
2652 static void
2653 ehci_traverse_qtds(
2654 ehci_state_t *ehcip,
2655 usba_pipe_handle_data_t *ph)
2656 {
2657 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2658 ehci_trans_wrapper_t *next_tw;
2659 ehci_qtd_t *qtd;
2660 ehci_qtd_t *next_qtd;
2661
2662 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2663
2664 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2665 "ehci_traverse_qtds:");
2666
2667 /* Process the transfer wrappers for this pipe */
2668 next_tw = pp->pp_tw_head;
2669
2670 while (next_tw) {
2671 		/* Stop the transfer timer */
2672 ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2673
2674 qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2675
2676 /* Walk through each QTD for this transfer wrapper */
2677 while (qtd) {
2678 /* Remove this QTD from active QTD list */
2679 ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2680
2681 next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2682 Get_QTD(qtd->qtd_tw_next_qtd));
2683
2684 /* Deallocate this QTD */
2685 ehci_deallocate_qtd(ehcip, qtd);
2686
2687 qtd = next_qtd;
2688 }
2689
2690 next_tw = next_tw->tw_next;
2691 }
2692
2693 /* Clear current qtd pointer */
2694 Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2695
2696 /* Update the next qtd pointer in the QH */
2697 Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2698 }
2699
2700
2701 /*
2702 * ehci_deallocate_qtd:
2703 *
2704 * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2705 *
2706 * NOTE: This function is also called from POLLED MODE.
2707 */
2708 void
2709 ehci_deallocate_qtd(
2710 ehci_state_t *ehcip,
2711 ehci_qtd_t *old_qtd)
2712 {
2713 ehci_trans_wrapper_t *tw = NULL;
2714
2715 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2716 "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2717
2718 /*
2719 	 * Obtain the transaction wrapper; tw will be
2720 * NULL for the dummy QTDs.
2721 */
2722 if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2723 tw = (ehci_trans_wrapper_t *)
2724 EHCI_LOOKUP_ID((uint32_t)
2725 Get_QTD(old_qtd->qtd_trans_wrapper));
2726
2727 ASSERT(tw != NULL);
2728 }
2729
2730 /*
2731 * If QTD's transfer wrapper is NULL, don't access its TW.
2732 * Just free the QTD.
2733 */
2734 if (tw) {
2735 ehci_qtd_t *qtd, *next_qtd;
2736
2737 qtd = tw->tw_qtd_head;
2738
2739 if (old_qtd != qtd) {
2740 next_qtd = ehci_qtd_iommu_to_cpu(
2741 ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2742
2743 while (next_qtd != old_qtd) {
2744 qtd = next_qtd;
2745 next_qtd = ehci_qtd_iommu_to_cpu(
2746 ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2747 }
2748
2749 Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2750
2751 if (qtd->qtd_tw_next_qtd == 0) {
2752 tw->tw_qtd_tail = qtd;
2753 }
2754 } else {
2755 tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2756 ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2757
2758 if (tw->tw_qtd_head == NULL) {
2759 tw->tw_qtd_tail = NULL;
2760 }
2761 }
2762 }
2763
2764 bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2765 Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2766
2767 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2768 "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2769 }
2770
2771
2772 /*
2773 * ehci_qtd_cpu_to_iommu:
2774 *
2775  * This function converts the given Transfer Descriptor (QTD) CPU address
2776  * to an IO address.
2777 *
2778 * NOTE: This function is also called from POLLED MODE.
2779 */
2780 uint32_t
2781 ehci_qtd_cpu_to_iommu(
2782 ehci_state_t *ehcip,
2783 ehci_qtd_t *addr)
2784 {
2785 uint32_t td;
2786
2787 td = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2788 (uint32_t)((uintptr_t)addr -
2789 (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2790
2791 ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2792 (uint32_t) (sizeof (ehci_qtd_t) *
2793 (addr - ehcip->ehci_qtd_pool_addr))) ==
2794 (ehcip->ehci_qtd_pool_cookie.dmac_address +
2795 (uint32_t)((uintptr_t)addr - (uintptr_t)
2796 (ehcip->ehci_qtd_pool_addr))));
2797
2798 ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2799 ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2800 sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2801
2802 return (td);
2803 }
2804
2805
2806 /*
2807 * ehci_qtd_iommu_to_cpu:
2808 *
2809  * This function converts the given Transfer Descriptor (QTD) IO address
2810  * to a CPU address.
2811 *
2812 * NOTE: This function is also called from POLLED MODE.
2813 */
2814 ehci_qtd_t *
2815 ehci_qtd_iommu_to_cpu(
2816 ehci_state_t *ehcip,
2817 uintptr_t addr)
2818 {
2819 ehci_qtd_t *qtd;
2820
2821 if (addr == 0)
2822 return (NULL);
2823
2824 qtd = (ehci_qtd_t *)((uintptr_t)
2825 (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2826 (uintptr_t)ehcip->ehci_qtd_pool_addr);
2827
2828 ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2829 ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2830 (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2831
2832 return (qtd);
2833 }
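
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * pool-relative arithmetic behind ehci_qtd_cpu_to_iommu() and
 * ehci_qtd_iommu_to_cpu() above. Both directions apply the same byte
 * offset into the QTD pool to the other address space's base. The
 * example_* names are assumptions.
 */
#ifdef EHCI_EXAMPLE_SKETCH
static uint32_t
example_cpu_to_io(uint32_t io_base, caddr_t cpu_base, caddr_t cpu_addr)
{
	/* Byte offset into the pool, added to the IO (DMA) base */
	return (io_base + (uint32_t)(cpu_addr - cpu_base));
}

static caddr_t
example_io_to_cpu(uint32_t io_base, caddr_t cpu_base, uint32_t io_addr)
{
	/* The same offset, added to the kernel virtual base */
	return (cpu_base + (io_addr - io_base));
}
#endif	/* EHCI_EXAMPLE_SKETCH */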
2834
2835 /*
2836  * ehci_allocate_tds_for_tw:
2837  *
2838  * Allocate n Transfer Descriptors (QTD) from the QTD buffer pool and place
2839  * them into the TW. Also chooses the correct alternate qtd when required. It
2840  * is used for hardware short transfer support. For more information on
2841  * alternate qtds look at section 3.5.2 in the EHCI spec.
2842  * Here is how each alternate qtd is used:
2843 *
2844 * Bulk: used fully.
2845 * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2846 * Ctrl: Should not use alternate QTD
2847 * Isoch: Doesn't support short_xfer nor does it use QTD
2848 *
2849  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested
2850  * QTDs, otherwise USB_SUCCESS.
2851 */
2852 int
2853 ehci_allocate_tds_for_tw(
2854 ehci_state_t *ehcip,
2855 ehci_pipe_private_t *pp,
2856 ehci_trans_wrapper_t *tw,
2857 size_t qtd_count)
2858 {
2859 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
2860 uchar_t attributes;
2861 ehci_qtd_t *qtd;
2862 uint32_t qtd_addr;
2863 int i;
2864 int error = USB_SUCCESS;
2865
2866 attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2867
2868 for (i = 0; i < qtd_count; i += 1) {
2869 qtd = ehci_allocate_qtd_from_pool(ehcip);
2870 if (qtd == NULL) {
2871 error = USB_NO_RESOURCES;
2872 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2873 			    "ehci_allocate_tds_for_tw: "
2874 "Unable to allocate %lu QTDs",
2875 qtd_count);
2876 break;
2877 }
2878 if (i > 0) {
2879 qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2880 tw->tw_qtd_free_list);
2881 Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2882 }
2883 tw->tw_qtd_free_list = qtd;
2884
2885 /*
2886 * Save the second one as a pointer to the new dummy 1.
2887 * It is used later for the alt_qtd_ptr. Xfers with only
2888 * one qtd do not need alt_qtd_ptr.
2889 		 * The QTDs are allocated and pushed onto a stack, which is
2890 * why the second qtd allocated will turn out to be the
2891 * new dummy 1.
2892 */
2893 if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2894 tw->tw_alt_qtd = qtd;
2895 }
2896 }
2897
2898 return (error);
2899 }
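
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * LIFO build-up of the transfer wrapper's QTD free list performed by
 * ehci_allocate_tds_for_tw() above. Each freshly allocated QTD is pushed
 * on top of the list and points back at the previous head; the driver
 * remembers the second allocation (i == 1) as the bulk alternate QTD, as
 * described in the comment above. The example_free_qtd type is an
 * assumption.
 */
#ifdef EHCI_EXAMPLE_SKETCH
typedef struct example_free_qtd {
	struct example_free_qtd	*next;
} example_free_qtd_t;

static void
example_push_free_qtd(example_free_qtd_t **listp, example_free_qtd_t *qtd)
{
	/* The newest allocation becomes the head of the free list */
	qtd->next = *listp;
	*listp = qtd;
}
#endif	/* EHCI_EXAMPLE_SKETCH */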
2900
2901 /*
2902 * ehci_allocate_tw_resources:
2903 *
2904 * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2905  * from the QTD buffer pool and place them into the TW. It is an all
2906  * or nothing transaction.
2907  *
2908  * Returns NULL if there are insufficient resources, otherwise the TW.
2909 */
2910 static ehci_trans_wrapper_t *
2911 ehci_allocate_tw_resources(
2912 ehci_state_t *ehcip,
2913 ehci_pipe_private_t *pp,
2914 size_t tw_length,
2915 usb_flags_t usb_flags,
2916 size_t qtd_count)
2917 {
2918 ehci_trans_wrapper_t *tw;
2919
2920 tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2921
2922 if (tw == NULL) {
2923 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2924 "ehci_allocate_tw_resources: Unable to allocate TW");
2925 } else {
2926 if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2927 USB_SUCCESS) {
2928 tw->tw_num_qtds = (uint_t)qtd_count;
2929 } else {
2930 ehci_deallocate_tw(ehcip, pp, tw);
2931 tw = NULL;
2932 }
2933 }
2934
2935 return (tw);
2936 }
2937
2938
2939 /*
2940 * ehci_free_tw_td_resources:
2941 *
2942  * Free all allocated QTD resources for the Transaction Wrapper (TW).
2943  * Does not free the TW itself.
2946 */
2947 static void
2948 ehci_free_tw_td_resources(
2949 ehci_state_t *ehcip,
2950 ehci_trans_wrapper_t *tw)
2951 {
2952 ehci_qtd_t *qtd = NULL;
2953 ehci_qtd_t *temp_qtd = NULL;
2954
2955 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2956 "ehci_free_tw_td_resources: tw = 0x%p", (void *)tw);
2957
2958 qtd = tw->tw_qtd_free_list;
2959 while (qtd != NULL) {
2960 /* Save the pointer to the next qtd before destroying it */
2961 temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2962 Get_QTD(qtd->qtd_tw_next_qtd));
2963 ehci_deallocate_qtd(ehcip, qtd);
2964 qtd = temp_qtd;
2965 }
2966 tw->tw_qtd_free_list = NULL;
2967 }
2968
2969 /*
2970 * Transfer Wrapper functions
2971 *
2972 * ehci_create_transfer_wrapper:
2973 *
2974 * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2975 * resources.
2976 */
2977 static ehci_trans_wrapper_t *
2978 ehci_create_transfer_wrapper(
2979 ehci_state_t *ehcip,
2980 ehci_pipe_private_t *pp,
2981 size_t length,
2982 uint_t usb_flags)
2983 {
2984 ddi_device_acc_attr_t dev_attr;
2985 ddi_dma_attr_t dma_attr;
2986 int result;
2987 size_t real_length;
2988 ehci_trans_wrapper_t *tw;
2989 int kmem_flag;
2990 int (*dmamem_wait)(caddr_t);
2991 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
2992
2993 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2994 "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2995 length, usb_flags);
2996
2997 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2998
2999 /* SLEEP flag should not be used while holding mutex */
3000 kmem_flag = KM_NOSLEEP;
3001 dmamem_wait = DDI_DMA_DONTWAIT;
3002
3003 /* Allocate space for the transfer wrapper */
3004 tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3005
3006 if (tw == NULL) {
3007 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3008 "ehci_create_transfer_wrapper: kmem_zalloc failed");
3009
3010 return (NULL);
3011 }
3012
3013 /* zero-length packet doesn't need to allocate dma memory */
3014 if (length == 0) {
3015
3016 goto dmadone;
3017 }
3018
3019 /* allow sg lists for transfer wrapper dma memory */
3020 bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3021 dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3022 dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3023
3024 /* Allocate the DMA handle */
3025 result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3026 &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3027
3028 if (result != DDI_SUCCESS) {
3029 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3030 "ehci_create_transfer_wrapper: Alloc handle failed");
3031
3032 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3033
3034 return (NULL);
3035 }
3036
3037 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3038
3039 /* no need for swapping the raw data */
3040 dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
3041 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3042
3043 /* Allocate the memory */
3044 result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3045 &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3046 (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3047
3048 if (result != DDI_SUCCESS) {
3049 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3050 "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3051
3052 ddi_dma_free_handle(&tw->tw_dmahandle);
3053 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3054
3055 return (NULL);
3056 }
3057
3058 ASSERT(real_length >= length);
3059
3060 /* Bind the handle */
3061 result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3062 (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3063 dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3064
3065 if (result != DDI_DMA_MAPPED) {
3066 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3067
3068 ddi_dma_mem_free(&tw->tw_accesshandle);
3069 ddi_dma_free_handle(&tw->tw_dmahandle);
3070 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3071
3072 return (NULL);
3073 }
3074
3075 tw->tw_cookie_idx = 0;
3076 tw->tw_dma_offs = 0;
3077
3078 dmadone:
3079 /*
3080 * Only allow one wrapper to be added at a time. Insert the
3081 * new transaction wrapper into the list for this pipe.
3082 */
3083 if (pp->pp_tw_head == NULL) {
3084 pp->pp_tw_head = tw;
3085 pp->pp_tw_tail = tw;
3086 } else {
3087 pp->pp_tw_tail->tw_next = tw;
3088 pp->pp_tw_tail = tw;
3089 }
3090
3091 /* Store the transfer length */
3092 tw->tw_length = length;
3093
3094 /* Store a back pointer to the pipe private structure */
3095 tw->tw_pipe_private = pp;
3096
3097 /* Store the transfer type - synchronous or asynchronous */
3098 tw->tw_flags = usb_flags;
3099
3100 /* Get and Store 32bit ID */
3101 tw->tw_id = EHCI_GET_ID((void *)tw);
3102
3103 ASSERT(tw->tw_id != 0);
3104
3105 /* isoc ep will not come here */
3106 if (EHCI_INTR_ENDPOINT(eptd)) {
3107 ehcip->ehci_periodic_req_count++;
3108 } else {
3109 ehcip->ehci_async_req_count++;
3110 }
3111 ehci_toggle_scheduler(ehcip);
3112
3113 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3114 "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3115 (void *)tw, tw->tw_ncookies);
3116
3117 return (tw);
3118 }
3119
3120
3121 /*
3122 * ehci_start_xfer_timer:
3123 *
3124 * Start the timer for the control, bulk and for one time interrupt
3125 * transfers.
3126 */
3127 /* ARGSUSED */
3128 static void
3129 ehci_start_xfer_timer(
3130 ehci_state_t *ehcip,
3131 ehci_pipe_private_t *pp,
3132 ehci_trans_wrapper_t *tw)
3133 {
3134 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3135 "ehci_start_xfer_timer: tw = 0x%p", (void *)tw);
3136
3137 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3138
3139 /*
3140 * The timeout handling is done only for control, bulk and for
3141 * one time Interrupt transfers.
3142 *
3143 	 * NOTE: If the timeout is zero, assume an infinite timeout and don't
3144 * insert this transfer on the timeout list.
3145 */
3146 if (tw->tw_timeout) {
3147 /*
3148 * Add this transfer wrapper to the head of the pipe's
3149 * tw timeout list.
3150 */
3151 if (pp->pp_timeout_list) {
3152 tw->tw_timeout_next = pp->pp_timeout_list;
3153 }
3154
3155 pp->pp_timeout_list = tw;
3156 ehci_start_timer(ehcip, pp);
3157 }
3158 }
3159
3160
3161 /*
3162 * ehci_stop_xfer_timer:
3163 *
3164  * Stop the timer for the control, bulk and one time interrupt
3165  * transfers.
3166 */
3167 void
3168 ehci_stop_xfer_timer(
3169 ehci_state_t *ehcip,
3170 ehci_trans_wrapper_t *tw,
3171 uint_t flag)
3172 {
3173 ehci_pipe_private_t *pp;
3174 timeout_id_t timer_id;
3175
3176 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3177 "ehci_stop_xfer_timer: tw = 0x%p", (void *)tw);
3178
3179 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3180
3181 /* Obtain the pipe private structure */
3182 pp = tw->tw_pipe_private;
3183
3184 /* check if the timeout tw list is empty */
3185 if (pp->pp_timeout_list == NULL) {
3186
3187 return;
3188 }
3189
3190 switch (flag) {
3191 case EHCI_REMOVE_XFER_IFLAST:
3192 if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3193 break;
3194 }
3195
3196 /* FALLTHRU */
3197 case EHCI_REMOVE_XFER_ALWAYS:
3198 ehci_remove_tw_from_timeout_list(ehcip, tw);
3199
3200 if ((pp->pp_timeout_list == NULL) &&
3201 (pp->pp_timer_id)) {
3202
3203 timer_id = pp->pp_timer_id;
3204
3205 /* Reset the timer id to zero */
3206 pp->pp_timer_id = 0;
3207
3208 mutex_exit(&ehcip->ehci_int_mutex);
3209
3210 (void) untimeout(timer_id);
3211
3212 mutex_enter(&ehcip->ehci_int_mutex);
3213 }
3214 break;
3215 default:
3216 break;
3217 }
3218 }
3219
3220
3221 /*
3222 * ehci_xfer_timeout_handler:
3223 *
3224 * Control or bulk transfer timeout handler.
3225 */
3226 static void
3227 ehci_xfer_timeout_handler(void *arg)
3228 {
3229 usba_pipe_handle_data_t *ph = (usba_pipe_handle_data_t *)arg;
3230 ehci_state_t *ehcip = ehci_obtain_state(
3231 ph->p_usba_device->usb_root_hub_dip);
3232 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3233 ehci_trans_wrapper_t *tw, *next;
3234 ehci_trans_wrapper_t *expire_xfer_list = NULL;
3235 ehci_qtd_t *qtd;
3236
3237 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3238 "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p",
3239 (void *)ehcip, (void *)ph);
3240
3241 mutex_enter(&ehcip->ehci_int_mutex);
3242
3243 /*
3244 	 * Check whether the timeout handler is still valid.
3245 */
3246 if (pp->pp_timer_id != 0) {
3247
3248 /* Reset the timer id to zero */
3249 pp->pp_timer_id = 0;
3250 } else {
3251 mutex_exit(&ehcip->ehci_int_mutex);
3252
3253 return;
3254 }
3255
3256 /* Get the transfer timeout list head */
3257 tw = pp->pp_timeout_list;
3258
3259 while (tw) {
3260
3261 /* Get the transfer on the timeout list */
3262 next = tw->tw_timeout_next;
3263
3264 tw->tw_timeout--;
3265
3266 if (tw->tw_timeout <= 0) {
3267
3268 /* remove the tw from the timeout list */
3269 ehci_remove_tw_from_timeout_list(ehcip, tw);
3270
3271 /* remove QTDs from active QTD list */
3272 qtd = tw->tw_qtd_head;
3273 while (qtd) {
3274 ehci_remove_qtd_from_active_qtd_list(
3275 ehcip, qtd);
3276
3277 /* Get the next QTD from the wrapper */
3278 qtd = ehci_qtd_iommu_to_cpu(ehcip,
3279 Get_QTD(qtd->qtd_tw_next_qtd));
3280 }
3281
3282 /*
3283 			 * Preserve the order of the requests'
3284 			 * start-time sequence.
3285 */
3286 tw->tw_timeout_next = expire_xfer_list;
3287 expire_xfer_list = tw;
3288 }
3289
3290 tw = next;
3291 }
3292
3293 /*
3294 * The timer should be started before the callbacks.
3295 * There is always a chance that ehci interrupts come
3296 * in when we release the mutex while calling the tw back.
3297 * To keep an accurate timeout it should be restarted
3298 * as soon as possible.
3299 */
3300 ehci_start_timer(ehcip, pp);
3301
3302 /* Get the expired transfer timeout list head */
3303 tw = expire_xfer_list;
3304
3305 while (tw) {
3306
3307 /* Get the next tw on the expired transfer timeout list */
3308 next = tw->tw_timeout_next;
3309
3310 /*
3311 		 * The error handler routine will release the mutex when
3312 * calling back to USBA. But this will not cause any race.
3313 * We do the callback and are relying on ehci_pipe_cleanup()
3314 * to halt the queue head and clean up since we should not
3315 * block in timeout context.
3316 */
3317 ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3318
3319 tw = next;
3320 }
3321 mutex_exit(&ehcip->ehci_int_mutex);
3322 }
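
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * once-per-second countdown used by ehci_xfer_timeout_handler() above.
 * Each pass decrements every pending transfer's remaining seconds; the
 * real handler then unlinks the expired entries and completes them with
 * USB_CR_TIMEOUT after the per-pipe timer has been restarted. The
 * example_xfer type and function name are assumptions.
 */
#ifdef EHCI_EXAMPLE_SKETCH
typedef struct example_xfer {
	struct example_xfer	*next;
	int			seconds_left;
} example_xfer_t;

static int
example_timeout_tick(example_xfer_t *pending)
{
	example_xfer_t	*xfer;
	int		expired = 0;

	for (xfer = pending; xfer != NULL; xfer = xfer->next) {
		if (--xfer->seconds_left <= 0) {
			/* The real handler unlinks this entry here */
			expired++;
		}
	}

	return (expired);
}
#endif	/* EHCI_EXAMPLE_SKETCH */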
3323
3324
3325 /*
3326 * ehci_remove_tw_from_timeout_list:
3327 *
3328 * Remove Control or bulk transfer from the timeout list.
3329 */
3330 static void
3331 ehci_remove_tw_from_timeout_list(
3332 ehci_state_t *ehcip,
3333 ehci_trans_wrapper_t *tw)
3334 {
3335 ehci_pipe_private_t *pp;
3336 ehci_trans_wrapper_t *prev, *next;
3337
3338 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3339 "ehci_remove_tw_from_timeout_list: tw = 0x%p", (void *)tw);
3340
3341 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3342
3343 /* Obtain the pipe private structure */
3344 pp = tw->tw_pipe_private;
3345
3346 if (pp->pp_timeout_list) {
3347 if (pp->pp_timeout_list == tw) {
3348 pp->pp_timeout_list = tw->tw_timeout_next;
3349
3350 tw->tw_timeout_next = NULL;
3351 } else {
3352 prev = pp->pp_timeout_list;
3353 next = prev->tw_timeout_next;
3354
3355 while (next && (next != tw)) {
3356 prev = next;
3357 next = next->tw_timeout_next;
3358 }
3359
3360 if (next == tw) {
3361 prev->tw_timeout_next =
3362 next->tw_timeout_next;
3363 tw->tw_timeout_next = NULL;
3364 }
3365 }
3366 }
3367 }
3368
3369
3370 /*
3371 * ehci_start_timer:
3372 *
3373 * Start the pipe's timer
3374 */
3375 static void
3376 ehci_start_timer(
3377 ehci_state_t *ehcip,
3378 ehci_pipe_private_t *pp)
3379 {
3380 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3381 "ehci_start_timer: ehcip = 0x%p, pp = 0x%p",
3382 (void *)ehcip, (void *)pp);
3383
3384 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3385
3386 /*
3387 	 * Start the pipe's timer only if the timer is not currently
3388 	 * running and there are transfers on the timeout
3389 	 * list. This timer is per pipe.
3390 */
3391 if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3392 pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3393 (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3394 }
3395 }
3396
3397 /*
3398 * ehci_deallocate_tw:
3399 *
3400  * Deallocate a Transaction Wrapper (TW); this involves freeing its
3401  * DMA resources.
3402 */
3403 void
3404 ehci_deallocate_tw(
3405 ehci_state_t *ehcip,
3406 ehci_pipe_private_t *pp,
3407 ehci_trans_wrapper_t *tw)
3408 {
3409 ehci_trans_wrapper_t *prev, *next;
3410
3411 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3412 "ehci_deallocate_tw: tw = 0x%p", (void *)tw);
3413
3414 /*
3415 * If the transfer wrapper has no Host Controller (HC)
3416 * Transfer Descriptors (QTD) associated with it, then
3417 * remove the transfer wrapper.
3418 */
3419 if (tw->tw_qtd_head) {
3420 ASSERT(tw->tw_qtd_tail != NULL);
3421
3422 return;
3423 }
3424
3425 ASSERT(tw->tw_qtd_tail == NULL);
3426
3427 /* Make sure we return all the unused qtd's to the pool as well */
3428 ehci_free_tw_td_resources(ehcip, tw);
3429
3430 /*
3431 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
3432 * given TW then set the head and tail equal to NULL.
3433 * Otherwise search for this TW in the linked TW's list
3434 * and then remove this TW from the list.
3435 */
3436 if (pp->pp_tw_head == tw) {
3437 if (pp->pp_tw_tail == tw) {
3438 pp->pp_tw_head = NULL;
3439 pp->pp_tw_tail = NULL;
3440 } else {
3441 pp->pp_tw_head = tw->tw_next;
3442 }
3443 } else {
3444 prev = pp->pp_tw_head;
3445 next = prev->tw_next;
3446
3447 while (next && (next != tw)) {
3448 prev = next;
3449 next = next->tw_next;
3450 }
3451
3452 if (next == tw) {
3453 prev->tw_next = next->tw_next;
3454
3455 if (pp->pp_tw_tail == tw) {
3456 pp->pp_tw_tail = prev;
3457 }
3458 }
3459 }
3460
3461 /*
3462 	 * Make sure that this TW has been removed
3463 * from the timeout list.
3464 */
3465 ehci_remove_tw_from_timeout_list(ehcip, tw);
3466
3467 /* Deallocate this TW */
3468 ehci_free_tw(ehcip, pp, tw);
3469 }
3470
3471
3472 /*
3473 * ehci_free_dma_resources:
3474 *
3475 * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3476 *
3477 * NOTE: This function is also called from POLLED MODE.
3478 */
3479 void
3480 ehci_free_dma_resources(
3481 ehci_state_t *ehcip,
3482 usba_pipe_handle_data_t *ph)
3483 {
3484 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3485 ehci_trans_wrapper_t *head_tw = pp->pp_tw_head;
3486 ehci_trans_wrapper_t *next_tw, *tw;
3487
3488 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3489 "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3490
3491 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3492
3493 /* Process the Transfer Wrappers */
3494 next_tw = head_tw;
3495 while (next_tw) {
3496 tw = next_tw;
3497 next_tw = tw->tw_next;
3498
3499 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3500 "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3501
3502 ehci_free_tw(ehcip, pp, tw);
3503 }
3504
3505 /* Adjust the head and tail pointers */
3506 pp->pp_tw_head = NULL;
3507 pp->pp_tw_tail = NULL;
3508 }
3509
3510
3511 /*
3512 * ehci_free_tw:
3513 *
3514 * Free the Transfer Wrapper (TW).
3515 */
3516 /*ARGSUSED*/
3517 static void
3518 ehci_free_tw(
3519 ehci_state_t *ehcip,
3520 ehci_pipe_private_t *pp,
3521 ehci_trans_wrapper_t *tw)
3522 {
3523 int rval;
3524 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3525
3526 USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3527 "ehci_free_tw: tw = 0x%p", (void *)tw);
3528
3529 ASSERT(tw != NULL);
3530 ASSERT(tw->tw_id != 0);
3531
3532 /* Free 32bit ID */
3533 EHCI_FREE_ID((uint32_t)tw->tw_id);
3534
3535 if (tw->tw_dmahandle != NULL) {
3536 rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3537 ASSERT(rval == DDI_SUCCESS);
3538
3539 ddi_dma_mem_free(&tw->tw_accesshandle);
3540 ddi_dma_free_handle(&tw->tw_dmahandle);
3541 }
3542
3543 /* interrupt ep will come to this point */
3544 if (EHCI_INTR_ENDPOINT(eptd)) {
3545 ehcip->ehci_periodic_req_count--;
3546 } else {
3547 ehcip->ehci_async_req_count--;
3548 }
3549 ehci_toggle_scheduler(ehcip);
3550
3551 /* Free transfer wrapper */
3552 kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3553 }
3554
3555
3556 /*
3557 * Miscellaneous functions
3558 */
3559
3560 /*
3561 * ehci_allocate_intr_in_resource
3562 *
3563 * Allocate interrupt request structure for the interrupt IN transfer.
3564 */
3565 /*ARGSUSED*/
3566 int
3567 ehci_allocate_intr_in_resource(
3568 ehci_state_t *ehcip,
3569 ehci_pipe_private_t *pp,
3570 ehci_trans_wrapper_t *tw,
3571 usb_flags_t flags)
3572 {
3573 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
3574 usb_intr_req_t *curr_intr_reqp;
3575 usb_opaque_t client_periodic_in_reqp;
3576 size_t length = 0;
3577
3578 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3579 	    "ehci_allocate_intr_in_resource: "
3580 "pp = 0x%p tw = 0x%p flags = 0x%x", (void *)pp, (void *)tw, flags);
3581
3582 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3583 ASSERT(tw->tw_curr_xfer_reqp == NULL);
3584
3585 /* Get the client periodic in request pointer */
3586 client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3587
3588 /*
3589 	 * If there is a saved client periodic IN request, allocate a
3590 	 * corresponding usb periodic IN request for the current
3591 	 * periodic polling request and copy the information from the
3592 	 * saved periodic request structure.
3593 */
3594 if (client_periodic_in_reqp) {
3595
3596 /* Get the interrupt transfer length */
3597 length = ((usb_intr_req_t *)
3598 client_periodic_in_reqp)->intr_len;
3599
3600 curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3601 (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3602 } else {
3603 curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3604 }
3605
3606 if (curr_intr_reqp == NULL) {
3607
3608 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3609 		    "ehci_allocate_intr_in_resource: Interrupt "
3610 "request structure allocation failed");
3611
3612 return (USB_NO_RESOURCES);
3613 }
3614
3615 /* For polled mode */
3616 if (client_periodic_in_reqp == NULL) {
3617 curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3618 curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3619 } else {
3620 /* Check and save the timeout value */
3621 tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3622 USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3623 }
3624
3625 tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3626 tw->tw_length = curr_intr_reqp->intr_len;
3627
3628 mutex_enter(&ph->p_mutex);
3629 ph->p_req_count++;
3630 mutex_exit(&ph->p_mutex);
3631
3632 pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3633
3634 return (USB_SUCCESS);
3635 }
3636
3637 /*
3638 * ehci_pipe_cleanup
3639 *
3640 * Cleanup ehci pipe.
3641 */
3642 void
3643 ehci_pipe_cleanup(
3644 ehci_state_t *ehcip,
3645 usba_pipe_handle_data_t *ph)
3646 {
3647 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3648 uint_t pipe_state = pp->pp_state;
3649 usb_cr_t completion_reason;
3650 usb_ep_descr_t *eptd = &ph->p_ep;
3651
3652 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3653 "ehci_pipe_cleanup: ph = 0x%p", (void *)ph);
3654
3655 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3656
3657 if (EHCI_ISOC_ENDPOINT(eptd)) {
3658 ehci_isoc_pipe_cleanup(ehcip, ph);
3659
3660 return;
3661 }
3662
3663 ASSERT(!servicing_interrupt());
3664
3665 /*
3666 * Set the QH's status to Halt condition.
3667 	 * If another thread is halting, this function will automatically
3668 * wait. If a pipe close happens at this time
3669 * we will be in lots of trouble.
3670 * If we are in an interrupt thread, don't halt, because it may
3671 * do a wait_for_sof.
3672 */
3673 ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3674
3675 /*
3676 	 * Wait for all completed transfers to be processed and
3677 	 * their results sent upstream.
3678 */
3679 ehci_wait_for_transfers_completion(ehcip, pp);
3680
3681 /* Save the data toggle information */
3682 ehci_save_data_toggle(ehcip, ph);
3683
3684 /*
3685 * Traverse the list of QTDs for this pipe using transfer
3686 * wrapper. Process these QTDs depending on their status.
3687 * And stop the timer of this pipe.
3688 */
3689 ehci_traverse_qtds(ehcip, ph);
3690
3691 /* Make sure the timer is not running */
3692 ASSERT(pp->pp_timer_id == 0);
3693
3694 /* Do callbacks for all unfinished requests */
3695 ehci_handle_outstanding_requests(ehcip, pp);
3696
3697 /* Free DMA resources */
3698 ehci_free_dma_resources(ehcip, ph);
3699
3700 switch (pipe_state) {
3701 case EHCI_PIPE_STATE_CLOSE:
3702 completion_reason = USB_CR_PIPE_CLOSING;
3703 break;
3704 case EHCI_PIPE_STATE_RESET:
3705 case EHCI_PIPE_STATE_STOP_POLLING:
3706 /* Set completion reason */
3707 completion_reason = (pipe_state ==
3708 EHCI_PIPE_STATE_RESET) ?
3709 USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3710
3711 /* Restore the data toggle information */
3712 ehci_restore_data_toggle(ehcip, ph);
3713
3714 /*
3715 * Clear the halt bit to restart all the
3716 * transactions on this pipe.
3717 */
3718 ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3719
3720 /* Set pipe state to idle */
3721 pp->pp_state = EHCI_PIPE_STATE_IDLE;
3722
3723 break;
3724 }
3725
3726 /*
3727 * Do the callback for the original client
3728 * periodic IN request.
3729 */
3730 if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3731 ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3732 USB_EP_DIR_IN)) {
3733
3734 ehci_do_client_periodic_in_req_callback(
3735 ehcip, pp, completion_reason);
3736 }
3737 }
3738
3739
3740 /*
3741 * ehci_wait_for_transfers_completion:
3742 *
3743  * Wait for all completed transfers to be processed and their results
3744  * sent upstream.
3745 */
3746 static void
3747 ehci_wait_for_transfers_completion(
3748 ehci_state_t *ehcip,
3749 ehci_pipe_private_t *pp)
3750 {
3751 ehci_trans_wrapper_t *next_tw = pp->pp_tw_head;
3752 ehci_qtd_t *qtd;
3753
3754 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3755 ehcip->ehci_log_hdl,
3756 "ehci_wait_for_transfers_completion: pp = 0x%p", (void *)pp);
3757
3758 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3759
3760 if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3761
3762 return;
3763 }
3764
3765 pp->pp_count_done_qtds = 0;
3766
3767 /* Process the transfer wrappers for this pipe */
3768 while (next_tw) {
3769 qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3770
3771 /*
3772 * Walk through each QTD for this transfer wrapper.
3773 * If a QTD still exists, then it is either on done
3774 * list or on the QH's list.
3775 */
3776 while (qtd) {
3777 if (!(Get_QTD(qtd->qtd_ctrl) &
3778 EHCI_QTD_CTRL_ACTIVE_XACT)) {
3779 pp->pp_count_done_qtds++;
3780 }
3781
3782 qtd = ehci_qtd_iommu_to_cpu(ehcip,
3783 Get_QTD(qtd->qtd_tw_next_qtd));
3784 }
3785
3786 next_tw = next_tw->tw_next;
3787 }
3788
3789 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3790 "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3791 pp->pp_count_done_qtds);
3792
3793 if (!pp->pp_count_done_qtds) {
3794
3795 return;
3796 }
3797
3798 (void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &ehcip->ehci_int_mutex,
3799 drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000), TR_CLOCK_TICK);
3800
3801 if (pp->pp_count_done_qtds) {
3802
3803 USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3804 		    "ehci_wait_for_transfers_completion: "
3805 		    "No transfer completion confirmation received");
3806 }
3807 }
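
/*
 * A minimal illustrative sketch (not part of the original driver): the
 * counted wait pattern used by ehci_wait_for_transfers_completion()
 * above and ehci_check_for_transfers_completion() below. One side counts
 * the outstanding completions and blocks on a condition variable with a
 * timeout; the other decrements the count as completions are processed
 * and signals when it reaches zero. The example_* names are assumptions.
 */
#ifdef EHCI_EXAMPLE_SKETCH
static void
example_wait_for_done(kmutex_t *mp, kcondvar_t *cvp, uint_t *pending,
    clock_t timeout_ticks)
{
	ASSERT(mutex_owned(mp));

	if (*pending == 0) {

		return;
	}

	/* Returns either on cv_signal() or when the timeout expires */
	(void) cv_reltimedwait(cvp, mp, timeout_ticks, TR_CLOCK_TICK);
}

static void
example_note_done(kmutex_t *mp, kcondvar_t *cvp, uint_t *pending)
{
	ASSERT(mutex_owned(mp));

	if ((*pending != 0) && (--(*pending) == 0)) {
		/* Wake up the waiter */
		cv_signal(cvp);
	}
}
#endif	/* EHCI_EXAMPLE_SKETCH */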
3808
3809 /*
3810 * ehci_check_for_transfers_completion:
3811 *
3812 * Check whether anybody is waiting for transfers completion event. If so, send
3813 * this event and also stop initiating any new transfers on this pipe.
3814 */
3815 void
3816 ehci_check_for_transfers_completion(
3817 ehci_state_t *ehcip,
3818 ehci_pipe_private_t *pp)
3819 {
3820 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3821 ehcip->ehci_log_hdl,
3822 "ehci_check_for_transfers_completion: pp = 0x%p", (void *)pp);
3823
3824 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3825
3826 if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3827 (pp->pp_error == USB_CR_NO_RESOURCES) &&
3828 (pp->pp_cur_periodic_req_cnt == 0)) {
3829
3830 /* Reset pipe error to zero */
3831 pp->pp_error = 0;
3832
3833 /* Do callback for original request */
3834 ehci_do_client_periodic_in_req_callback(
3835 ehcip, pp, USB_CR_NO_RESOURCES);
3836 }
3837
3838 if (pp->pp_count_done_qtds) {
3839
3840 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3841 		    "ehci_check_for_transfers_completion: "
3842 "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3843
3844 /* Decrement the done qtd count */
3845 pp->pp_count_done_qtds--;
3846
3847 if (!pp->pp_count_done_qtds) {
3848
3849 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3850 			    "ehci_check_for_transfers_completion: "
3851 			    "Sent transfers completion event pp = 0x%p",
3852 (void *)pp);
3853
3854 /* Send the transfer completion signal */
3855 cv_signal(&pp->pp_xfer_cmpl_cv);
3856 }
3857 }
3858 }
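/*
 * Illustrative pairing with the waiter above (a sketch, not the literal
 * interrupt path): each time the QTD completion code retires a QTD for this
 * pipe it calls, with the interrupt mutex held,
 *
 *	ehci_check_for_transfers_completion(ehcip, pp);
 *
 * which decrements pp->pp_count_done_qtds; once the count reaches zero the
 * cv_signal() above wakes the thread blocked in
 * ehci_wait_for_transfers_completion().
 */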
3859
3860
3861 /*
3862 * ehci_save_data_toggle:
3863 *
3864 * Save the data toggle information.
3865 */
3866 static void
3867 ehci_save_data_toggle(
3868 ehci_state_t *ehcip,
3869 usba_pipe_handle_data_t *ph)
3870 {
3871 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3872 usb_ep_descr_t *eptd = &ph->p_ep;
3873 uint_t data_toggle;
3874 usb_cr_t error = pp->pp_error;
3875 ehci_qh_t *qh = pp->pp_qh;
3876
3877 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3878 ehcip->ehci_log_hdl,
3879 "ehci_save_data_toggle: ph = 0x%p", (void *)ph);
3880
3881 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3882
3883 /* Reset the pipe error value */
3884 pp->pp_error = USB_CR_OK;
3885
3886 /* Return immediately if it is a control pipe */
3887 if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3888 USB_EP_ATTR_CONTROL) {
3889
3890 return;
3891 }
3892
3893 /* Get the data toggle information from the endpoint (QH) */
3894 	data_toggle = (Get_QH(qh->qh_status) &
3895 	    EHCI_QH_STS_DATA_TOGGLE) ? DATA1 : DATA0;
3896
3897 /*
3898 	 * If the error is STALL, reset the
3899 	 * data toggle to DATA0.
3900 */
3901 if (error == USB_CR_STALL) {
3902 data_toggle = DATA0;
3903 }
3904
3905 /*
3906 * Save the data toggle information
3907 * in the usb device structure.
3908 */
3909 mutex_enter(&ph->p_mutex);
3910 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3911 data_toggle);
3912 mutex_exit(&ph->p_mutex);
3913 }
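/*
 * Background for the STALL case above (standard USB 2.0 behavior, noted here
 * for context): clearing an endpoint halt on the device resets its data
 * toggle to DATA0, so recording DATA0 on USB_CR_STALL keeps the host's saved
 * toggle in sync with what the device will use once the halt is cleared.
 */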
3914
3915
3916 /*
3917 * ehci_restore_data_toggle:
3918 *
3919 * Restore the data toggle information.
3920 */
3921 void
3922 ehci_restore_data_toggle(
3923 ehci_state_t *ehcip,
3924 usba_pipe_handle_data_t *ph)
3925 {
3926 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3927 usb_ep_descr_t *eptd = &ph->p_ep;
3928 uint_t data_toggle = 0;
3929
3930 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3931 ehcip->ehci_log_hdl,
3932 "ehci_restore_data_toggle: ph = 0x%p", (void *)ph);
3933
3934 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3935
3936 /* Return immediately if it is a control pipe */
3937 if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3938 USB_EP_ATTR_CONTROL) {
3939
3940 return;
3941 }
3942
3943 mutex_enter(&ph->p_mutex);
3944
3945 data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3946 ph->p_ep.bEndpointAddress);
3947 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3948 0);
3949
3950 mutex_exit(&ph->p_mutex);
3951
3952 /*
3953 * Restore the data toggle bit depending on the
3954 * previous data toggle information.
3955 */
3956 if (data_toggle) {
3957 Set_QH(pp->pp_qh->qh_status,
3958 Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3959 } else {
3960 Set_QH(pp->pp_qh->qh_status,
3961 Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3962 }
3963 }
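/*
 * The restore above is a read-modify-write of the QH status dword; an
 * equivalent sketch of the if/else (illustration only, not a replacement):
 *
 *	uint32_t status = Get_QH(pp->pp_qh->qh_status);
 *
 *	status = data_toggle ? (status | EHCI_QH_STS_DATA_TOGGLE) :
 *	    (status & ~EHCI_QH_STS_DATA_TOGGLE);
 *	Set_QH(pp->pp_qh->qh_status, status);
 *
 * Get_QH()/Set_QH() are the accessor macros for the DMA-able QH pool, which
 * is why the status word is not touched directly.
 */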
3964
3965
3966 /*
3967 * ehci_handle_outstanding_requests
3968 *
3969  * Deallocate the pre-allocated request structures for interrupt IN
3970  * transfers and do the callbacks for all other unfinished requests.
3971 *
3972 * NOTE: This function is also called from POLLED MODE.
3973 */
3974 void
3975 ehci_handle_outstanding_requests(
3976 ehci_state_t *ehcip,
3977 ehci_pipe_private_t *pp)
3978 {
3979 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
3980 usb_ep_descr_t *eptd = &ph->p_ep;
3981 ehci_trans_wrapper_t *curr_tw;
3982 ehci_trans_wrapper_t *next_tw;
3983 usb_opaque_t curr_xfer_reqp;
3984
3985 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3986 ehcip->ehci_log_hdl,
3987 "ehci_handle_outstanding_requests: pp = 0x%p", (void *)pp);
3988
3989 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3990
3991 /* Deallocate all pre-allocated interrupt requests */
3992 next_tw = pp->pp_tw_head;
3993
3994 while (next_tw) {
3995 curr_tw = next_tw;
3996 next_tw = curr_tw->tw_next;
3997
3998 curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
3999
4000 /* Deallocate current interrupt request */
4001 if (curr_xfer_reqp) {
4002
4003 if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
4004 (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
4005
4006 /* Decrement periodic in request count */
4007 pp->pp_cur_periodic_req_cnt--;
4008
4009 ehci_deallocate_intr_in_resource(
4010 ehcip, pp, curr_tw);
4011 } else {
4012 ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
4013 }
4014 }
4015 }
4016 }
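/*
 * In the loop above, transfer wrappers on a periodic endpoint whose
 * tw_direction is EHCI_QTD_CTRL_IN_PID take the
 * ehci_deallocate_intr_in_resource() branch, so their pre-allocated request
 * is freed rather than returned; every other wrapper still carrying a
 * request is completed back to the client with USB_CR_FLUSHED.  A hedged
 * example: a bulk OUT transfer wrapper caught mid-flight when its pipe is
 * closed would take the ehci_hcdi_callback() branch and the client would see
 * USB_CR_FLUSHED as the completion reason.
 */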
4017
4018
4019 /*
4020 * ehci_deallocate_intr_in_resource
4021 *
4022 * Deallocate interrupt request structure for the interrupt IN transfer.
4023 */
4024 void
4025 ehci_deallocate_intr_in_resource(
4026 ehci_state_t *ehcip,
4027 ehci_pipe_private_t *pp,
4028 ehci_trans_wrapper_t *tw)
4029 {
4030 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
4031 uchar_t ep_attr = ph->p_ep.bmAttributes;
4032 usb_opaque_t curr_xfer_reqp;
4033
4034 USB_DPRINTF_L4(PRINT_MASK_LISTS,
4035 ehcip->ehci_log_hdl,
4036 "ehci_deallocate_intr_in_resource: "
4037 "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4038
4039 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4040 ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
4041
4042 curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4043
4044 /* Check the current periodic in request pointer */
4045 if (curr_xfer_reqp) {
4046
4047 tw->tw_curr_xfer_reqp = NULL;
4048
4049 mutex_enter(&ph->p_mutex);
4050 ph->p_req_count--;
4051 mutex_exit(&ph->p_mutex);
4052
4053 /* Free pre-allocated interrupt requests */
4054 usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4055
4056 /* Set periodic in pipe state to idle */
4057 pp->pp_state = EHCI_PIPE_STATE_IDLE;
4058 }
4059 }
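/*
 * Sketch of the bookkeeping undone here (the allocation side lives elsewhere
 * in this driver and is only assumed in this note): when a periodic IN
 * request is pre-allocated for a polling pipe, ph->p_req_count is presumably
 * incremented under ph->p_mutex; this routine reverses that, frees the
 * usb_intr_req_t with usb_free_intr_req(), and parks the pipe in
 * EHCI_PIPE_STATE_IDLE so no further polling transfers are started.
 */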
4060
4061
4062 /*
4063 * ehci_do_client_periodic_in_req_callback
4064 *
4065 * Do callback for the original client periodic IN request.
4066 */
4067 void
4068 ehci_do_client_periodic_in_req_callback(
4069 ehci_state_t *ehcip,
4070 ehci_pipe_private_t *pp,
4071 usb_cr_t completion_reason)
4072 {
4073 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
4074 usb_ep_descr_t *eptd = &ph->p_ep;
4075
4076 USB_DPRINTF_L4(PRINT_MASK_LISTS,
4077 ehcip->ehci_log_hdl,
4078 "ehci_do_client_periodic_in_req_callback: "
4079 "pp = 0x%p cc = 0x%x", (void *)pp, completion_reason);
4080
4081 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4082
4083 /*
4084 	 * For an interrupt or isochronous IN pipe, check whether we need to
4085 	 * do a callback for the original client's periodic IN request.
4086 */
4087 if (pp->pp_client_periodic_in_reqp) {
4088 ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4089 if (EHCI_ISOC_ENDPOINT(eptd)) {
4090 ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4091 } else {
4092 ehci_hcdi_callback(ph, NULL, completion_reason);
4093 }
4094 }
4095 }
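/*
 * pp_client_periodic_in_reqp holds the client's original polling request
 * (presumably stashed when polling was started) and is only returned once no
 * pre-allocated per-transfer requests remain outstanding, hence the ASSERT
 * on pp_cur_periodic_req_cnt above.  A hedged example: when polling on an
 * isochronous IN pipe is stopped, the original request would be returned
 * through ehci_hcdi_isoc_callback() with USB_CR_STOPPED_POLLING, while an
 * interrupt IN pipe would use ehci_hcdi_callback().
 */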
4096
4097
4098 /*
4099 * ehci_hcdi_callback()
4100 *
4101  * Convenience wrapper around usba_hcdi_cb() for non-root-hub pipes.
4102 */
4103 void
4104 ehci_hcdi_callback(
4105 usba_pipe_handle_data_t *ph,
4106 ehci_trans_wrapper_t *tw,
4107 usb_cr_t completion_reason)
4108 {
4109 ehci_state_t *ehcip = ehci_obtain_state(
4110 ph->p_usba_device->usb_root_hub_dip);
4111 ehci_pipe_private_t *pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4112 usb_opaque_t curr_xfer_reqp;
4113 uint_t pipe_state = 0;
4114
4115 USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4116 "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4117 (void *)ph, (void *)tw, completion_reason);
4118
4119 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4120
4121 /* Set the pipe state as per completion reason */
4122 switch (completion_reason) {
4123 case USB_CR_OK:
4124 pipe_state = pp->pp_state;
4125 break;
4126 case USB_CR_NO_RESOURCES:
4127 case USB_CR_NOT_SUPPORTED:
4128 case USB_CR_PIPE_RESET:
4129 case USB_CR_STOPPED_POLLING:
4130 pipe_state = EHCI_PIPE_STATE_IDLE;
4131 break;
4132 case USB_CR_PIPE_CLOSING:
4133 break;
4134 default:
4135 /* Set the pipe state to error */
4136 pipe_state = EHCI_PIPE_STATE_ERROR;
4137 pp->pp_error = completion_reason;
4138 break;
4139
4140 }
4141
4142 pp->pp_state = pipe_state;
4143
4144 if (tw && tw->tw_curr_xfer_reqp) {
4145 curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4146 tw->tw_curr_xfer_reqp = NULL;
4147 } else {
4148 ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4149
4150 curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4151 pp->pp_client_periodic_in_reqp = NULL;
4152 }
4153
4154 ASSERT(curr_xfer_reqp != NULL);
4155
4156 mutex_exit(&ehcip->ehci_int_mutex);
4157
4158 usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4159
4160 mutex_enter(&ehcip->ehci_int_mutex);
4161 }
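/*
 * Note on the locking pattern above: ehci_int_mutex is dropped around
 * usba_hcdi_cb() because the client's callback may re-enter this driver,
 * e.g. to restart polling or queue a new request, and making the upcall with
 * the interrupt mutex held could self-deadlock.  The mutex is re-acquired
 * immediately afterwards since callers expect it to still be held on return.
 */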
4162