xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c (revision 60afb9d1b449f489b493961c1c14893a7a74b287)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2018, Joyent, Inc.
14  * Copyright (c) 2019 by Western Digital Corporation
15  * Copyright 2024 Oxide Computer Company
16  */
17 
18 /*
19  * xHCI Endpoint Initialization and Management
20  *
21  * Please see the big theory statement in xhci.c for more information.
22  */
23 
24 #include <sys/usb/hcd/xhci/xhci.h>
25 #include <sys/sdt.h>
26 
27 boolean_t
28 xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
29 {
30 	usba_pipe_handle_data_t *ph;
31 
32 	ASSERT(xep != NULL);
33 	ph = xep->xep_pipe;
34 	ASSERT(ph != NULL);
35 
36 	return ((xep->xep_type == USB_EP_ATTR_INTR ||
37 	    xep->xep_type == USB_EP_ATTR_ISOCH) &&
38 	    (ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN);
39 }
40 
41 /*
42  * Endpoints are a bit weirdly numbered. Endpoint zero is the default control
43  * endpoint, so the direction doesn't matter. For all the others, they're
44  * arranged as ep 1 out, ep 1 in, ep 2 out, ep 2 in. This is based on the layout
45  * of the Device Context Structure in xHCI 1.1 / 6.2.1. Therefore to go from the
46  * endpoint and direction, we know that endpoint n starts at 2n - 1.  e.g.
47  * endpoint 1 starts at entry 1, endpoint 2 at entry 3, etc. Finally, the OUT
48  * direction comes first, followed by the IN direction. So for the IN
49  * direction we add one to the computed index.
50  */
51 uint_t
52 xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *ph)
53 {
54 	int ep;
55 
56 	ep = ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK;
57 	if (ep == 0)
58 		return (ep);
59 	ep = ep * 2 - 1;
60 	if ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN)
61 		ep++;
62 
63 	VERIFY(ep < XHCI_NUM_ENDPOINTS);
64 	return (ep);
65 }
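
/*
 * For illustration, the mapping computed above works out to: ep 1 OUT (0x01)
 * -> 1, ep 1 IN (0x81) -> 2, ep 2 OUT (0x02) -> 3, ep 2 IN (0x82) -> 4, and
 * so on, with endpoint zero always mapping to index 0.
 */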
66 
67 void
68 xhci_endpoint_timeout_cancel(xhci_t *xhcip, xhci_endpoint_t *xep)
69 {
70 	xep->xep_state |= XHCI_ENDPOINT_TEARDOWN;
71 	if (xep->xep_timeout != 0) {
72 		mutex_exit(&xhcip->xhci_lock);
73 		(void) untimeout(xep->xep_timeout);
74 		mutex_enter(&xhcip->xhci_lock);
75 		xep->xep_timeout = 0;
76 	}
77 }
78 
79 void
80 xhci_endpoint_release(xhci_t *xhcip, xhci_endpoint_t *xep)
81 {
82 	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
83 	VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);
84 	VERIFY(list_is_empty(&xep->xep_transfers));
85 
86 	VERIFY(xep->xep_pipe != NULL);
87 	xep->xep_pipe = NULL;
88 
89 	VERIFY(xep->xep_state & XHCI_ENDPOINT_OPEN);
90 	xep->xep_state &= ~XHCI_ENDPOINT_OPEN;
91 
92 	xhci_endpoint_timeout_cancel(xhcip, xep);
93 }
94 
95 /*
96  * The assumption is that someone calling this owns this endpoint / device and
97  * that it's in a state where it's safe to zero out that information.
98  */
99 void
100 xhci_endpoint_fini(xhci_device_t *xd, int endpoint)
101 {
102 	xhci_endpoint_t *xep = xd->xd_endpoints[endpoint];
103 
104 	VERIFY(xep != NULL);
105 	xd->xd_endpoints[endpoint] = NULL;
106 
107 	if (endpoint != XHCI_DEFAULT_ENDPOINT) {
108 		VERIFY(!(xep->xep_state & XHCI_ENDPOINT_OPEN));
109 	}
110 
111 	xhci_ring_free(&xep->xep_ring);
112 	cv_destroy(&xep->xep_state_cv);
113 	list_destroy(&xep->xep_transfers);
114 	kmem_free(xep, sizeof (xhci_endpoint_t));
115 }
116 
117 /*
118  * Set up the default control endpoint input context. This needs to be done
119  * before we address the device. Note, we separate out the default endpoint from
120  * others, as we must set this up before we have a pipe handle.
121  */
122 int
123 xhci_endpoint_setup_default_context(xhci_t *xhcip, xhci_device_t *xd,
124     xhci_endpoint_t *xep)
125 {
126 	uint_t mps;
127 	xhci_endpoint_context_t *ectx;
128 	uint64_t deq;
129 
130 	ectx = xd->xd_endin[xep->xep_num];
131 	VERIFY(ectx != NULL);
132 
133 	/*
134 	 * We may or may not have a device descriptor. This should match the
135 	 * same initial sizes that are done in hubd_create_child().
136 	 *
137 	 * Note, since we don't necessarily have an endpoint descriptor yet to
138 	 * base this on we instead use the device's defaults if available. This
139 	 * is different from normal endpoints for which there's always a
140 	 * specific descriptor.
141 	 */
142 	switch (xd->xd_usbdev->usb_port_status) {
143 	case USBA_LOW_SPEED_DEV:
144 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
145 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
146 		} else {
147 			mps = 8;
148 		}
149 		break;
150 	case USBA_FULL_SPEED_DEV:
151 	case USBA_HIGH_SPEED_DEV:
152 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
153 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
154 		} else {
155 			mps = 64;
156 		}
157 		break;
158 	case USBA_SUPER_SPEED_DEV:
159 	default:
160 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
161 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
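			/* For SuperSpeed, bMaxPacketSize0 is an exponent (e.g. 9 yields 512). */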
162 			mps = 1 << mps;
163 		} else {
164 			mps = 512;
165 		}
166 		break;
167 	}
168 
169 	bzero(ectx, sizeof (xhci_endpoint_context_t));
170 	ectx->xec_info = LE_32(0);
171 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(3) |
172 	    XHCI_EPCTX_SET_EPTYPE(XHCI_EPCTX_TYPE_CTRL) |
173 	    XHCI_EPCTX_SET_MAXB(0) | XHCI_EPCTX_SET_MPS(mps));
174 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
175 	    xep->xep_ring.xr_tail;
176 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
177 	ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(0) |
178 	    XHCI_EPCTX_AVG_TRB_LEN(XHCI_CONTEXT_DEF_CTRL_ATL));
179 
180 	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
181 	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
182 		xhci_error(xhcip, "failed to initialize default device input "
183 		    "context on slot %d and port %d for endpoint %u:  "
184 		    "encountered fatal FM error synchronizing input context "
185 		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
186 		xhci_fm_runtime_reset(xhcip);
187 		return (EIO);
188 	}
189 
190 	return (0);
191 }
192 
193 /*
194  * Determine if we need to update the maximum packet size of the default
195  * control endpoint. This may happen because we start with the default size
196  * before we have a descriptor and then it may change. For example, with
197  * full-speed devices that may have either an 8 or 64 byte maximum packet size.
198  */
199 int
200 xhci_endpoint_update_default(xhci_t *xhcip, xhci_device_t *xd,
201     xhci_endpoint_t *xep)
202 {
203 	int mps, desc, info, ret;
204 	ASSERT(xd->xd_usbdev != NULL);
205 
206 	mps = XHCI_EPCTX_GET_MPS(xd->xd_endout[xep->xep_num]->xec_info2);
207 	desc = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
208 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
209 		desc = 1 << desc;
210 	}
211 
212 	if (mps == desc)
213 		return (USB_SUCCESS);
214 
215 	/*
216 	 * Update only the context for the default control endpoint.
217 	 */
218 	mutex_enter(&xd->xd_imtx);
219 	info = LE_32(xd->xd_endout[xep->xep_num]->xec_info2);
220 	info &= ~XHCI_EPCTX_SET_MPS(mps);
221 	info |= XHCI_EPCTX_SET_MPS(desc);
222 	xd->xd_endin[xep->xep_num]->xec_info2 = LE_32(info);
223 	xd->xd_input->xic_drop_flags = LE_32(0);
224 	xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(1));
225 
226 	ret = xhci_command_evaluate_context(xhcip, xd);
227 	mutex_exit(&xd->xd_imtx);
228 
229 	return (ret);
230 }
231 
232 static uint_t
233 xhci_endpoint_epdesc_to_type(usb_ep_descr_t *ep)
234 {
235 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
236 	boolean_t in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
237 	    USB_EP_DIR_IN;
238 
239 	switch (type) {
240 	case USB_EP_ATTR_CONTROL:
241 		return (XHCI_EPCTX_TYPE_CTRL);
242 	case USB_EP_ATTR_ISOCH:
243 		if (in == B_TRUE)
244 			return (XHCI_EPCTX_TYPE_ISOCH_IN);
245 		return (XHCI_EPCTX_TYPE_ISOCH_OUT);
246 	case USB_EP_ATTR_BULK:
247 		if (in == B_TRUE)
248 			return (XHCI_EPCTX_TYPE_BULK_IN);
249 		return (XHCI_EPCTX_TYPE_BULK_OUT);
250 	case USB_EP_ATTR_INTR:
251 		if (in == B_TRUE)
252 			return (XHCI_EPCTX_TYPE_INTR_IN);
253 		return (XHCI_EPCTX_TYPE_INTR_OUT);
254 	default:
255 		panic("bad USB attribute type: %d", type);
256 	}
257 
258 	/* LINTED: E_FUNC_NO_RET_VAL */
259 }
260 
261 static uint_t
262 xhci_endpoint_determine_burst(xhci_device_t *xd, xhci_endpoint_t *xep)
263 {
264 	switch (xd->xd_usbdev->usb_port_status) {
265 	case USBA_LOW_SPEED_DEV:
266 	case USBA_FULL_SPEED_DEV:
267 		/*
268 		 * Per xHCI 1.1 / 6.2.3.4, burst is always zero for these
269 		 * devices.
270 		 */
271 		return (0);
272 	case USBA_HIGH_SPEED_DEV:
273 		if (xep->xep_type == USB_EP_ATTR_CONTROL ||
274 		    xep->xep_type == USB_EP_ATTR_BULK)
275 			return (0);
276 		return ((xep->xep_pipe->p_xep.uex_ep.wMaxPacketSize &
277 		    XHCI_CONTEXT_BURST_MASK) >> XHCI_CONTEXT_BURST_SHIFT);
278 	default:
279 		/*
280 		 * For USB >= 3.0 devices, this comes from the companion
281 		 * descriptor.
282 		 */
283 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
284 		return (xep->xep_pipe->p_xep.uex_ep_ss.bMaxBurst);
285 	}
286 }
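
/*
 * As a worked example for the high-speed case above: a high-bandwidth
 * isochronous endpoint advertising wMaxPacketSize 0x1400 encodes a 1024-byte
 * packet size in bits 10:0 and two additional transactions per microframe in
 * bits 12:11, which is what the mask and shift above extract, so the burst
 * value would be 2.
 */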
287 
288 /*
289  * Convert a linear mapping of values that are in the range of 1-255 into a
290  * 2^x value. Because we're supposed to round down for these calculations (see
291  * the note in xHCI 1.1 / 6.2.3.6) we can do this simply with a fls() and
292  * subtracting one.
293  */
294 static uint_t
295 xhci_endpoint_linear_interval(usb_ep_descr_t *ep)
296 {
297 	int exp;
298 	int ival = ep->bInterval;
299 	if (ival < 1)
300 		ival = 1;
301 	if (ival > 255)
302 		ival = 255;
303 	exp = ddi_fls(ival) - 1;
304 	ASSERT(exp >= 0 && exp <= 7);
305 	return (exp);
306 }
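
/*
 * For example, a bInterval of 10 has its most significant bit at position 4,
 * so we return 3: 2^3 = 8, the largest power of two that does not exceed 10,
 * consistent with the round-down rule noted above.
 */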
307 
308 /*
309  * Convert the set of values that use a 2^(x-1) value for interval into a 2^x
310  * range. Note the valid input range is 1-16, so we clamp values based on this.
311  * See xHCI 1.1 / 6.2.3.6 for more information.
312  */
313 static uint_t
314 xhci_endpoint_exponential_interval(usb_ep_descr_t *ep)
315 {
316 	int ival;
317 
318 	ival = ep->bInterval;
319 	if (ival < 1)
320 		ival = 1;
321 	if (ival > 16)
322 		ival = 16;
323 	ival--;
324 	ASSERT(ival >= 0 && ival <= 15);
325 	return (ival);
326 }
327 
328 
329 /*
330  * Determining the interval is unfortunately somewhat complicated as there are
331  * many different forms that things can take. This is all summarized in a
332  * somewhat helpful table, number 65, in xHCI 1.1 / 6.2.3.6. But here's
333  * basically the six different cases we have to consider:
334  *
335  * Case 1: Non-High Speed Bulk and Control Endpoints
336  *	Always return 0.
337  *
338  * Case 2: Super Speed and High Speed Isoch and Intr endpoints
339  *	Convert from a 2^(x-1) range to a 2^x range.
340  *
341  * Case 3: Full Speed Isochronous Endpoints
342  *	As case 2, but add 3 as its values are in frames and we need to convert
343  *	to microframes. Adding three to the result is the same as multiplying
344  *	the initial value by 8.
345  *
346  * Case 4: Full speed and Low Speed Interrupt Endpoints
347  *	These have a 1-255 ms range that we need to convert to a 2^x * 128 us
348  *	range. We use the linear conversion and then add 3 to account for the
349  *	multiplying by 8 conversion from frames to microframes.
350  *
351  * Case 5: High Speed Interrupt and Bulk Output
352  *	These are a bit of a weird case. The spec and other implementations make
353  *	it seem that it's similar to case 4, but without the fixed addition as
354  *	it's interpreted differently due to NAKs.
355  *
356  * Case 6: Low Speed Isochronous Endpoints
357  *	These are not actually defined; however, like other implementations we
358  *	treat them like case 4.
359  */
360 static uint_t
361 xhci_endpoint_interval(xhci_device_t *xd, usb_ep_descr_t *ep)
362 {
363 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
364 	int speed = xd->xd_usbdev->usb_port_status;
365 
366 	/*
367 	 * Handle Cases 1 and 5 first.
368 	 */
369 	if (type == USB_EP_ATTR_CONTROL || type == USB_EP_ATTR_BULK) {
370 		if (speed != USBA_HIGH_SPEED_DEV)
371 			return (0);
372 		return (xhci_endpoint_linear_interval(ep));
373 	}
374 
375 	/*
376 	 * Handle Isoch and Intr cases next.
377 	 */
378 	switch (speed) {
379 	case USBA_LOW_SPEED_DEV:
380 		/*
381 		 * Interrupt endpoints at low speed are the same as full speed,
382 		 * hence the fall through.
383 		 */
384 		if (type == USB_EP_ATTR_ISOCH) {
385 			return (xhci_endpoint_exponential_interval(ep) + 3);
386 		}
387 		/* FALLTHROUGH */
388 	case USBA_FULL_SPEED_DEV:
389 		return (xhci_endpoint_linear_interval(ep) + 3);
390 	case USBA_HIGH_SPEED_DEV:
391 	case USBA_SUPER_SPEED_DEV:
392 	default:
393 		/*
394 		 * Case 2. Treat any newer and faster speeds as Super Speed by
395 		 * default as USB 3.1 is effectively treated the same here.
396 		 */
397 		return (xhci_endpoint_exponential_interval(ep));
398 	}
399 }
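
/*
 * Two worked examples of the above: a full-speed interrupt endpoint with
 * bInterval = 32 ms takes the linear path and yields fls(32) - 1 + 3 = 8,
 * i.e. 2^8 * 125 us = 32 ms. A high-speed interrupt endpoint with
 * bInterval = 4 takes the exponential path and yields 4 - 1 = 3, i.e.
 * 2^3 * 125 us = 1 ms, matching the 2^(4-1) = 8 microframes it requested.
 */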
400 
401 /*
402  * The way to calculate the Maximum ESIT is described in xHCI 1.1 / 4.14.2.
403  * First off, this only applies to Interrupt and Isochronous descriptors. For
404  * Super Speed and newer things, it comes out of a descriptor. Otherwise we
405  * calculate it by doing 'Max Packet Size' * ('Max Burst' + 1).
406  */
407 static uint_t
408 xhci_endpoint_max_esit(xhci_device_t *xd, xhci_endpoint_t *xep, uint_t mps,
409     uint_t burst)
410 {
411 	if (xep->xep_type == USB_EP_ATTR_CONTROL ||
412 	    xep->xep_type == USB_EP_ATTR_BULK) {
413 		return (0);
414 	}
415 
416 	/*
417 	 * Note that this will need to be updated for SuperSpeedPlus ISOC
418 	 * devices to pull from the secondary companion descriptor they use.
419 	 */
420 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
421 		usb_ep_xdescr_t *ep_xdesc = &xep->xep_pipe->p_xep;
422 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
423 		return (ep_xdesc->uex_ep_ss.wBytesPerInterval);
424 	}
425 
426 	return (mps * (burst + 1));
427 }
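
/*
 * For example, a high-speed interrupt endpoint with a 1024-byte max packet
 * size and a burst value of 2 gets a Max ESIT Payload of 1024 * (2 + 1) =
 * 3072 bytes; a SuperSpeed endpoint instead reports this directly via
 * wBytesPerInterval in its companion descriptor.
 */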
428 
429 /*
430  * We've been asked to calculate and tell the xHCI controller an average TRB
431  * data length. This is talked about in an implementation note in xHCI 1.1 /
432  * 4.14.1.1. So, the reality is that it's hard to actually calculate this, as
433  * we're supposed to take into account all of the TRBs that we use on that ring.
434  *
435  * Surveying other xHCI drivers, they all agree on using the default of 8 for
436  * control endpoints; however, from there things get a little more fluid. For
437  * interrupt and isochronous endpoints, many devices use the minimum of the max
438  * packet size and the device's pagesize. For bulk endpoints some folks punt and
439  * don't set anything and others try and set it to the pagesize. The xHCI
440  * implementation note suggests a 3k size here initially. For now, we'll just
441  * guess for bulk endpoints and use our page size as a determining factor for
442  * this and use the BSD style for others. Note Linux here only sets this value
443  * for control devices.
444  */
445 static uint_t
446 xhci_endpoint_avg_trb(xhci_t *xhcip, usb_ep_descr_t *ep, int mps)
447 {
448 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
449 
450 	switch (type) {
451 	case USB_EP_ATTR_ISOCH:
452 	case USB_EP_ATTR_INTR:
453 		return (MIN(xhcip->xhci_caps.xcap_pagesize, mps));
454 	case USB_EP_ATTR_CONTROL:
455 		return (XHCI_CONTEXT_DEF_CTRL_ATL);
456 	case USB_EP_ATTR_BULK:
457 		return (xhcip->xhci_caps.xcap_pagesize);
458 	default:
459 		panic("bad USB endpoint type: %d", type);
460 	}
461 
462 	/* LINTED: E_FUNC_NO_RET_VAL */
463 }
464 
465 /*
466  * Set up the input context for this endpoint.  If this endpoint is already
467  * open, just confirm that the current parameters and the originally programmed
468  * parameters match.
469  */
470 int
471 xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
472     xhci_endpoint_t *xep)
473 {
474 	xhci_endpoint_params_t new_xepp;
475 	xhci_endpoint_context_t *ectx;
476 	uint64_t deq;
477 
478 	/*
479 	 * Explicitly zero this entire struct to start so that we can compare
480 	 * it with bcmp().
481 	 */
482 	bzero(&new_xepp, sizeof (new_xepp));
483 	new_xepp.xepp_configured = B_TRUE;
484 
485 	/*
486 	 * For a USB >=3.0 device we should always have its companion descriptor
487 	 * provided for us by USBA. If it's not here, complain loudly and fail.
488 	 */
489 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV &&
490 	    (xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP) == 0) {
491 		const char *prod, *mfg;
492 
493 		prod = xd->xd_usbdev->usb_product_str;
494 		if (prod == NULL)
495 			prod = "Unknown Device";
496 		mfg = xd->xd_usbdev->usb_mfg_str;
497 		if (mfg == NULL)
498 			mfg = "Unknown Manufacturer";
499 
500 		xhci_log(xhcip, "Encountered USB >=3.0 device without endpoint "
501 		    "companion descriptor. Ensure driver %s is properly using "
502 		    "usb_pipe_xopen() for device %s %s",
503 		    ddi_driver_name(xd->xd_usbdev->usb_dip), prod, mfg);
504 		return (EINVAL);
505 	}
506 
507 	ectx = xd->xd_endin[xep->xep_num];
508 	VERIFY(ectx != NULL);
509 	VERIFY(xd->xd_usbdev->usb_dev_descr != NULL);
510 	VERIFY(xep->xep_pipe != NULL);
511 
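	/*
	 * For high-speed, high-bandwidth endpoints, the upper bits of
	 * wMaxPacketSize encode additional transactions per microframe rather
	 * than packet size, so we mask them off here; they are picked up
	 * separately as the burst by xhci_endpoint_determine_burst().
	 */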
512 	new_xepp.xepp_mps =
513 	    xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
514 	new_xepp.xepp_mult = XHCI_CONTEXT_DEF_MULT;
515 	new_xepp.xepp_cerr = XHCI_CONTEXT_DEF_CERR;
516 
517 	switch (xep->xep_type) {
518 	case USB_EP_ATTR_ISOCH:
519 		/*
520 		 * When we have support for USB 3.1 SuperSpeedPlus devices,
521 		 * we'll need to make sure that we also check for its secondary
522 		 * endpoint companion descriptor here.
523 		 */
524 		/*
525 		 * Super Speed devices nominally have these xHCI super speed
526 		 * companion descriptors. We know that we're not properly
527 		 * grabbing them right now, so until we do, we should basically
528 		 * error about it.
529 		 */
530 		if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
531 			ASSERT(xep->xep_pipe->p_xep.uex_flags &
532 			    USB_EP_XFLAGS_SS_COMP);
533 			new_xepp.xepp_mult =
534 			    xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
535 			    USB_EP_SS_COMP_ISOC_MULT_MASK;
536 		}
537 
538 		new_xepp.xepp_mps &= XHCI_CONTEXT_MPS_MASK;
539 		new_xepp.xepp_cerr = XHCI_CONTEXT_ISOCH_CERR;
540 		break;
541 	default:
542 		/*
543 		 * No explicit changes needed for CONTROL, INTR, and BULK
544 		 * endpoints. They've been handled already and don't have any
545 		 * differences.
546 		 */
547 		break;
548 	}
549 
550 	new_xepp.xepp_eptype = xhci_endpoint_epdesc_to_type(
551 	    &xep->xep_pipe->p_xep.uex_ep);
552 	new_xepp.xepp_burst = xhci_endpoint_determine_burst(xd, xep);
553 	new_xepp.xepp_ival = xhci_endpoint_interval(xd,
554 	    &xep->xep_pipe->p_xep.uex_ep);
555 	new_xepp.xepp_max_esit = xhci_endpoint_max_esit(xd, xep,
556 	    new_xepp.xepp_mps, new_xepp.xepp_burst);
557 	new_xepp.xepp_avgtrb = xhci_endpoint_avg_trb(xhcip,
558 	    &xep->xep_pipe->p_xep.uex_ep, new_xepp.xepp_mps);
559 
560 	/*
561 	 * The multi field may be reserved as zero if the LEC feature flag is
562 	 * set. See the description of mult in xHCI 1.1 / 6.2.3.
563 	 */
564 	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
565 		new_xepp.xepp_mult = 0;
566 
567 	if (xep->xep_params.xepp_configured) {
568 		/*
569 		 * The endpoint context has been configured already.  We are
570 		 * reopening the pipe, so just confirm that the parameters are
571 		 * the same.
572 		 */
573 		if (bcmp(&xep->xep_params, &new_xepp, sizeof (new_xepp)) == 0) {
574 			/*
575 			 * Everything matches up.
576 			 */
577 			return (0);
578 		}
579 
580 		DTRACE_PROBE3(xhci__context__mismatch,
581 		    xhci_t *, xhcip,
582 		    xhci_endpoint_t *, xep,
583 		    xhci_endpoint_params_t *, &new_xepp);
584 
585 		xhci_error(xhcip, "device input context on slot %d and "
586 		    "port %d for endpoint %u was already initialized but "
587 		    "with incompatible parameters",
588 		    xd->xd_slot, xd->xd_port, xep->xep_num);
589 		return (EINVAL);
590 	}
591 
592 	bzero(ectx, sizeof (xhci_endpoint_context_t));
593 
594 	ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(new_xepp.xepp_mult) |
595 	    XHCI_EPCTX_SET_IVAL(new_xepp.xepp_ival));
596 	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC) {
597 		ectx->xec_info |=
598 		    LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(new_xepp.xepp_max_esit));
599 	}
600 
601 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(new_xepp.xepp_cerr) |
602 	    XHCI_EPCTX_SET_EPTYPE(new_xepp.xepp_eptype) |
603 	    XHCI_EPCTX_SET_MAXB(new_xepp.xepp_burst) |
604 	    XHCI_EPCTX_SET_MPS(new_xepp.xepp_mps));
605 
606 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
607 	    xep->xep_ring.xr_tail;
608 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
609 
610 	ectx->xec_txinfo = LE_32(
611 	    XHCI_EPCTX_MAX_ESIT_PAYLOAD(new_xepp.xepp_max_esit) |
612 	    XHCI_EPCTX_AVG_TRB_LEN(new_xepp.xepp_avgtrb));
613 
614 	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
615 	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
616 		xhci_error(xhcip, "failed to initialize device input "
617 		    "context on slot %d and port %d for endpoint %u:  "
618 		    "encountered fatal FM error synchronizing input context "
619 		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
620 		xhci_fm_runtime_reset(xhcip);
621 		return (EIO);
622 	}
623 
624 	bcopy(&new_xepp, &xep->xep_params, sizeof (new_xepp));
625 	VERIFY(xep->xep_params.xepp_configured);
626 	return (0);
627 }
628 
629 /*
630  * Initialize the endpoint and its input context for a given device. This is
631  * called from two different contexts:
632  *
633  *   1. Initializing a device
634  *   2. Opening a USB pipe
635  *
636  * In the second case, we need to worry about locking around the device. We
637  * don't need to worry about the locking in the first case because the USBA
638  * doesn't know about it yet.
639  */
640 int
641 xhci_endpoint_init(xhci_t *xhcip, xhci_device_t *xd,
642     usba_pipe_handle_data_t *ph)
643 {
644 	int ret;
645 	uint_t epid;
646 	xhci_endpoint_t *xep;
647 
648 	if (ph == NULL) {
649 		epid = XHCI_DEFAULT_ENDPOINT;
650 	} else {
651 		ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
652 		epid = xhci_endpoint_pipe_to_epid(ph);
653 	}
654 	VERIFY(xd->xd_endpoints[epid] == NULL);
655 
656 	xep = kmem_zalloc(sizeof (xhci_endpoint_t), KM_SLEEP);
657 	list_create(&xep->xep_transfers, sizeof (xhci_transfer_t),
658 	    offsetof(xhci_transfer_t, xt_link));
659 	cv_init(&xep->xep_state_cv, NULL, CV_DRIVER, NULL);
660 	xep->xep_xd = xd;
661 	xep->xep_xhci = xhcip;
662 	xep->xep_num = epid;
663 	if (ph == NULL) {
664 		xep->xep_pipe = NULL;
665 		xep->xep_type = USB_EP_ATTR_CONTROL;
666 	} else {
667 		xep->xep_pipe = ph;
668 		xep->xep_type = ph->p_ep.bmAttributes & USB_EP_ATTR_MASK;
669 	}
670 
671 	if ((ret = xhci_ring_alloc(xhcip, &xep->xep_ring)) != 0) {
672 		cv_destroy(&xep->xep_state_cv);
673 		list_destroy(&xep->xep_transfers);
674 		kmem_free(xep, sizeof (xhci_endpoint_t));
675 		return (ret);
676 	}
677 
678 	if ((ret = xhci_ring_reset(xhcip, &xep->xep_ring)) != 0) {
679 		xhci_ring_free(&xep->xep_ring);
680 		cv_destroy(&xep->xep_state_cv);
681 		list_destroy(&xep->xep_transfers);
682 		kmem_free(xep, sizeof (xhci_endpoint_t));
683 		return (ret);
684 	}
685 
686 	xd->xd_endpoints[epid] = xep;
687 	if (ph == NULL) {
688 		ret = xhci_endpoint_setup_default_context(xhcip, xd, xep);
689 	} else {
690 		ret = xhci_endpoint_setup_context(xhcip, xd, xep);
691 	}
692 	if (ret != 0) {
693 		xhci_endpoint_fini(xd, xep->xep_num);
694 		return (ret);
695 	}
696 
697 	xep->xep_state |= XHCI_ENDPOINT_OPEN;
698 	return (0);
699 }
700 
701 int
702 xhci_endpoint_reinit(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
703     usba_pipe_handle_data_t *ph)
704 {
705 	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
706 	VERIFY(ph != NULL);
707 	VERIFY3U(xhci_endpoint_pipe_to_epid(ph), ==, xep->xep_num);
708 	VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);
709 
710 	if (xep->xep_type != (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK)) {
711 		/*
712 		 * The endpoint type should not change unless the device has
713 		 * been torn down and recreated by the framework.
714 		 */
715 		return (EINVAL);
716 	}
717 
718 	if (xep->xep_state & XHCI_ENDPOINT_OPEN) {
719 		return (EBUSY);
720 	}
721 
722 	VERIFY(xep->xep_state & XHCI_ENDPOINT_TEARDOWN);
723 	xep->xep_state &= ~XHCI_ENDPOINT_TEARDOWN;
724 
725 	VERIFY3U(xep->xep_timeout, ==, 0);
726 	VERIFY(list_is_empty(&xep->xep_transfers));
727 
728 	VERIFY3P(xep->xep_pipe, ==, NULL);
729 	xep->xep_pipe = ph;
730 
731 	/*
732 	 * Verify that the endpoint context parameters have not changed in a
733 	 * way that requires us to tell the controller about it.
734 	 */
735 	int ret;
736 	if ((ret = xhci_endpoint_setup_context(xhcip, xd, xep)) != 0) {
737 		xep->xep_pipe = NULL;
738 		xhci_endpoint_timeout_cancel(xhcip, xep);
739 		return (ret);
740 	}
741 
742 	xep->xep_state |= XHCI_ENDPOINT_OPEN;
743 	return (0);
744 }
745 
746 /*
747  * Wait until any ongoing resets or time outs are completed.
748  */
749 void
750 xhci_endpoint_serialize(xhci_t *xhcip, xhci_endpoint_t *xep)
751 {
752 	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
753 
754 	while ((xep->xep_state & XHCI_ENDPOINT_SERIALIZE) != 0) {
755 		cv_wait(&xep->xep_state_cv, &xhcip->xhci_lock);
756 	}
757 }
758 
759 /*
760  * Attempt to quiesce an endpoint. Depending on the state of the endpoint, we
761  * may need to simply stop it. Alternatively, we may need to explicitly reset
762  * the endpoint. Once done, this endpoint should be stopped and can be
763  * manipulated.
764  */
765 int
766 xhci_endpoint_quiesce(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
767 {
768 	int ret = USB_SUCCESS;
769 	xhci_endpoint_context_t *epctx = xd->xd_endout[xep->xep_num];
770 
771 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
772 	ASSERT(xep->xep_state & XHCI_ENDPOINT_QUIESCE);
773 
774 	/*
775 	 * First attempt to stop the endpoint, unless it's halted. We don't
776 	 * really care what state it is in. Note that because other activity
777 	 * could be going on, the state may change on us; however, if it's
778 	 * running, it will always transition to a stopped state and none of the
779 	 * other valid states will allow transitions without us taking an active
780 	 * action.
781 	 */
782 	if (!(xep->xep_state & XHCI_ENDPOINT_HALTED)) {
783 		mutex_exit(&xhcip->xhci_lock);
784 		ret = xhci_command_stop_endpoint(xhcip, xd, xep);
785 		mutex_enter(&xhcip->xhci_lock);
786 
787 		if (ret == USB_INVALID_CONTEXT) {
788 			XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
789 		}
790 	}
791 
792 	/*
793 	 * Now, if we had the HALTED flag set or we failed to stop it due to a
794 	 * context error and we're in the HALTED state now, reset the endpoint.
795 	 */
796 	if ((xep->xep_state & XHCI_ENDPOINT_HALTED) ||
797 	    (ret == USB_INVALID_CONTEXT &&
798 	    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)) == XHCI_EP_HALTED)) {
799 		mutex_exit(&xhcip->xhci_lock);
800 		ret = xhci_command_reset_endpoint(xhcip, xd, xep);
801 		mutex_enter(&xhcip->xhci_lock);
802 	}
803 
804 	/*
805 	 * Ideally, one of the two commands should have worked; however, we
806 	 * could have had a context error due to being in the wrong state.
807 	 * Verify that we're either in the ERROR or STOPPED state and treat both
808 	 * as success. All callers are assumed to be doing this so they can
809 	 * change the dequeue pointer.
810 	 */
811 	if (ret != USB_SUCCESS && ret != USB_INVALID_CONTEXT) {
812 		return (ret);
813 	}
814 
815 	if (ret == USB_INVALID_CONTEXT) {
816 		XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
817 
818 		switch (XHCI_EPCTX_STATE(LE_32(epctx->xec_info))) {
819 		case XHCI_EP_STOPPED:
820 		case XHCI_EP_ERROR:
821 			/*
822 			 * This is where we wanted to go, so let's just take it.
823 			 */
824 			ret = USB_SUCCESS;
825 			break;
826 		case XHCI_EP_DISABLED:
827 		case XHCI_EP_RUNNING:
828 		case XHCI_EP_HALTED:
829 		default:
830 			/*
831 			 * If we're in any of these, something really weird has
832 			 * happened and it's not worth trying to recover at this
833 			 * point.
834 			 */
835 			xhci_error(xhcip, "!asked to stop endpoint %u on slot "
836 			    "%d and port %d: ended up in unexpected state %d",
837 			    xep->xep_num, xd->xd_slot, xd->xd_port,
838 			    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)));
839 			return (ret);
840 		}
841 	}
842 
843 	/*
844 	 * Now that we're successful, we can clear any possible halted state
845 	 * tracking that we might have had.
846 	 */
847 	if (ret == USB_SUCCESS) {
848 		xep->xep_state &= ~XHCI_ENDPOINT_HALTED;
849 	}
850 
851 	return (ret);
852 }
853 
854 int
855 xhci_endpoint_ring(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
856 {
857 	/*
858 	 * The doorbell IDs are offset by one from the endpoint numbers that we
859 	 * keep.
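	 * The default control endpoint (xep_num 0) thus uses doorbell target 1.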
860 	 */
861 	xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(xd->xd_slot),
862 	    xep->xep_num + 1);
863 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
864 		xhci_error(xhcip, "failed to ring doorbell for slot %d and "
865 		    "endpoint %u: encountered fatal FM register access error",
866 		    xd->xd_slot, xep->xep_num);
867 		xhci_fm_runtime_reset(xhcip);
868 		return (USB_HC_HARDWARE_ERROR);
869 	}
870 
871 	DTRACE_PROBE3(xhci__doorbell__ring, xhci_t *, xhcip, uint32_t,
872 	    XHCI_DOORBELL(xd->xd_slot), uint32_t, xep->xep_num + 1);
873 
874 	return (USB_SUCCESS);
875 }
876 
877 static void
878 xhci_endpoint_tick(void *arg)
879 {
880 	int ret;
881 	xhci_transfer_t *xt;
882 	xhci_endpoint_t *xep = arg;
883 	xhci_device_t *xd = xep->xep_xd;
884 	xhci_t *xhcip = xep->xep_xhci;
885 
886 	mutex_enter(&xhcip->xhci_lock);
887 
888 	/*
889 	 * If we have the teardown flag set, then this endpoint is going away;
890 	 * don't try to do anything. Also, if somehow a periodic endpoint has
891 	 * something scheduled, just quit now and don't bother.
892 	 */
893 	if (xep->xep_state & (XHCI_ENDPOINT_TEARDOWN |
894 	    XHCI_ENDPOINT_PERIODIC)) {
895 		xep->xep_timeout = 0;
896 		mutex_exit(&xhcip->xhci_lock);
897 		return;
898 	}
899 
900 	/*
901 	 * If something else, potentially something dangerous, has already kicked
902 	 * off, just don't bother waiting for it and reschedule.
903 	 */
904 	if (xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) {
905 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
906 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
907 		mutex_exit(&xhcip->xhci_lock);
908 		return;
909 	}
910 
911 	/*
912 	 * At this point, we have an endpoint that we need to consider. See if
913 	 * there are any transfers on it, if none, we're done. If so, check if
914 	 * we have exceeded the timeout. If we have, then we have some work to
915 	 * do.
916 	 */
917 	xt = list_head(&xep->xep_transfers);
918 	if (xt == NULL) {
919 		xep->xep_timeout = 0;
920 		mutex_exit(&xhcip->xhci_lock);
921 		return;
922 	}
923 
924 	if (xt->xt_timeout > 0) {
925 		xt->xt_timeout--;
926 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
927 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
928 		mutex_exit(&xhcip->xhci_lock);
929 		return;
930 	}
931 
932 	/*
933 	 * This item has timed out. We need to stop the ring and take action.
934 	 */
935 	xep->xep_state |= XHCI_ENDPOINT_TIMED_OUT | XHCI_ENDPOINT_QUIESCE;
936 	ret = xhci_endpoint_quiesce(xhcip, xd, xep);
937 	if (ret != USB_SUCCESS) {
938 		/*
939 		 * If we fail to quiesce during the timeout, then remove the
940 		 * state flags and hopefully we'll be able to do so the next time
941 		 * around or if a reset or polling stop comes in, maybe it can
942 		 * deal with it.
943 		 */
944 		xep->xep_state &= ~(XHCI_ENDPOINT_QUIESCE |
945 		    XHCI_ENDPOINT_TIMED_OUT);
946 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
947 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
948 		mutex_exit(&xhcip->xhci_lock);
949 		cv_broadcast(&xep->xep_state_cv);
950 		xhci_error(xhcip, "failed to successfully quiesce timed out "
951 		    "endpoint %u of device on slot %d and port %d: device "
952 		    "remains timed out", xep->xep_num, xd->xd_slot,
953 		    xd->xd_port);
954 		return;
955 	}
956 
957 	xhci_ring_skip_transfer(&xep->xep_ring, xt);
958 	(void) list_remove_head(&xep->xep_transfers);
959 	mutex_exit(&xhcip->xhci_lock);
960 
961 	/*
962 	 * At this point, we try and set the ring's dequeue pointer. If this
963 	 * fails, we're left in an awkward state. We've already adjusted the
964 	 * ring and removed the transfer. All we can really do is go through and
965 	 * return the transfer and hope that the consumer attempts to reset the
966 	 * ring, which may succeed at this point. Based on everything we've
967 	 * done to set things up, it'd be odd if this did fail.
968 	 */
969 	ret = xhci_command_set_tr_dequeue(xhcip, xd, xep);
970 	mutex_enter(&xhcip->xhci_lock);
971 	xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
972 	if (ret == USB_SUCCESS) {
973 		xep->xep_state &= ~XHCI_ENDPOINT_TIMED_OUT;
974 	} else {
975 		xhci_error(xhcip, "failed to successfully set transfer ring "
976 		    "dequeue pointer of timed out endpoint %u of "
977 		    "device on slot %d and port %d: device remains timed out, "
978 		    "please use cfgadm to recover", xep->xep_num, xd->xd_slot,
979 		    xd->xd_port);
980 	}
981 	xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
982 	    drv_usectohz(XHCI_TICK_TIMEOUT_US));
983 	mutex_exit(&xhcip->xhci_lock);
984 	cv_broadcast(&xep->xep_state_cv);
985 
986 	/*
987 	 * Because we never time out periodic related activity, we will always
988 	 * have the request on the transfer.
989 	 */
990 	ASSERT(xt->xt_usba_req != NULL);
991 	usba_hcdi_cb(xep->xep_pipe, xt->xt_usba_req, USB_CR_TIMEOUT);
992 	xhci_transfer_free(xhcip, xt);
993 }
994 
995 /*
996  * We've been asked to schedule a series of frames onto the specified endpoint.
997  * We need to make sure that there is enough room, at which point we can queue
998  * it and then ring the doorbell. Note that we queue in reverse order so the
999  * hardware won't see a valid cycle bit until the whole transfer is on the ring.
1000  */
1001 int
1002 xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
1003     xhci_transfer_t *xt, boolean_t ring)
1004 {
1005 	int i;
1006 	xhci_ring_t *rp = &xep->xep_ring;
1007 
1008 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1009 	ASSERT(xt->xt_ntrbs > 0);
1010 	ASSERT(xt->xt_trbs != NULL);
1011 
1012 	if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
1013 		return (USB_FAILURE);
1014 
1015 	if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
1016 		return (USB_NO_RESOURCES);
1017 
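	/*
	 * Fill in all of the TRBs after the first one before going back and
	 * filling in the first. Per the block comment above, this is intended
	 * to keep the first TRB from appearing valid to the controller until
	 * the entire transfer is on the ring.
	 */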
1018 	for (i = xt->xt_ntrbs - 1; i > 0; i--) {
1019 		xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], &xt->xt_trbs_pa[i],
1020 		    B_TRUE);
1021 	}
1022 	xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], &xt->xt_trbs_pa[0],
1023 	    B_FALSE);
1024 
1025 	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
1026 	xhci_ring_trb_produce(rp, xt->xt_ntrbs);
1027 	list_insert_tail(&xep->xep_transfers, xt);
1028 
1029 	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
1030 	if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
1031 		xhci_error(xhcip, "failed to write out TRB for device on slot "
1032 		    "%d, port %d, and endpoint %u: encountered fatal FM error "
1033 		    "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
1034 		    xep->xep_num);
1035 		xhci_fm_runtime_reset(xhcip);
1036 		return (USB_HC_HARDWARE_ERROR);
1037 	}
1038 
1039 	if (xep->xep_timeout == 0 &&
1040 	    !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
1041 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
1042 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
1043 	}
1044 
1045 	xt->xt_sched_time = gethrtime();
1046 
1047 	if (ring == B_FALSE)
1048 		return (USB_SUCCESS);
1049 
1050 	return (xhci_endpoint_ring(xhcip, xd, xep));
1051 }
1052 
1053 xhci_transfer_t *
1054 xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
1055     xhci_trb_t *trb, uint_t *offp)
1056 {
1057 	uint_t i;
1058 	uint64_t addr;
1059 	xhci_transfer_t *xt;
1060 
1061 	ASSERT(xhcip != NULL);
1062 	ASSERT(offp != NULL);
1063 	ASSERT(xep != NULL);
1064 	ASSERT(trb != NULL);
1065 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1066 
1067 	if ((xt = list_head(&xep->xep_transfers)) == NULL)
1068 		return (NULL);
1069 
1070 	addr = LE_64(trb->trb_addr);
1071 
1072 	/*
1073 	 * Check if this is the simple case of an event data TRB. If it is, then
1074 	 * all we need to do is check that its data pointer matches the address
1075 	 * of the transfer.
1076 	 */
1077 	if (XHCI_TRB_GET_ED(LE_32(trb->trb_flags)) != 0) {
1078 		if (LE_64(trb->trb_addr) != (uintptr_t)xt)
1079 			return (NULL);
1080 
1081 		*offp = xt->xt_ntrbs - 1;
1082 		return (xt);
1083 	}
1084 
1085 	/*
1086 	 * This represents an error that has occurred. We need to check two
1087 	 * different things. The first is that the TRB PA maps to one of the
1088 	 * TRBs in the transfer. Secondly, we need to make sure that it makes
1089 	 * sense in the context of the ring and our notion of where the tail is.
1090 	 */
1091 	for (i = 0; i < xt->xt_ntrbs; i++) {
1092 		if (xt->xt_trbs_pa[i] == addr)
1093 			break;
1094 	}
1095 
1096 	if (i == xt->xt_ntrbs)
1097 		return (NULL);
1098 
1099 	if (xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
1100 	    xt->xt_ntrbs) == -1)
1101 		return (NULL);
1102 
1103 	*offp = i;
1104 	return (xt);
1105 }
1106 
1107 static void
1108 xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
1109     xhci_endpoint_t *xep, xhci_transfer_t *xt)
1110 {
1111 	int ret;
1112 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
1113 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
1114 
1115 	ASSERT3U(xpp->xpp_tsize, >, 0);
1116 
1117 	xt->xt_short = 0;
1118 	xt->xt_cr = USB_CR_OK;
1119 
1120 	mutex_enter(&xhcip->xhci_lock);
1121 
1122 	/*
1123 	 * If we don't have an active poll, then we shouldn't bother trying to
1124 	 * reschedule it. This means that we're trying to stop or we ran out of
1125 	 * memory.
1126 	 */
1127 	if (xpp->xpp_poll_state != XHCI_PERIODIC_POLL_ACTIVE) {
1128 		mutex_exit(&xhcip->xhci_lock);
1129 		return;
1130 	}
1131 
1132 	if (xep->xep_type == USB_EP_ATTR_ISOCH) {
1133 		int i;
1134 		for (i = 0; i < xt->xt_ntrbs; i++) {
1135 			xt->xt_isoc[i].isoc_pkt_actual_length =
1136 			    xt->xt_isoc[i].isoc_pkt_length;
1137 			xt->xt_isoc[i].isoc_pkt_status = USB_CR_OK;
1138 		}
1139 	}
1140 
1141 	/*
1142 	 * In general, there should always be space on the ring for this. The
1143 	 * only reason that rescheduling an existing transfer for a periodic
1144 	 * endpoint wouldn't work is because we have a hardware error, at which
1145 	 * point we're going to be going down hard anyways. We log and bump a
1146  * stat here to make this case discoverable in case our assumptions are
1147 	 * wrong.
1148 	 */
1149 	ret = xhci_endpoint_schedule(xhcip, xd, xep, xt, B_TRUE);
1150 	if (ret != 0) {
1151 		xhci_log(xhcip, "!failed to reschedule periodic endpoint %u "
1152 		    "(type %u) on slot %d: %d\n", xep->xep_num, xep->xep_type,
1153 		    xd->xd_slot, ret);
1154 	}
1155 	mutex_exit(&xhcip->xhci_lock);
1156 }
1157 
1158 /*
1159  * We're dealing with a message on a control endpoint. This may be a default
1160  * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
1161  * a setup stage, data stage (which may have one or more other TRBs) and then a
1162  * final status stage.
1163  *
1164  * We generally set ourselves up such that we get interrupted and notified only
1165  * on the status stage and for short transfers in the data stage. If we
1166  * encounter a short transfer in the data stage, then we need to go through and
1167  * check whether or not the short transfer is allowed. If it is, then there's
1168  * nothing to do. We'll update everything and call back the framework once we
1169  * get the status stage.
1170  */
1171 static boolean_t
1172 xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
1173     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1174 {
1175 	int code;
1176 	usb_ctrl_req_t *ucrp;
1177 	xhci_transfer_t *rem;
1178 
1179 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1180 
1181 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1182 	ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1183 
1184 	/*
1185 	 * Now that we know what this TRB is for, determine whether it was for a
1186 	 * data/normal stage or for the status stage. We cheat by looking at the
1187 	 * last entry. If it's a data stage, then we must have gotten a short
1188 	 * write. We record this fact and whether we should consider the transfer
1189 	 * fatal for the subsequent status stage.
1190 	 */
1191 	if (off != xt->xt_ntrbs - 1) {
1192 		uint_t remain;
1193 		usb_ctrl_req_t *ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1194 
1195 		/*
1196 		 * This is a data stage TRB. The only reason we should have
1197 		 * gotten something for this is because it was short. Make sure
1198 		 * it's okay before we continue.
1199 		 */
1200 		VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
1201 		if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
1202 			xt->xt_cr = USB_CR_DATA_UNDERRUN;
1203 			mutex_exit(&xhcip->xhci_lock);
1204 			return (B_TRUE);
1205 		}
1206 
1207 		/*
1208 		 * The value in the resulting trb is how much data remained to
1209 		 * be transferred. Normalize that against the original buffer
1210 		 * size.
1211 		 */
1212 		remain = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1213 		xt->xt_short = xt->xt_buffer.xdb_len - remain;
1214 		mutex_exit(&xhcip->xhci_lock);
1215 		return (B_TRUE);
1216 	}
1217 
1218 	/*
1219 	 * Okay, this is a status stage trb that's in good health. We should
1220 	 * finally go ahead, sync data and try and finally do the callback. If
1221 	 * we have short data, then xt->xt_short will be non-zero.
1222 	 */
1223 	if (xt->xt_data_tohost == B_TRUE) {
1224 		size_t len;
1225 		if (xt->xt_short != 0) {
1226 			len = xt->xt_short;
1227 		} else {
1228 			len = xt->xt_buffer.xdb_len;
1229 		}
1230 
1231 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1232 		    DDI_FM_OK) {
1233 			xhci_error(xhcip, "failed to process control transfer "
1234 			    "callback for endpoint %u of device on slot %d and "
1235 			    "port %d: encountered fatal FM error synchronizing "
1236 			    "DMA memory, resetting device", xep->xep_num,
1237 			    xd->xd_slot, xd->xd_port);
1238 			xhci_fm_runtime_reset(xhcip);
1239 			mutex_exit(&xhcip->xhci_lock);
1240 			return (B_FALSE);
1241 		}
1242 
1243 		xhci_transfer_copy(xt, ucrp->ctrl_data->b_rptr, len, B_TRUE);
1244 		ucrp->ctrl_data->b_wptr += len;
1245 	}
1246 
1247 	/*
1248 	 * Now we're done. We can go ahead and bump the ring. Free the transfer
1249 	 * outside of the lock and call back into the framework.
1250 	 */
1251 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1252 	rem = list_remove_head(&xep->xep_transfers);
1253 	VERIFY3P(rem, ==, xt);
1254 	mutex_exit(&xhcip->xhci_lock);
1255 
1256 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)ucrp, xt->xt_cr);
1257 	xhci_transfer_free(xhcip, xt);
1258 
1259 	return (B_TRUE);
1260 }
1261 
1262 /*
1263  * Cons up a new usb request for the periodic data transfer if we can. If there
1264  * isn't one available, change the return code to NO_RESOURCES and stop polling
1265  * on this endpoint, thus using and consuming the original request.
1266  */
1267 static usb_opaque_t
1268 xhci_endpoint_dup_periodic(xhci_endpoint_t *xep, xhci_transfer_t *xt,
1269     usb_cr_t *cr)
1270 {
1271 	usb_opaque_t urp;
1272 
1273 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
1274 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
1275 
1276 	if (XHCI_IS_ONESHOT_XFER(xt)) {
1277 		/*
1278 		 * Oneshot Interrupt IN transfers already have a USB request
1279 		 * which we can just return:
1280 		 */
1281 		return (xt->xt_usba_req);
1282 	}
1283 
1284 	if (xep->xep_type == USB_EP_ATTR_INTR) {
1285 		urp = (usb_opaque_t)usba_hcdi_dup_intr_req(xep->xep_pipe->p_dip,
1286 		    (usb_intr_req_t *)xpp->xpp_usb_req, xpp->xpp_tsize, 0);
1287 	} else {
1288 		urp = (usb_opaque_t)usba_hcdi_dup_isoc_req(xep->xep_pipe->p_dip,
1289 		    (usb_isoc_req_t *)xpp->xpp_usb_req, 0);
1290 	}
1291 	if (urp == NULL) {
1292 		xpp->xpp_poll_state = XHCI_PERIODIC_POLL_NOMEM;
1293 		urp = xpp->xpp_usb_req;
1294 		xpp->xpp_usb_req = NULL;
1295 		*cr = USB_CR_NO_RESOURCES;
1296 	} else {
1297 		mutex_enter(&xep->xep_pipe->p_mutex);
1298 		xep->xep_pipe->p_req_count++;
1299 		mutex_exit(&xep->xep_pipe->p_mutex);
1300 	}
1301 
1302 	return (urp);
1303 }
1304 
1305 xhci_device_t *
1306 xhci_device_lookup_by_slot(xhci_t *xhcip, int slot)
1307 {
1308 	xhci_device_t *xd;
1309 
1310 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1311 
1312 	for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
1313 	    xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
1314 		if (xd->xd_slot == slot)
1315 			return (xd);
1316 	}
1317 
1318 	return (NULL);
1319 }
1320 
1321 /*
1322  * Handle things which consist solely of normal transfers, in other words, bulk
1323  * and interrupt transfers.
1324  */
1325 static boolean_t
1326 xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
1327     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1328 {
1329 	int code;
1330 	usb_cr_t cr;
1331 	xhci_transfer_t *rem;
1332 	int attrs;
1333 	mblk_t *mp;
1334 	boolean_t periodic = B_FALSE;
1335 	usb_opaque_t urp;
1336 
1337 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1338 	ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
1339 	    xep->xep_type == USB_EP_ATTR_INTR);
1340 
1341 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1342 
1343 	if (code == XHCI_CODE_SHORT_XFER) {
1344 		uint_t residue;
1345 		residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1346 
1347 		if (xep->xep_type == USB_EP_ATTR_BULK) {
1348 			VERIFY3U(XHCI_TRB_GET_ED(LE_32(trb->trb_flags)), !=, 0);
1349 			xt->xt_short = residue;
1350 		} else {
1351 			xt->xt_short = xt->xt_buffer.xdb_len - residue;
1352 		}
1353 	}
1354 
1355 	/*
1356 	 * If we have an interrupt from something that's not the last entry,
1357 	 * that must mean we had a short transfer, so there's nothing more for
1358 	 * us to do at the moment. We won't call back until everything's
1359 	 * finished for the general transfer.
1360 	 */
1361 	if (off < xt->xt_ntrbs - 1) {
1362 		mutex_exit(&xhcip->xhci_lock);
1363 		return (B_TRUE);
1364 	}
1365 
1366 	urp = xt->xt_usba_req;
1367 	if (xep->xep_type == USB_EP_ATTR_BULK) {
1368 		usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
1369 		attrs = ubrp->bulk_attributes;
1370 		mp = ubrp->bulk_data;
1371 	} else {
1372 		usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;
1373 
1374 		if (uirp == NULL) {
1375 			periodic = B_TRUE;
1376 			urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1377 			uirp = (usb_intr_req_t *)urp;
1378 
1379 			/*
1380 			 * If we weren't able to duplicate the interrupt, then
1381 			 * we can't put any data in it.
1382 			 */
1383 			if (cr == USB_CR_NO_RESOURCES)
1384 				goto out;
1385 		}
1386 
1387 		attrs = uirp->intr_attributes;
1388 		mp = uirp->intr_data;
1389 	}
1390 
1391 	if (xt->xt_data_tohost == B_TRUE) {
1392 		size_t len;
1393 		if (xt->xt_short != 0) {
1394 			if (!(attrs & USB_ATTRS_SHORT_XFER_OK)) {
1395 				cr = USB_CR_DATA_UNDERRUN;
1396 				goto out;
1397 			}
1398 			len = xt->xt_short;
1399 		} else {
1400 			len = xt->xt_buffer.xdb_len;
1401 		}
1402 
1403 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1404 		    DDI_FM_OK) {
1405 			xhci_error(xhcip, "failed to process normal transfer "
1406 			    "callback for endpoint %u of device on slot %d and "
1407 			    "port %d: encountered fatal FM error synchronizing "
1408 			    "DMA memory, resetting device", xep->xep_num,
1409 			    xd->xd_slot, xd->xd_port);
1410 			xhci_fm_runtime_reset(xhcip);
1411 			mutex_exit(&xhcip->xhci_lock);
1412 			return (B_FALSE);
1413 		}
1414 
1415 		xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1416 		mp->b_wptr += len;
1417 	}
1418 	cr = USB_CR_OK;
1419 
1420 out:
1421 	/*
1422 	 * Don't use the address from the TRB here. When we're dealing with
1423 	 * event data, that address will be entirely wrong.
1424 	 */
1425 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, xt->xt_trbs_pa[off]));
1426 	rem = list_remove_head(&xep->xep_transfers);
1427 	VERIFY3P(rem, ==, xt);
1428 	mutex_exit(&xhcip->xhci_lock);
1429 
1430 	usba_hcdi_cb(xep->xep_pipe, urp, cr);
1431 	if (periodic == B_TRUE) {
1432 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1433 	} else {
1434 		xhci_transfer_free(xhcip, xt);
1435 	}
1436 
1437 	return (B_TRUE);
1438 }
1439 
1440 static boolean_t
1441 xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
1442     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1443 {
1444 	int code;
1445 	usb_cr_t cr;
1446 	xhci_transfer_t *rem;
1447 	usb_isoc_pkt_descr_t *desc;
1448 	usb_isoc_req_t *usrp;
1449 
1450 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1451 	ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);
1452 
1453 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1454 
1455 	/*
1456 	 * The descriptors that we copy the data from are set up to assume that
1457 	 * everything was OK and we transferred all the requested data.
1458 	 */
1459 	desc = &xt->xt_isoc[off];
1460 	if (code == XHCI_CODE_SHORT_XFER) {
1461 		int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1462 		desc->isoc_pkt_actual_length -= residue;
1463 	}
1464 
1465 	/*
1466 	 * We don't perform the callback until the very last TRB is returned
1467 	 * here. If we have a TRB report on something else, that means that we
1468 	 * had a short transfer.
1469 	 */
1470 	if (off < xt->xt_ntrbs - 1) {
1471 		mutex_exit(&xhcip->xhci_lock);
1472 		return (B_TRUE);
1473 	}
1474 
1475 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1476 	rem = list_remove_head(&xep->xep_transfers);
1477 	VERIFY3P(rem, ==, xt);
1478 	mutex_exit(&xhcip->xhci_lock);
1479 
1480 	cr = USB_CR_OK;
1481 
1482 	if (xt->xt_data_tohost == B_TRUE) {
1483 		usb_opaque_t urp;
1484 		urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1485 		usrp = (usb_isoc_req_t *)urp;
1486 
1487 		if (cr == USB_CR_OK) {
1488 			mblk_t *mp;
1489 			size_t len;
1490 			if (xhci_transfer_sync(xhcip, xt,
1491 			    DDI_DMA_SYNC_FORCPU) != DDI_FM_OK) {
1492 				xhci_error(xhcip, "failed to process "
1493 				    "isochronous transfer callback for "
1494 				    "endpoint %u of device on slot %d and port "
1495 				    "%d: encountered fatal FM error "
1496 				    "synchronizing DMA memory, resetting "
1497 				    "device",
1498 				    xep->xep_num, xd->xd_slot, xd->xd_port);
1499 				xhci_fm_runtime_reset(xhcip);
1500 				mutex_exit(&xhcip->xhci_lock);
1501 				return (B_FALSE);
1502 			}
1503 
1504 			mp = usrp->isoc_data;
1505 			len = xt->xt_buffer.xdb_len;
1506 			xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1507 			mp->b_wptr += len;
1508 		}
1509 	} else {
1510 		usrp = (usb_isoc_req_t *)xt->xt_usba_req;
1511 	}
1512 
1513 	if (cr == USB_CR_OK) {
1514 		bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
1515 		    sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
1516 	}
1517 
1518 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
1519 	if (xt->xt_data_tohost == B_TRUE) {
1520 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1521 	} else {
1522 		xhci_transfer_free(xhcip, xt);
1523 	}
1524 
1525 	return (B_TRUE);
1526 }
1527 
1528 boolean_t
1529 xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
1530 {
1531 	boolean_t ret;
1532 	int slot, endpoint, code;
1533 	uint_t off;
1534 	xhci_device_t *xd;
1535 	xhci_endpoint_t *xep;
1536 	xhci_transfer_t *xt;
1537 	boolean_t transfer_done;
1538 
1539 	endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
1540 	slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
1541 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1542 
1543 	switch (code) {
1544 	case XHCI_CODE_RING_UNDERRUN:
1545 	case XHCI_CODE_RING_OVERRUN:
1546 		/*
1547 		 * If we have an ISOC overrun or underrun then there will be no
1548 		 * valid data pointer in the TRB associated with it. Just drive
1549 		 * on.
1550 		 */
1551 		return (B_TRUE);
1552 	case XHCI_CODE_UNDEFINED:
1553 		xhci_error(xhcip, "received transfer trb with undefined fatal "
1554 		    "error: resetting device");
1555 		xhci_fm_runtime_reset(xhcip);
1556 		return (B_FALSE);
1557 	case XHCI_CODE_XFER_STOPPED:
1558 	case XHCI_CODE_XFER_STOPINV:
1559 	case XHCI_CODE_XFER_STOPSHORT:
1560 		/*
1561 		 * This causes us to transition the endpoint to a stopped state.
1562 		 * Each of these indicate a different possible state that we
1563 		 * have to deal with. Effectively we're going to drop it and
1564 		 * leave it up to the consumers to figure out what to do. For
1565 		 * the moment, that's generally okay because stops are only used
1566 		 * in cases where we're cleaning up outstanding reqs, etc.
1567 		 *
1568 		 * We do this before we check for the corresponding transfer as
1569 		 * this will generally be generated by a command issued that's
1570 		 * stopping the ring.
1571 		 */
1572 		return (B_TRUE);
1573 	default:
1574 		break;
1575 	}
1576 
1577 	mutex_enter(&xhcip->xhci_lock);
1578 	xd = xhci_device_lookup_by_slot(xhcip, slot);
1579 	if (xd == NULL) {
1580 		xhci_error(xhcip, "received transfer trb with code %d for "
1581 		    "unknown slot %d and endpoint %d: resetting device", code,
1582 		    slot, endpoint);
1583 		mutex_exit(&xhcip->xhci_lock);
1584 		xhci_fm_runtime_reset(xhcip);
1585 		return (B_FALSE);
1586 	}
1587 
1588 	/*
1589 	 * Endpoint IDs are indexed based on their Device Context Index, which
1590 	 * means that we need to subtract one to get the actual ID that we use.
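	 * For example, DCI 1 (the default control endpoint) maps to
	 * xd_endpoints[0].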
1591 	 */
1592 	xep = xd->xd_endpoints[endpoint - 1];
1593 	if (xep == NULL) {
1594 		xhci_error(xhcip, "received transfer trb with code %d, slot "
1595 		    "%d, and unknown endpoint %d: resetting device", code,
1596 		    slot, endpoint);
1597 		mutex_exit(&xhcip->xhci_lock);
1598 		xhci_fm_runtime_reset(xhcip);
1599 		return (B_FALSE);
1600 	}
1601 
1602 	/*
1603 	 * The TRB that we received may be an event data TRB for a bulk
1604 	 * endpoint, a normal or short completion for any other endpoint or an
1605 	 * error. In all cases, we need to figure out what transfer this
1606 	 * corresponds to. If this is an error, then we need to make sure that
1607 	 * the generating ring has been cleaned up.
1608 	 *
1609 	 * TRBs should be delivered in order, based on the ring. If for some
1610 	 * reason we find something that doesn't add up here, then we need to
1611 	 * assume that something has gone horribly wrong in the system and issue
1612 	 * a runtime reset. We issue the runtime reset rather than just trying
1613 	 * to stop and flush the ring, because it's unclear if we could stop
1614 	 * the ring in time.
1615 	 */
1616 	if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
1617 	    NULL) {
1618 		xhci_error(xhcip, "received transfer trb with code %d, slot "
1619 		    "%d, and endpoint %d, but does not match current transfer "
1620 		    "for endpoint: resetting device", code, slot, endpoint);
1621 		mutex_exit(&xhcip->xhci_lock);
1622 		xhci_fm_runtime_reset(xhcip);
1623 		return (B_FALSE);
1624 	}
1625 
1626 	transfer_done = B_FALSE;
1627 
1628 	switch (code) {
1629 	case XHCI_CODE_SUCCESS:
1630 	case XHCI_CODE_SHORT_XFER:
1631 		/* Handled by endpoint logic */
1632 		break;
1633 	case XHCI_CODE_STALL:
1634 		/*
1635 		 * This causes us to transition to the halted state;
1636 		 * however, downstream clients are able to handle this just
1637 		 * fine.
1638 		 */
1639 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1640 		xt->xt_cr = USB_CR_STALL;
1641 		transfer_done = B_TRUE;
1642 		break;
1643 	case XHCI_CODE_BABBLE:
1644 		transfer_done = B_TRUE;
1645 		xt->xt_cr = USB_CR_DATA_OVERRUN;
1646 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1647 		break;
1648 	case XHCI_CODE_TXERR:
1649 	case XHCI_CODE_SPLITERR:
1650 		transfer_done = B_TRUE;
1651 		xt->xt_cr = USB_CR_DEV_NOT_RESP;
1652 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1653 		break;
1654 	case XHCI_CODE_BW_OVERRUN:
1655 		transfer_done = B_TRUE;
1656 		xt->xt_cr = USB_CR_DATA_OVERRUN;
1657 		break;
1658 	case XHCI_CODE_DATA_BUF:
1659 		transfer_done = B_TRUE;
1660 		if (xt->xt_data_tohost)
1661 			xt->xt_cr = USB_CR_DATA_OVERRUN;
1662 		else
1663 			xt->xt_cr = USB_CR_DATA_UNDERRUN;
1664 		break;
1665 	default:
1666 		/*
1667 		 * Treat these as general unspecified errors that don't cause a
1668 		 * stop of the ring. Even if it does, a subsequent timeout
1669 		 * should occur which causes us to end up dropping a pipe reset
1670 		 * or at least issuing a reset of the device as part of
1671 		 * quiescing.
1672 		 */
1673 		transfer_done = B_TRUE;
1674 		xt->xt_cr = USB_CR_HC_HARDWARE_ERR;
1675 		break;
1676 	}
1677 
1678 	if (transfer_done == B_TRUE) {
1679 		xhci_transfer_t *alt;
1680 
1681 		alt = list_remove_head(&xep->xep_transfers);
1682 		VERIFY3P(alt, ==, xt);
1683 		mutex_exit(&xhcip->xhci_lock);
1684 		if (xt->xt_usba_req == NULL) {
1685 			usb_opaque_t urp;
1686 
1687 			urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
1688 			usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
1689 		} else {
1690 			usba_hcdi_cb(xep->xep_pipe,
1691 			    (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
1692 			xhci_transfer_free(xhcip, xt);
1693 		}
1694 		return (B_TRUE);
1695 	}
1696 
1697 	/*
1698 	 * Process the transfer callback based on the type of endpoint. Each of
1699 	 * these callback functions will end up calling back into USBA via
1700 	 * usba_hcdi_cb() to return transfer information (whether successful or
1701 	 * not). Because we can't hold any locks across a call to that function,
1702 	 * all of these callbacks will drop the xhci_t`xhci_lock by the time
1703 	 * they return. This is why there's no mutex_exit() call before we
1704 	 * return.
1705 	 */
1706 	switch (xep->xep_type) {
1707 	case USB_EP_ATTR_CONTROL:
1708 		ret = xhci_endpoint_control_callback(xhcip, xd, xep, xt, off,
1709 		    trb);
1710 		break;
1711 	case USB_EP_ATTR_BULK:
1712 		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off, trb);
1713 		break;
1714 	case USB_EP_ATTR_INTR:
1715 		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off,
1716 		    trb);
1717 		break;
1718 	case USB_EP_ATTR_ISOCH:
1719 		ret = xhci_endpoint_isoch_callback(xhcip, xd, xep, xt, off,
1720 		    trb);
1721 		break;
1722 	default:
1723 		panic("bad endpoint type: %u", xep->xep_type);
1724 	}
1725 
1726 	return (ret);
1727 }
1728