xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c (revision 113ae9130ee83ef358c36cc3521d4a55e3e9da00)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2018, Joyent, Inc.
14  * Copyright (c) 2019 by Western Digital Corporation
15  * Copyright 2024 Oxide Computer Company
16  */
17 
18 /*
19  * xHCI Endpoint Initialization and Management
20  *
21  * Please see the big theory statement in xhci.c for more information.
22  */
23 
24 #include <sys/usb/hcd/xhci/xhci.h>
25 #include <sys/sdt.h>
26 
27 boolean_t
xhci_endpoint_is_periodic_in(xhci_endpoint_t * xep)28 xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
29 {
30 	usba_pipe_handle_data_t *ph;
31 
32 	ASSERT(xep != NULL);
33 	ph = xep->xep_pipe;
34 	ASSERT(ph != NULL);
35 
36 	return ((xep->xep_type == USB_EP_ATTR_INTR ||
37 	    xep->xep_type == USB_EP_ATTR_ISOCH) &&
38 	    (ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN);
39 }
40 
/*
 * Flush the device's input context out to the controller and verify the DMA
 * sync through the FM framework.  Returns 0 on success, or EIO after logging
 * and scheduling a runtime reset when a fatal FM error is detected.
 */
static int
xhci_input_context_sync(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
{
	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to initialize device input "
		    "context on slot %d and port %d for endpoint %u: "
		    "encountered fatal FM error synchronizing input context "
		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
		xhci_fm_runtime_reset(xhcip);
		return (EIO);
	}

	return (0);
}
56 
57 /*
58  * Endpoints are a bit weirdly numbered. Endpoint zero is the default control
59  * endpoint, so the direction doesn't matter. For all the others, they're
60  * arranged as ep 1 out, ep 1 in, ep 2 out, ep 2 in. This is based on the layout
61  * of the Device Context Structure in xHCI 1.1 / 6.2.1. Therefore to go from the
62  * endpoint and direction, we know that endpoint n starts at 2n - 1.  e.g.
63  * endpoint 1 starts at entry 1, endpoint 2 at entry 3, etc. Finally, the OUT
64  * direction comes first, followed by the IN direction. So if we're getting the
65  * endpoint for one of those, then we have to deal with that.
66  */
67 uint_t
xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t * ph)68 xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *ph)
69 {
70 	int ep;
71 
72 	ep = ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK;
73 	if (ep == 0)
74 		return (ep);
75 	ep = ep * 2 - 1;
76 	if ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN)
77 		ep++;
78 
79 	VERIFY(ep < XHCI_NUM_ENDPOINTS);
80 	return (ep);
81 }
82 
/*
 * Mark the endpoint as being torn down and cancel any pending endpoint
 * timeout.  Called with xhci_lock held.
 */
void
xhci_endpoint_timeout_cancel(xhci_t *xhcip, xhci_endpoint_t *xep)
{
	/*
	 * Setting XHCI_ENDPOINT_TEARDOWN first ensures the timeout is not
	 * rearmed once we cancel it.
	 */
	xep->xep_state |= XHCI_ENDPOINT_TEARDOWN;
	if (xep->xep_timeout != 0) {
		/*
		 * NOTE(review): xhci_lock is dropped around untimeout(),
		 * presumably because the timeout handler acquires it — the
		 * standard untimeout(9F) deadlock-avoidance pattern; confirm
		 * against the handler.
		 */
		mutex_exit(&xhcip->xhci_lock);
		(void) untimeout(xep->xep_timeout);
		mutex_enter(&xhcip->xhci_lock);
		xep->xep_timeout = 0;
	}
}
94 
95 /*
96  * Close an endpoint that has been initialised and is presently considered
97  * open; i.e., either xhci_endpoint_init() or xhci_endpoint_reopen() have
98  * completed successfully.  This clears the open state and ensures the periodic
99  * routine is not running for this endpoint, but critically it does not disturb
100  * the controller state.
101  *
102  * A closed endpoint must either be fully unconfigured and then freed with
103  * xhci_endpoint_fini(), or if it is a bulk or control endpoint it can remain
104  * in this state until subsequent reanimation with xhci_endpoint_reopen() the
105  * next time the pipe is opened.
106  */
void
xhci_endpoint_close(xhci_t *xhcip, xhci_endpoint_t *xep)
{
	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
	VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);
	VERIFY(list_is_empty(&xep->xep_transfers));

	/* Detach the USBA pipe handle; the pipe itself is going away. */
	VERIFY(xep->xep_pipe != NULL);
	xep->xep_pipe = NULL;

	/* An endpoint may only be closed while it is open. */
	VERIFY(xep->xep_state & XHCI_ENDPOINT_OPEN);
	xep->xep_state &= ~XHCI_ENDPOINT_OPEN;

	/* Sets XHCI_ENDPOINT_TEARDOWN and stops any pending timeout. */
	xhci_endpoint_timeout_cancel(xhcip, xep);
}
122 
123 /*
124  * Attempt to unconfigure an endpoint that was previously initialised, but has
125  * now been closed.  If this function succeeds, it is then safe to call
126  * xhci_endpoint_fini().
127  */
int
xhci_endpoint_unconfigure(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep)
{
	int ret;

	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
	VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);
	VERIFY(!(xep->xep_state & XHCI_ENDPOINT_OPEN));
	VERIFY(xep->xep_state & XHCI_ENDPOINT_TEARDOWN);

	/*
	 * We only do this for periodic endpoints, in order to make their
	 * reserved bandwidth available.
	 */
	VERIFY(xep->xep_type == USB_EP_ATTR_INTR ||
	    xep->xep_type == USB_EP_ATTR_ISOCH);

	/*
	 * Drop the endpoint we are unconfiguring.  We make sure to always set
	 * the slot as having changed in the context field as the specification
	 * suggests we should and some hardware requires it.
	 */
	mutex_enter(&xd->xd_imtx);
	xd->xd_input->xic_drop_flags =
	    LE_32(XHCI_INCTX_MASK_DCI(xep->xep_num + 1));
	xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(0));
	ret = xhci_input_context_sync(xhcip, xd, xep);

	/*
	 * xhci_lock is dropped across the (blocking) Configure Endpoint
	 * command while xd_imtx remains held to keep the input context
	 * stable; xhci_lock is retaken before returning to the caller.
	 */
	mutex_exit(&xhcip->xhci_lock);

	if (ret != 0) {
		ret = USB_HC_HARDWARE_ERROR;
		goto done;
	}

	ret = xhci_command_configure_endpoint(xhcip, xd);

done:
	mutex_exit(&xd->xd_imtx);
	mutex_enter(&xhcip->xhci_lock);
	return (ret);
}
171 
172 /*
173  * The assumption is that someone calling this owns this endpoint / device and
174  * that it's in a state where it's safe to zero out that information.  In
175  * particular, if the endpoint has ever been initialised and was thus marked
176  * open, xhci_endpoint_close() must have been called before this routine.
177  */
void
xhci_endpoint_fini(xhci_device_t *xd, int endpoint)
{
	xhci_endpoint_t *xep = xd->xd_endpoints[endpoint];

	VERIFY(xep != NULL);
	/* The caller must have already dropped its pipe reference. */
	VERIFY3P(xep->xep_pipe, ==, NULL);
	xd->xd_endpoints[endpoint] = NULL;

	if (endpoint != XHCI_DEFAULT_ENDPOINT) {
		/*
		 * Make sure xhci_endpoint_close() was called before we get
		 * here:
		 */
		VERIFY(!(xep->xep_state & XHCI_ENDPOINT_OPEN));
	}

	/* Release the ring's DMA memory and the endpoint's bookkeeping. */
	xhci_ring_free(&xep->xep_ring);
	cv_destroy(&xep->xep_state_cv);
	list_destroy(&xep->xep_transfers);
	kmem_free(xep, sizeof (xhci_endpoint_t));
}
200 
201 /*
202  * Set up the default control endpoint input context. This needs to be done
203  * before we address the device. Note, we separate out the default endpoint from
204  * others, as we must set this up before we have a pipe handle.
205  */
206 int
xhci_endpoint_setup_default_context(xhci_t * xhcip,xhci_device_t * xd,xhci_endpoint_t * xep)207 xhci_endpoint_setup_default_context(xhci_t *xhcip, xhci_device_t *xd,
208     xhci_endpoint_t *xep)
209 {
210 	uint_t mps;
211 	xhci_endpoint_context_t *ectx;
212 	uint64_t deq;
213 
214 	ectx = xd->xd_endin[xep->xep_num];
215 	VERIFY(ectx != NULL);
216 
217 	/*
218 	 * We may or may not have a device descriptor. This should match the
219 	 * same initial sizes that are done in hubd_create_child().
220 	 *
221 	 * Note, since we don't necessarily have an endpoint descriptor yet to
222 	 * base this on we instead use the device's defaults if available. This
223 	 * is different from normal endpoints for which there's always a
224 	 * specific descriptor.
225 	 */
226 	switch (xd->xd_usbdev->usb_port_status) {
227 	case USBA_LOW_SPEED_DEV:
228 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
229 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
230 		} else {
231 			mps = 8;
232 		}
233 		break;
234 	case USBA_FULL_SPEED_DEV:
235 	case USBA_HIGH_SPEED_DEV:
236 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
237 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
238 		} else {
239 			mps = 64;
240 		}
241 		break;
242 	case USBA_SUPER_SPEED_DEV:
243 	default:
244 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
245 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
246 			mps = 1 << mps;
247 		} else {
248 			mps = 512;
249 		}
250 		break;
251 	}
252 
253 	bzero(ectx, sizeof (xhci_endpoint_context_t));
254 	ectx->xec_info = LE_32(0);
255 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(3) |
256 	    XHCI_EPCTX_SET_EPTYPE(XHCI_EPCTX_TYPE_CTRL) |
257 	    XHCI_EPCTX_SET_MAXB(0) | XHCI_EPCTX_SET_MPS(mps));
258 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
259 	    xep->xep_ring.xr_tail;
260 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
261 	ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(0) |
262 	    XHCI_EPCTX_AVG_TRB_LEN(XHCI_CONTEXT_DEF_CTRL_ATL));
263 
264 	return (xhci_input_context_sync(xhcip, xd, xep));
265 }
266 
267 /*
268  * Determine if we need to update the maximum packet size of the default
269  * control endpoint. This may happen because we start with the default size
270  * before we have a descriptor and then it may change. For example, with
271  * full-speed devices that may have either an 8 or 64 byte maximum packet size.
272  */
int
xhci_endpoint_update_default(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep)
{
	int mps, desc, info, ret;
	ASSERT(xd->xd_usbdev != NULL);

	/* The MPS currently programmed into the output (device) context. */
	mps = XHCI_EPCTX_GET_MPS(xd->xd_endout[xep->xep_num]->xec_info2);
	desc = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
		/* For USB >= 3.0 devices, bMaxPacketSize0 is an exponent. */
		desc = 1 << desc;
	}

	if (mps == desc)
		return (USB_SUCCESS);

	/*
	 * Update only the context for the default control endpoint.
	 */
	mutex_enter(&xd->xd_imtx);
	info = LE_32(xd->xd_endout[xep->xep_num]->xec_info2);
	/*
	 * The MPS field currently holds exactly 'mps', so clearing those set
	 * bits zeroes the field before the descriptor value is installed.
	 */
	info &= ~XHCI_EPCTX_SET_MPS(mps);
	info |= XHCI_EPCTX_SET_MPS(desc);
	xd->xd_endin[xep->xep_num]->xec_info2 = LE_32(info);
	xd->xd_input->xic_drop_flags = LE_32(0);
	/* DCI 1 is the default control endpoint's context. */
	xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(1));

	if (xhci_input_context_sync(xhcip, xd, xep) != 0) {
		ret = USB_HC_HARDWARE_ERROR;
		goto done;
	}

	ret = xhci_command_evaluate_context(xhcip, xd);

done:
	mutex_exit(&xd->xd_imtx);
	return (ret);
}
311 
312 static uint_t
xhci_endpoint_epdesc_to_type(usb_ep_descr_t * ep)313 xhci_endpoint_epdesc_to_type(usb_ep_descr_t *ep)
314 {
315 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
316 	boolean_t in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
317 	    USB_EP_DIR_IN;
318 
319 	switch (type) {
320 	case USB_EP_ATTR_CONTROL:
321 		return (XHCI_EPCTX_TYPE_CTRL);
322 	case USB_EP_ATTR_ISOCH:
323 		if (in == B_TRUE)
324 			return (XHCI_EPCTX_TYPE_ISOCH_IN);
325 		return (XHCI_EPCTX_TYPE_ISOCH_OUT);
326 	case USB_EP_ATTR_BULK:
327 		if (in == B_TRUE)
328 			return (XHCI_EPCTX_TYPE_BULK_IN);
329 		return (XHCI_EPCTX_TYPE_BULK_OUT);
330 	case USB_EP_ATTR_INTR:
331 		if (in == B_TRUE)
332 			return (XHCI_EPCTX_TYPE_INTR_IN);
333 		return (XHCI_EPCTX_TYPE_INTR_OUT);
334 	default:
335 		panic("bad USB attribute type: %d", type);
336 	}
337 
338 	/* LINTED: E_FUNC_NO_RET_VAL */
339 }
340 
/*
 * Determine the Max Burst Size for the endpoint context, per the per-speed
 * rules in xHCI 1.1 / 6.2.3.4.
 */
static uint_t
xhci_endpoint_determine_burst(xhci_device_t *xd, xhci_endpoint_t *xep)
{
	switch (xd->xd_usbdev->usb_port_status) {
	case USBA_LOW_SPEED_DEV:
	case USBA_FULL_SPEED_DEV:
		/*
		 * Per xHCI 1.1 / 6.2.3.4, burst is always zero for these
		 * devices.
		 */
		return (0);
	case USBA_HIGH_SPEED_DEV:
		/* Control and bulk endpoints never burst at high speed. */
		if (xep->xep_type == USB_EP_ATTR_CONTROL ||
		    xep->xep_type == USB_EP_ATTR_BULK)
			return (0);
		/*
		 * For high speed periodic endpoints, the burst count is
		 * embedded in wMaxPacketSize.
		 */
		return ((xep->xep_pipe->p_xep.uex_ep.wMaxPacketSize &
		    XHCI_CONTEXT_BURST_MASK) >> XHCI_CONTEXT_BURST_SHIFT);
	default:
		/*
		 * For these USB >= 3.0, this comes from the companion
		 * descriptor.
		 */
		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
		return (xep->xep_pipe->p_xep.uex_ep_ss.bMaxBurst);
	}
}
367 
368 /*
 * Convert a linear mapping of values that are in the range of 1-255 into a
370  * 2^x value. Because we're supposed to round down for these calculations (see
371  * the note in xHCI 1.1 / 6.2.3.6) we can do this simply with a fls() and
372  * subtracting one.
373  */
374 static uint_t
xhci_endpoint_linear_interval(usb_ep_descr_t * ep)375 xhci_endpoint_linear_interval(usb_ep_descr_t *ep)
376 {
377 	int exp;
378 	int ival = ep->bInterval;
379 	if (ival < 1)
380 		ival = 1;
381 	if (ival > 255)
382 		ival = 255;
383 	exp = ddi_fls(ival) - 1;
384 	ASSERT(exp >= 0 && exp <= 7);
385 	return (exp);
386 }
387 
388 /*
389  * Convert the set of values that use a 2^(x-1) value for interval into a 2^x
390  * range. Note the valid input range is 1-16, so we clamp values based on this.
391  * See xHCI 1.1 / 6.2.3.6 for more information.
392  */
393 static uint_t
xhci_endpoint_exponential_interval(usb_ep_descr_t * ep)394 xhci_endpoint_exponential_interval(usb_ep_descr_t *ep)
395 {
396 	int ival;
397 
398 	ival = ep->bInterval;
399 	if (ival < 1)
400 		ival = 1;
401 	if (ival > 16)
402 		ival = 16;
403 	ival--;
404 	ASSERT(ival >= 0 && ival <= 15);
405 	return (ival);
406 }
407 
408 
409 /*
410  * Determining the interval is unfortunately somewhat complicated as there are
 * many different forms that things can take. This is all summarized in a
412  * somewhat helpful table, number 65, in xHCI 1.1 / 6.2.3.6. But here's
413  * basically the six different cases we have to consider:
414  *
415  * Case 1: Non-High Speed Bulk and Control Endpoints
416  *	Always return 0.
417  *
418  * Case 2: Super Speed and High Speed Isoch and Intr endpoints
419  *	Convert from a 2^(x-1) range to a 2^x range.
420  *
421  * Case 3: Full Speed Isochronous Endpoints
422  *	As case 2, but add 3 as its values are in frames and we need to convert
423  *	to microframes. Adding three to the result is the same as multiplying
424  *	the initial value by 8.
425  *
426  * Case 4: Full speed and Low Speed Interrupt Endpoints
427  *	These have a 1-255 ms range that we need to convert to a 2^x * 128 us
428  *	range. We use the linear conversion and then add 3 to account for the
429  *	multiplying by 8 conversion from frames to microframes.
430  *
431  * Case 5: High Speed Interrupt and Bulk Output
432  *	These are a bit of a weird case. The spec and other implementations make
433  *	it seem that it's similar to case 4, but without the fixed addition as
 *	it's interpreted differently due to NAKs.
435  *
 * Case 6: Low Speed Isochronous Endpoints
 *	These are not actually defined; however, like other implementations we
 *	treat them like case 3 and use the exponential conversion.
439  */
440 static uint_t
xhci_endpoint_interval(xhci_device_t * xd,usb_ep_descr_t * ep)441 xhci_endpoint_interval(xhci_device_t *xd, usb_ep_descr_t *ep)
442 {
443 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
444 	int speed = xd->xd_usbdev->usb_port_status;
445 
446 	/*
447 	 * Handle Cases 1 and 5 first.
448 	 */
449 	if (type == USB_EP_ATTR_CONTROL || type == USB_EP_ATTR_BULK) {
450 		if (speed != USBA_HIGH_SPEED_DEV)
451 			return (0);
452 		return (xhci_endpoint_linear_interval(ep));
453 	}
454 
455 	/*
456 	 * Handle Isoch and Intr cases next.
457 	 */
458 	switch (speed) {
459 	case USBA_LOW_SPEED_DEV:
460 		/*
461 		 * Interrupt endpoints at low speed are the same as full speed,
462 		 * hence the fall through.
463 		 */
464 		if (type == USB_EP_ATTR_ISOCH) {
465 			return (xhci_endpoint_exponential_interval(ep) + 3);
466 		}
467 		/* FALLTHROUGH */
468 	case USBA_FULL_SPEED_DEV:
469 		return (xhci_endpoint_linear_interval(ep) + 3);
470 	case USBA_HIGH_SPEED_DEV:
471 	case USBA_SUPER_SPEED_DEV:
472 	default:
473 		/*
474 		 * Case 2. Treat any newer and faster speeds as Super Speed by
475 		 * default as USB 3.1 is effectively treated the same here.
476 		 */
477 		return (xhci_endpoint_exponential_interval(ep));
478 	}
479 }
480 
481 /*
482  * The way to calculate the Maximum ESIT is described in xHCI 1.1 / 4.14.2.
483  * First off, this only applies to Interrupt and Isochronous descriptors. For
484  * Super Speed and newer things, it comes out of a descriptor. Otherwise we
485  * calculate it by doing 'Max Packet Size' * ('Max Burst' + 1).
486  */
487 static uint_t
xhci_endpoint_max_esit(xhci_device_t * xd,xhci_endpoint_t * xep,uint_t mps,uint_t burst)488 xhci_endpoint_max_esit(xhci_device_t *xd, xhci_endpoint_t *xep, uint_t mps,
489     uint_t burst)
490 {
491 	if (xep->xep_type == USB_EP_ATTR_CONTROL ||
492 	    xep->xep_type == USB_EP_ATTR_BULK) {
493 		return (0);
494 	}
495 
496 	/*
497 	 * Note that this will need to be updated for SuperSpeedPlus ISOC
498 	 * devices to pull from the secondary companion descriptor they use.
499 	 */
500 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
501 		usb_ep_xdescr_t *ep_xdesc = &xep->xep_pipe->p_xep;
502 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
503 		return (ep_xdesc->uex_ep_ss.wBytesPerInterval);
504 	}
505 
506 	return (mps * (burst + 1));
507 }
508 
509 /*
510  * We've been asked to calculate and tell the xHCI controller an average TRB
511  * data length. This is talked about in an implementation note in xHCI 1.1 /
512  * 4.14.1.1. So, the reality is that it's hard to actually calculate this, as
513  * we're supposed to take into account all of the TRBs that we use on that ring.
514  *
515  * Surveying other xHCI drivers, they all agree on using the default of 8 for
516  * control endpoints; however, from there things get a little more fluid. For
517  * interrupt and isochronous endpoints, many device use the minimum of the max
518  * packet size and the device's pagesize. For bulk endpoints some folks punt and
519  * don't set anything and others try and set it to the pagesize. The xHCI
520  * implementation note suggests a 3k size here initially. For now, we'll just
521  * guess for bulk endpoints and use our page size as a determining factor for
522  * this and use the BSD style for others. Note Linux here only sets this value
523  * for control devices.
524  */
525 static uint_t
xhci_endpoint_avg_trb(xhci_t * xhcip,usb_ep_descr_t * ep,int mps)526 xhci_endpoint_avg_trb(xhci_t *xhcip, usb_ep_descr_t *ep, int mps)
527 {
528 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
529 
530 	switch (type) {
531 	case USB_EP_ATTR_ISOCH:
532 	case USB_EP_ATTR_INTR:
533 		return (MIN(xhcip->xhci_caps.xcap_pagesize, mps));
534 	case USB_EP_ATTR_CONTROL:
535 		return (XHCI_CONTEXT_DEF_CTRL_ATL);
536 	case USB_EP_ATTR_BULK:
537 		return (xhcip->xhci_caps.xcap_pagesize);
538 	default:
539 		panic("bad USB endpoint type: %d", type);
540 	}
541 
542 	/* LINTED: E_FUNC_NO_RET_VAL */
543 }
544 
545 /*
546  * Set up the input context for this endpoint.  If this endpoint is already
547  * open, just confirm that the current parameters and the originally programmed
548  * parameters match.
549  */
int
xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep)
{
	xhci_endpoint_params_t new_xepp;
	xhci_endpoint_context_t *ectx;
	uint64_t deq;
	int ret;

	/*
	 * Explicitly zero this entire struct to start so that we can compare
	 * it with bcmp().
	 */
	bzero(&new_xepp, sizeof (new_xepp));
	new_xepp.xepp_configured = B_TRUE;

	/*
	 * For a USB >=3.0 device we should always have its companion descriptor
	 * provided for us by USBA. If it's not here, complain loudly and fail.
	 */
	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV &&
	    (xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP) == 0) {
		const char *prod, *mfg;

		prod = xd->xd_usbdev->usb_product_str;
		if (prod == NULL)
			prod = "Unknown Device";
		mfg = xd->xd_usbdev->usb_mfg_str;
		if (mfg == NULL)
			mfg = "Unknown Manufacturer";

		xhci_log(xhcip, "Encountered USB >=3.0 device without endpoint "
		    "companion descriptor. Ensure driver %s is properly using "
		    "usb_pipe_xopen() for device %s %s",
		    ddi_driver_name(xd->xd_usbdev->usb_dip), prod, mfg);
		return (EINVAL);
	}

	ectx = xd->xd_endin[xep->xep_num];
	VERIFY(ectx != NULL);
	VERIFY(xd->xd_usbdev->usb_dev_descr != NULL);
	VERIFY(xep->xep_pipe != NULL);

	/* Start with the common defaults and refine per endpoint type. */
	new_xepp.xepp_mps =
	    xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
	new_xepp.xepp_mult = XHCI_CONTEXT_DEF_MULT;
	new_xepp.xepp_cerr = XHCI_CONTEXT_DEF_CERR;

	switch (xep->xep_type) {
	case USB_EP_ATTR_ISOCH:
		/*
		 * When we have support for USB 3.1 SuperSpeedPlus devices,
		 * we'll need to make sure that we also check for its secondary
		 * endpoint companion descriptor here.
		 */
		/*
		 * Super Speed devices nominally have these xHCI super speed
		 * companion descriptors. We know that we're not properly
		 * grabbing them right now, so until we do, we should basically
		 * error about it.
		 */
		if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
			ASSERT(xep->xep_pipe->p_xep.uex_flags &
			    USB_EP_XFLAGS_SS_COMP);
			new_xepp.xepp_mult =
			    xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
			    USB_EP_SS_COMP_ISOC_MULT_MASK;
		}

		new_xepp.xepp_mps &= XHCI_CONTEXT_MPS_MASK;
		new_xepp.xepp_cerr = XHCI_CONTEXT_ISOCH_CERR;
		break;
	default:
		/*
		 * No explicit changes needed for CONTROL, INTR, and BULK
		 * endpoints. They've been handled already and don't have any
		 * differences.
		 */
		break;
	}

	/* Derive the remaining parameters from the endpoint descriptors. */
	new_xepp.xepp_eptype = xhci_endpoint_epdesc_to_type(
	    &xep->xep_pipe->p_xep.uex_ep);
	new_xepp.xepp_burst = xhci_endpoint_determine_burst(xd, xep);
	new_xepp.xepp_ival = xhci_endpoint_interval(xd,
	    &xep->xep_pipe->p_xep.uex_ep);
	new_xepp.xepp_max_esit = xhci_endpoint_max_esit(xd, xep,
	    new_xepp.xepp_mps, new_xepp.xepp_burst);
	new_xepp.xepp_avgtrb = xhci_endpoint_avg_trb(xhcip,
	    &xep->xep_pipe->p_xep.uex_ep, new_xepp.xepp_mps);

	/*
	 * The multi field may be reserved as zero if the LEC feature flag is
	 * set. See the description of mult in xHCI 1.1 / 6.2.3.
	 */
	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
		new_xepp.xepp_mult = 0;

	if (xep->xep_params.xepp_configured) {
		/*
		 * The endpoint context has been configured already.  We are
		 * reopening the pipe, so just confirm that the parameters are
		 * the same.
		 */
		if (bcmp(&xep->xep_params, &new_xepp, sizeof (new_xepp)) == 0) {
			/*
			 * Everything matches up.
			 */
			return (0);
		}

		DTRACE_PROBE3(xhci__context__mismatch,
		    xhci_t *, xhcip,
		    xhci_endpoint_t *, xep,
		    xhci_endpoint_params_t *, &new_xepp);

		xhci_error(xhcip, "device input context on slot %d and "
		    "port %d for endpoint %u was already initialized but "
		    "with incompatible parameters",
		    xd->xd_slot, xd->xd_port, xep->xep_num);
		return (EINVAL);
	}

	/* First-time configuration: program the input endpoint context. */
	bzero(ectx, sizeof (xhci_endpoint_context_t));

	ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(new_xepp.xepp_mult) |
	    XHCI_EPCTX_SET_IVAL(new_xepp.xepp_ival));
	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC) {
		ectx->xec_info |=
		    LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(new_xepp.xepp_max_esit));
	}

	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(new_xepp.xepp_cerr) |
	    XHCI_EPCTX_SET_EPTYPE(new_xepp.xepp_eptype) |
	    XHCI_EPCTX_SET_MAXB(new_xepp.xepp_burst) |
	    XHCI_EPCTX_SET_MPS(new_xepp.xepp_mps));

	/* Point the hardware at the ring's current position and cycle bit. */
	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
	    xep->xep_ring.xr_tail;
	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);

	ectx->xec_txinfo = LE_32(
	    XHCI_EPCTX_MAX_ESIT_PAYLOAD(new_xepp.xepp_max_esit) |
	    XHCI_EPCTX_AVG_TRB_LEN(new_xepp.xepp_avgtrb));

	if ((ret = xhci_input_context_sync(xhcip, xd, xep)) != 0) {
		return (ret);
	}

	/* Record the parameters for comparison on a future reopen. */
	bcopy(&new_xepp, &xep->xep_params, sizeof (new_xepp));
	VERIFY(xep->xep_params.xepp_configured);
	return (0);
}
703 
704 /*
705  * Initialize the endpoint and its input context for a given device. This is
706  * called from two different contexts:
707  *
708  *   1. Initializing a device
709  *   2. Opening a USB pipe
710  *
711  * In the second case, we need to worry about locking around the device. We
712  * don't need to worry about the locking in the first case because the USBA
713  * doesn't know about it yet.
714  */
715 int
xhci_endpoint_init(xhci_t * xhcip,xhci_device_t * xd,usba_pipe_handle_data_t * ph)716 xhci_endpoint_init(xhci_t *xhcip, xhci_device_t *xd,
717     usba_pipe_handle_data_t *ph)
718 {
719 	int ret;
720 	uint_t epid;
721 	xhci_endpoint_t *xep;
722 
723 	if (ph == NULL) {
724 		epid = XHCI_DEFAULT_ENDPOINT;
725 	} else {
726 		ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
727 		epid = xhci_endpoint_pipe_to_epid(ph);
728 	}
729 	VERIFY(xd->xd_endpoints[epid] == NULL);
730 
731 	xep = kmem_zalloc(sizeof (xhci_endpoint_t), KM_SLEEP);
732 	list_create(&xep->xep_transfers, sizeof (xhci_transfer_t),
733 	    offsetof(xhci_transfer_t, xt_link));
734 	cv_init(&xep->xep_state_cv, NULL, CV_DRIVER, NULL);
735 	xep->xep_xd = xd;
736 	xep->xep_xhci = xhcip;
737 	xep->xep_num = epid;
738 	if (ph == NULL) {
739 		xep->xep_pipe = NULL;
740 		xep->xep_type = USB_EP_ATTR_CONTROL;
741 	} else {
742 		xep->xep_pipe = ph;
743 		xep->xep_type = ph->p_ep.bmAttributes & USB_EP_ATTR_MASK;
744 	}
745 
746 	if ((ret = xhci_ring_alloc(xhcip, &xep->xep_ring)) != 0) {
747 		cv_destroy(&xep->xep_state_cv);
748 		list_destroy(&xep->xep_transfers);
749 		kmem_free(xep, sizeof (xhci_endpoint_t));
750 		return (ret);
751 	}
752 
753 	if ((ret = xhci_ring_reset(xhcip, &xep->xep_ring)) != 0) {
754 		xhci_ring_free(&xep->xep_ring);
755 		cv_destroy(&xep->xep_state_cv);
756 		list_destroy(&xep->xep_transfers);
757 		kmem_free(xep, sizeof (xhci_endpoint_t));
758 		return (ret);
759 	}
760 
761 	xd->xd_endpoints[epid] = xep;
762 	if (ph == NULL) {
763 		ret = xhci_endpoint_setup_default_context(xhcip, xd, xep);
764 	} else {
765 		ret = xhci_endpoint_setup_context(xhcip, xd, xep);
766 	}
767 	if (ret != 0) {
768 		xhci_endpoint_fini(xd, xep->xep_num);
769 		return (ret);
770 	}
771 
772 	xep->xep_state |= XHCI_ENDPOINT_OPEN;
773 	return (0);
774 }
775 
776 /*
777  * Mark as open an endpoint that has previously been closed with
778  * xhci_endpoint_close(), but was left otherwise configured with the
779  * controller.  This step ensures that we are attempting to open the endpoint
780  * with parameters that are compatible with the last time it was opened, and
781  * marks the endpoint as eligible for periodic routines.
782  */
int
xhci_endpoint_reopen(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
    usba_pipe_handle_data_t *ph)
{
	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
	VERIFY(ph != NULL);
	VERIFY3U(xhci_endpoint_pipe_to_epid(ph), ==, xep->xep_num);
	VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);

	if (xep->xep_type != (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK)) {
		/*
		 * The endpoint type should not change unless the device has
		 * been torn down and recreated by the framework.
		 */
		return (EINVAL);
	}

	if (xep->xep_state & XHCI_ENDPOINT_OPEN) {
		return (EBUSY);
	}

	/* xhci_endpoint_close() left the endpoint in the teardown state. */
	VERIFY(xep->xep_state & XHCI_ENDPOINT_TEARDOWN);
	xep->xep_state &= ~XHCI_ENDPOINT_TEARDOWN;

	/* A closed endpoint has no pending timeout or queued transfers. */
	VERIFY3U(xep->xep_timeout, ==, 0);
	VERIFY(list_is_empty(&xep->xep_transfers));

	VERIFY3P(xep->xep_pipe, ==, NULL);
	xep->xep_pipe = ph;

	/*
	 * Verify that the endpoint context parameters have not changed in a
	 * way that requires us to tell the controller about it.
	 */
	int ret;
	if ((ret = xhci_endpoint_setup_context(xhcip, xd, xep)) != 0) {
		/* Restore the closed state we found on entry. */
		xep->xep_pipe = NULL;
		xhci_endpoint_timeout_cancel(xhcip, xep);
		return (ret);
	}

	xep->xep_state |= XHCI_ENDPOINT_OPEN;
	return (0);
}
827 
828 /*
829  * Wait until any ongoing resets or time outs are completed.
830  */
void
xhci_endpoint_serialize(xhci_t *xhcip, xhci_endpoint_t *xep)
{
	VERIFY(MUTEX_HELD(&xhcip->xhci_lock));

	/*
	 * Block until all of the XHCI_ENDPOINT_SERIALIZE state flags clear;
	 * presumably whoever clears them broadcasts on xep_state_cv — confirm
	 * against the reset/timeout paths.
	 */
	while ((xep->xep_state & XHCI_ENDPOINT_SERIALIZE) != 0) {
		cv_wait(&xep->xep_state_cv, &xhcip->xhci_lock);
	}
}
840 
841 /*
842  * Attempt to quiesce an endpoint. Depending on the state of the endpoint, we
843  * may need to simply stop it. Alternatively, we may need to explicitly reset
844  * the endpoint. Once done, this endpoint should be stopped and can be
845  * manipulated.
846  */
int
xhci_endpoint_quiesce(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
{
	int ret = USB_SUCCESS;
	xhci_endpoint_context_t *epctx = xd->xd_endout[xep->xep_num];

	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
	ASSERT(xep->xep_state & XHCI_ENDPOINT_QUIESCE);

	/*
	 * First attempt to stop the endpoint, unless it's halted. We don't
	 * really care what state it is in. Note that because other activity
	 * could be going on, the state may change on us; however, if it's
	 * running, it will always transition to a stopped state and none of the
	 * other valid states will allow transitions without us taking an active
	 * action.
	 */
	if (!(xep->xep_state & XHCI_ENDPOINT_HALTED)) {
		mutex_exit(&xhcip->xhci_lock);
		ret = xhci_command_stop_endpoint(xhcip, xd, xep);
		mutex_enter(&xhcip->xhci_lock);

		if (ret == USB_INVALID_CONTEXT) {
			/*
			 * Refresh the output context so the endpoint state
			 * the controller reported can be examined below.
			 */
			XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
		}
	}

	/*
	 * Now, if we had the HALTED flag set or we failed to stop it due to a
	 * context error and we're in the HALTED state now, reset the end point.
	 */
	if ((xep->xep_state & XHCI_ENDPOINT_HALTED) ||
	    (ret == USB_INVALID_CONTEXT &&
	    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)) == XHCI_EP_HALTED)) {
		mutex_exit(&xhcip->xhci_lock);
		ret = xhci_command_reset_endpoint(xhcip, xd, xep);
		mutex_enter(&xhcip->xhci_lock);
	}

	/*
	 * Ideally, one of the two commands should have worked; however, we
	 * could have had a context error due to being in the wrong state.
	 * Verify that we're either in the ERROR or STOPPED state and treat both
	 * as success. All callers are assumed to be doing this so they can
	 * change the dequeue pointer.
	 */
	if (ret != USB_SUCCESS && ret != USB_INVALID_CONTEXT) {
		return (ret);
	}

	if (ret == USB_INVALID_CONTEXT) {
		/* Re-read the state the controller last wrote out. */
		XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);

		switch (XHCI_EPCTX_STATE(LE_32(epctx->xec_info))) {
		case XHCI_EP_STOPPED:
		case XHCI_EP_ERROR:
			/*
			 * This is where we wanted to go, so let's just take it.
			 */
			ret = USB_SUCCESS;
			break;
		case XHCI_EP_DISABLED:
		case XHCI_EP_RUNNING:
		case XHCI_EP_HALTED:
		default:
			/*
			 * If we're in any of these, something really weird has
			 * happened and it's not worth trying to recover at this
			 * point.
			 */
			xhci_error(xhcip, "!asked to stop endpoint %u on slot "
			    "%d and port %d: ended up in unexpected state %d",
			    xep->xep_num, xd->xd_slot, xd->xd_port,
			    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)));
			return (ret);
		}
	}

	/*
	 * Now that we're successful, we can clear any possible halted state
	 * tracking that we might have had.
	 */
	if (ret == USB_SUCCESS) {
		xep->xep_state &= ~XHCI_ENDPOINT_HALTED;
	}

	return (ret);
}
935 
936 int
xhci_endpoint_ring(xhci_t * xhcip,xhci_device_t * xd,xhci_endpoint_t * xep)937 xhci_endpoint_ring(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
938 {
939 	/*
940 	 * The doorbell ID's are offset by one from the endpoint numbers that we
941 	 * keep.
942 	 */
943 	xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(xd->xd_slot),
944 	    xep->xep_num + 1);
945 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
946 		xhci_error(xhcip, "failed to ring doorbell for slot %d and "
947 		    "endpoint %u: encountered fatal FM register access error",
948 		    xd->xd_slot, xep->xep_num);
949 		xhci_fm_runtime_reset(xhcip);
950 		return (USB_HC_HARDWARE_ERROR);
951 	}
952 
953 	DTRACE_PROBE3(xhci__doorbell__ring, xhci_t *, xhcip, uint32_t,
954 	    XHCI_DOORBELL(xd->xd_slot), uint32_t, xep->xep_num + 1);
955 
956 	return (USB_SUCCESS);
957 }
958 
/*
 * Per-endpoint watchdog, re-armed via timeout(9F). Counts down the head
 * transfer's timeout; on expiry, quiesces the endpoint, skips the timed-out
 * transfer on the ring, moves the dequeue pointer, and completes the request
 * back to USBA with USB_CR_TIMEOUT.
 */
static void
xhci_endpoint_tick(void *arg)
{
	int ret;
	xhci_transfer_t *xt;
	xhci_endpoint_t *xep = arg;
	xhci_device_t *xd = xep->xep_xd;
	xhci_t *xhcip = xep->xep_xhci;

	mutex_enter(&xhcip->xhci_lock);

	/*
	 * If we have the teardown flag set, then this is going away, don't try
	 * to do anything. Also, if somehow a periodic endpoint has something
	 * scheduled, just quit now and don't bother.
	 */
	if (xep->xep_state & (XHCI_ENDPOINT_TEARDOWN |
	    XHCI_ENDPOINT_PERIODIC)) {
		xep->xep_timeout = 0;
		mutex_exit(&xhcip->xhci_lock);
		return;
	}

	/*
	 * If something else has already kicked off, something potentially
	 * dangerous, just don't bother waiting for it and reschedule.
	 */
	if (xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) {
		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
		mutex_exit(&xhcip->xhci_lock);
		return;
	}

	/*
	 * At this point, we have an endpoint that we need to consider. See if
	 * there are any transfers on it, if none, we're done. If so, check if
	 * we have exceeded the timeout. If we have, then we have some work to
	 * do.
	 */
	xt = list_head(&xep->xep_transfers);
	if (xt == NULL) {
		xep->xep_timeout = 0;
		mutex_exit(&xhcip->xhci_lock);
		return;
	}

	/* Not expired yet: burn one tick and re-arm ourselves. */
	if (xt->xt_timeout > 0) {
		xt->xt_timeout--;
		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
		mutex_exit(&xhcip->xhci_lock);
		return;
	}

	/*
	 * This item has timed out. We need to stop the ring and take action.
	 */
	xep->xep_state |= XHCI_ENDPOINT_TIMED_OUT | XHCI_ENDPOINT_QUIESCE;
	ret = xhci_endpoint_quiesce(xhcip, xd, xep);
	if (ret != USB_SUCCESS) {
		/*
		 * If we fail to quiesce during the timeout, then remove the
		 * state flags and hopefully we'll be able to the next time
		 * around or if a reset or polling stop comes in, maybe it can
		 * deal with it.
		 */
		xep->xep_state &= ~(XHCI_ENDPOINT_QUIESCE |
		    XHCI_ENDPOINT_TIMED_OUT);
		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
		mutex_exit(&xhcip->xhci_lock);
		/* Wake any waiters now that the QUIESCE flag is clear. */
		cv_broadcast(&xep->xep_state_cv);
		xhci_error(xhcip, "failed to successfully quiesce timed out "
		    "endpoint %u of device on slot %d and port %d: device "
		    "remains timed out", xep->xep_num, xd->xd_slot,
		    xd->xd_port);
		return;
	}

	xhci_ring_skip_transfer(&xep->xep_ring, xt);
	(void) list_remove_head(&xep->xep_transfers);
	mutex_exit(&xhcip->xhci_lock);

	/*
	 * At this point, we try and set the ring's dequeue pointer. If this
	 * fails, we're left in an awkward state. We've already adjusted the
	 * ring and removed the transfer. All we can really do is go through and
	 * return the transfer and hope that they perhaps attempt to reset the
	 * ring and that will succeed at this point. Based on everything we've
	 * done to set things up, it'd be odd if this did fail.
	 */
	ret = xhci_command_set_tr_dequeue(xhcip, xd, xep);
	mutex_enter(&xhcip->xhci_lock);
	xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
	if (ret == USB_SUCCESS) {
		xep->xep_state &= ~XHCI_ENDPOINT_TIMED_OUT;
	} else {
		xhci_error(xhcip, "failed to successfully set transfer ring "
		    "dequeue pointer of timed out endpoint %u of "
		    "device on slot %d and port %d: device remains timed out, "
		    "please use cfgadm to recover", xep->xep_num, xd->xd_slot,
		    xd->xd_port);
	}
	xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
	    drv_usectohz(XHCI_TICK_TIMEOUT_US));
	mutex_exit(&xhcip->xhci_lock);
	cv_broadcast(&xep->xep_state_cv);

	/*
	 * Because we never time out periodic related activity, we will always
	 * have the request on the transfer.
	 */
	ASSERT(xt->xt_usba_req != NULL);
	usba_hcdi_cb(xep->xep_pipe, xt->xt_usba_req, USB_CR_TIMEOUT);
	xhci_transfer_free(xhcip, xt);
}
1076 
1077 /*
1078  * We've been asked to schedule a series of frames onto the specified endpoint.
1079  * We need to make sure that there is enough room, at which point we can queue
1080  * it and then ring the door bell. Note that we queue in reverse order to make
1081  * sure that if the ring moves on, it won't see the correct cycle bit.
1082  */
int
xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
    xhci_transfer_t *xt, boolean_t ring)
{
	int i;
	xhci_ring_t *rp = &xep->xep_ring;

	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
	ASSERT(xt->xt_ntrbs > 0);
	ASSERT(xt->xt_trbs != NULL);

	/* Don't accept new work while the endpoint may not be scheduled. */
	if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
		return (USB_FAILURE);

	if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
		return (USB_NO_RESOURCES);

	/*
	 * Fill everything but the first TRB, then fill the first one last.
	 * Note the differing final argument on the first TRB — presumably
	 * this controls the cycle-bit handling so the controller cannot
	 * start on a partially written transfer; confirm against
	 * xhci_ring_trb_fill().
	 */
	for (i = xt->xt_ntrbs - 1; i > 0; i--) {
		xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], &xt->xt_trbs_pa[i],
		    B_TRUE);
	}
	xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], &xt->xt_trbs_pa[0],
	    B_FALSE);

	/* Flush the staged TRBs to memory before publishing them. */
	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
	xhci_ring_trb_produce(rp, xt->xt_ntrbs);
	list_insert_tail(&xep->xep_transfers, xt);

	/* Sync again so the hardware sees what trb_produce just wrote. */
	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
	if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write out TRB for device on slot "
		    "%d, port %d, and endpoint %u: encountered fatal FM error "
		    "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
		    xep->xep_num);
		xhci_fm_runtime_reset(xhcip);
		return (USB_HC_HARDWARE_ERROR);
	}

	/*
	 * Arm the timeout watchdog for non-periodic endpoints if it isn't
	 * already running.
	 */
	if (xep->xep_timeout == 0 &&
	    !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
	}

	xt->xt_sched_time = gethrtime();

	/* The caller may elect to batch up doorbell rings. */
	if (ring == B_FALSE)
		return (USB_SUCCESS);

	return (xhci_endpoint_ring(xhcip, xd, xep));
}
1134 
1135 xhci_transfer_t *
xhci_endpoint_determine_transfer(xhci_t * xhcip,xhci_endpoint_t * xep,xhci_trb_t * trb,uint_t * offp)1136 xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
1137     xhci_trb_t *trb, uint_t *offp)
1138 {
1139 	uint_t i;
1140 	uint64_t addr;
1141 	xhci_transfer_t *xt;
1142 
1143 	ASSERT(xhcip != NULL);
1144 	ASSERT(offp != NULL);
1145 	ASSERT(xep != NULL);
1146 	ASSERT(trb != NULL);
1147 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1148 
1149 	if ((xt = list_head(&xep->xep_transfers)) == NULL)
1150 		return (NULL);
1151 
1152 	addr = LE_64(trb->trb_addr);
1153 
1154 	/*
1155 	 * Check if this is the simple case of an event data. If it is, then all
1156 	 * we need to do is look and see its data matches the address of the
1157 	 * transfer.
1158 	 */
1159 	if (XHCI_TRB_GET_ED(LE_32(trb->trb_flags)) != 0) {
1160 		if (LE_64(trb->trb_addr) != (uintptr_t)xt)
1161 			return (NULL);
1162 
1163 		*offp = xt->xt_ntrbs - 1;
1164 		return (xt);
1165 	}
1166 
1167 	/*
1168 	 * This represents an error that has occurred. We need to check two
1169 	 * different things. The first is that the TRB PA maps to one of the
1170 	 * TRBs in the transfer. Secondly, we need to make sure that it makes
1171 	 * sense in the context of the ring and our notion of where the tail is.
1172 	 */
1173 	for (i = 0; i < xt->xt_ntrbs; i++) {
1174 		if (xt->xt_trbs_pa[i] == addr)
1175 			break;
1176 	}
1177 
1178 	if (i == xt->xt_ntrbs)
1179 		return (NULL);
1180 
1181 	if (xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
1182 	    xt->xt_ntrbs) == -1)
1183 		return (NULL);
1184 
1185 	*offp = i;
1186 	return (xt);
1187 }
1188 
/*
 * Re-queue a completed periodic (interrupt-IN or isochronous) transfer so
 * that polling continues. The transfer's completion bookkeeping is reset to
 * an optimistic "all data, no error" state before it is scheduled again.
 */
static void
xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep, xhci_transfer_t *xt)
{
	int ret;
	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;

	ASSERT3U(xpp->xpp_tsize, >, 0);

	/* Reset per-transfer state for the next round of polling. */
	xt->xt_short = 0;
	xt->xt_cr = USB_CR_OK;

	mutex_enter(&xhcip->xhci_lock);

	/*
	 * If we don't have an active poll, then we shouldn't bother trying to
	 * reschedule it. This means that we're trying to stop or we ran out of
	 * memory.
	 */
	if (xpp->xpp_poll_state != XHCI_PERIODIC_POLL_ACTIVE) {
		mutex_exit(&xhcip->xhci_lock);
		return;
	}

	if (xep->xep_type == USB_EP_ATTR_ISOCH) {
		int i;
		/* Restore the optimistic per-packet defaults as well. */
		for (i = 0; i < xt->xt_ntrbs; i++) {
			xt->xt_isoc[i].isoc_pkt_actual_length =
			    xt->xt_isoc[i].isoc_pkt_length;
			xt->xt_isoc[i].isoc_pkt_status = USB_CR_OK;
		}
	}

	/*
	 * In general, there should always be space on the ring for this. The
	 * only reason that rescheduling an existing transfer for a periodic
	 * endpoint wouldn't work is because we have a hardware error, at which
	 * point we're going to be going down hard anyways. We log here to make
	 * this case discoverable in case our assumptions are wrong.
	 */
	ret = xhci_endpoint_schedule(xhcip, xd, xep, xt, B_TRUE);
	if (ret != 0) {
		xhci_log(xhcip, "!failed to reschedule periodic endpoint %u "
		    "(type %u) on slot %d: %d\n", xep->xep_num, xep->xep_type,
		    xd->xd_slot, ret);
	}
	mutex_exit(&xhcip->xhci_lock);
}
1239 
1240 /*
1241  * We're dealing with a message on a control endpoint. This may be a default
1242  * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
1243  * a setup stage, data stage (which may have one or more other TRBs) and then a
1244  * final status stage.
1245  *
1246  * We generally set ourselves up such that we get interrupted and notified only
1247  * on the status stage and for short transfers in the data stage. If we
1248  * encounter a short transfer in the data stage, then we need to go through and
1249  * check whether or not the short transfer is allowed. If it is, then there's
1250  * nothing to do. We'll update everything and call back the framework once we
1251  * get the status stage.
1252  */
1253 static boolean_t
xhci_endpoint_control_callback(xhci_t * xhcip,xhci_device_t * xd,xhci_endpoint_t * xep,xhci_transfer_t * xt,uint_t off,xhci_trb_t * trb)1254 xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
1255     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1256 {
1257 	int code;
1258 	usb_ctrl_req_t *ucrp;
1259 	xhci_transfer_t *rem;
1260 
1261 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1262 
1263 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1264 	ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1265 
1266 	/*
1267 	 * Now that we know what this TRB is for, was it for a data/normal stage
1268 	 * or is it the status stage. We cheat by looking at the last entry. If
1269 	 * it's a data stage, then we must have gotten a short write. We record
1270 	 * this fact and whether we should consider the transfer fatal for the
1271 	 * subsequent status stage.
1272 	 */
1273 	if (off != xt->xt_ntrbs - 1) {
1274 		uint_t remain;
1275 		usb_ctrl_req_t *ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1276 
1277 		/*
1278 		 * This is a data stage TRB. The only reason we should have
1279 		 * gotten something for this is beacuse it was short. Make sure
1280 		 * it's okay before we continue.
1281 		 */
1282 		VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
1283 		if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
1284 			xt->xt_cr = USB_CR_DATA_UNDERRUN;
1285 			mutex_exit(&xhcip->xhci_lock);
1286 			return (B_TRUE);
1287 		}
1288 
1289 		/*
1290 		 * The value in the resulting trb is how much data remained to
1291 		 * be transferred. Normalize that against the original buffer
1292 		 * size.
1293 		 */
1294 		remain = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1295 		xt->xt_short = xt->xt_buffer.xdb_len - remain;
1296 		mutex_exit(&xhcip->xhci_lock);
1297 		return (B_TRUE);
1298 	}
1299 
1300 	/*
1301 	 * Okay, this is a status stage trb that's in good health. We should
1302 	 * finally go ahead, sync data and try and finally do the callback. If
1303 	 * we have short data, then xt->xt_short will be non-zero.
1304 	 */
1305 	if (xt->xt_data_tohost == B_TRUE) {
1306 		size_t len;
1307 		if (xt->xt_short != 0) {
1308 			len = xt->xt_short;
1309 		} else {
1310 			len = xt->xt_buffer.xdb_len;
1311 		}
1312 
1313 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1314 		    DDI_FM_OK) {
1315 			xhci_error(xhcip, "failed to process control transfer "
1316 			    "callback for endpoint %u of device on slot %d and "
1317 			    "port %d: encountered fatal FM error synchronizing "
1318 			    "DMA memory, resetting device", xep->xep_num,
1319 			    xd->xd_slot, xd->xd_port);
1320 			xhci_fm_runtime_reset(xhcip);
1321 			mutex_exit(&xhcip->xhci_lock);
1322 			return (B_FALSE);
1323 		}
1324 
1325 		xhci_transfer_copy(xt, ucrp->ctrl_data->b_rptr, len, B_TRUE);
1326 		ucrp->ctrl_data->b_wptr += len;
1327 	}
1328 
1329 	/*
1330 	 * Now we're done. We can go ahead and bump the ring. Free the transfer
1331 	 * outside of the lock and call back into the framework.
1332 	 */
1333 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1334 	rem = list_remove_head(&xep->xep_transfers);
1335 	VERIFY3P(rem, ==, xt);
1336 	mutex_exit(&xhcip->xhci_lock);
1337 
1338 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)ucrp, xt->xt_cr);
1339 	xhci_transfer_free(xhcip, xt);
1340 
1341 	return (B_TRUE);
1342 }
1343 
1344 /*
1345  * Cons up a new usb request for the periodic data transfer if we can. If there
1346  * isn't one available, change the return code to NO_RESOURCES and stop polling
1347  * on this endpoint, thus using and consuming the original request.
1348  */
1349 static usb_opaque_t
xhci_endpoint_dup_periodic(xhci_endpoint_t * xep,xhci_transfer_t * xt,usb_cr_t * cr)1350 xhci_endpoint_dup_periodic(xhci_endpoint_t *xep, xhci_transfer_t *xt,
1351     usb_cr_t *cr)
1352 {
1353 	usb_opaque_t urp;
1354 
1355 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
1356 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
1357 
1358 	if (XHCI_IS_ONESHOT_XFER(xt)) {
1359 		/*
1360 		 * Oneshot Interrupt IN transfers already have a USB request
1361 		 * which we can just return:
1362 		 */
1363 		return (xt->xt_usba_req);
1364 	}
1365 
1366 	if (xep->xep_type == USB_EP_ATTR_INTR) {
1367 		urp = (usb_opaque_t)usba_hcdi_dup_intr_req(xep->xep_pipe->p_dip,
1368 		    (usb_intr_req_t *)xpp->xpp_usb_req, xpp->xpp_tsize, 0);
1369 	} else {
1370 		urp = (usb_opaque_t)usba_hcdi_dup_isoc_req(xep->xep_pipe->p_dip,
1371 		    (usb_isoc_req_t *)xpp->xpp_usb_req, 0);
1372 	}
1373 	if (urp == NULL) {
1374 		xpp->xpp_poll_state = XHCI_PERIODIC_POLL_NOMEM;
1375 		urp = xpp->xpp_usb_req;
1376 		xpp->xpp_usb_req = NULL;
1377 		*cr = USB_CR_NO_RESOURCES;
1378 	} else {
1379 		mutex_enter(&xep->xep_pipe->p_mutex);
1380 		xep->xep_pipe->p_req_count++;
1381 		mutex_exit(&xep->xep_pipe->p_mutex);
1382 	}
1383 
1384 	return (urp);
1385 }
1386 
1387 xhci_device_t *
xhci_device_lookup_by_slot(xhci_t * xhcip,int slot)1388 xhci_device_lookup_by_slot(xhci_t *xhcip, int slot)
1389 {
1390 	xhci_device_t *xd;
1391 
1392 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1393 
1394 	for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
1395 	    xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
1396 		if (xd->xd_slot == slot)
1397 			return (xd);
1398 	}
1399 
1400 	return (NULL);
1401 }
1402 
1403 /*
1404  * Handle things which consist solely of normal tranfers, in other words, bulk
1405  * and interrupt transfers.
1406  */
1407 static boolean_t
xhci_endpoint_norm_callback(xhci_t * xhcip,xhci_device_t * xd,xhci_endpoint_t * xep,xhci_transfer_t * xt,uint_t off,xhci_trb_t * trb)1408 xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
1409     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1410 {
1411 	int code;
1412 	usb_cr_t cr;
1413 	xhci_transfer_t *rem;
1414 	int attrs;
1415 	mblk_t *mp;
1416 	boolean_t periodic = B_FALSE;
1417 	usb_opaque_t urp;
1418 
1419 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1420 	ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
1421 	    xep->xep_type == USB_EP_ATTR_INTR);
1422 
1423 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1424 
1425 	if (code == XHCI_CODE_SHORT_XFER) {
1426 		uint_t residue;
1427 		residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1428 
1429 		if (xep->xep_type == USB_EP_ATTR_BULK) {
1430 			VERIFY3U(XHCI_TRB_GET_ED(LE_32(trb->trb_flags)), !=, 0);
1431 			xt->xt_short = residue;
1432 		} else {
1433 			xt->xt_short = xt->xt_buffer.xdb_len - residue;
1434 		}
1435 	}
1436 
1437 	/*
1438 	 * If we have an interrupt from something that's not the last entry,
1439 	 * that must mean we had a short transfer, so there's nothing more for
1440 	 * us to do at the moment. We won't call back until everything's
1441 	 * finished for the general transfer.
1442 	 */
1443 	if (off < xt->xt_ntrbs - 1) {
1444 		mutex_exit(&xhcip->xhci_lock);
1445 		return (B_TRUE);
1446 	}
1447 
1448 	urp = xt->xt_usba_req;
1449 	if (xep->xep_type == USB_EP_ATTR_BULK) {
1450 		usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
1451 		attrs = ubrp->bulk_attributes;
1452 		mp = ubrp->bulk_data;
1453 	} else {
1454 		usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;
1455 
1456 		if (uirp == NULL) {
1457 			periodic = B_TRUE;
1458 			urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1459 			uirp = (usb_intr_req_t *)urp;
1460 
1461 			/*
1462 			 * If we weren't able to duplicate the interrupt, then
1463 			 * we can't put any data in it.
1464 			 */
1465 			if (cr == USB_CR_NO_RESOURCES)
1466 				goto out;
1467 		}
1468 
1469 		attrs = uirp->intr_attributes;
1470 		mp = uirp->intr_data;
1471 	}
1472 
1473 	if (xt->xt_data_tohost == B_TRUE) {
1474 		size_t len;
1475 		if (xt->xt_short != 0) {
1476 			if (!(attrs & USB_ATTRS_SHORT_XFER_OK)) {
1477 				cr = USB_CR_DATA_UNDERRUN;
1478 				goto out;
1479 			}
1480 			len = xt->xt_short;
1481 		} else {
1482 			len = xt->xt_buffer.xdb_len;
1483 		}
1484 
1485 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1486 		    DDI_FM_OK) {
1487 			xhci_error(xhcip, "failed to process normal transfer "
1488 			    "callback for endpoint %u of device on slot %d and "
1489 			    "port %d: encountered fatal FM error synchronizing "
1490 			    "DMA memory, resetting device", xep->xep_num,
1491 			    xd->xd_slot, xd->xd_port);
1492 			xhci_fm_runtime_reset(xhcip);
1493 			mutex_exit(&xhcip->xhci_lock);
1494 			return (B_FALSE);
1495 		}
1496 
1497 		xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1498 		mp->b_wptr += len;
1499 	}
1500 	cr = USB_CR_OK;
1501 
1502 out:
1503 	/*
1504 	 * Don't use the address from the TRB here. When we're dealing with
1505 	 * event data that will be entirely wrong.
1506 	 */
1507 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, xt->xt_trbs_pa[off]));
1508 	rem = list_remove_head(&xep->xep_transfers);
1509 	VERIFY3P(rem, ==, xt);
1510 	mutex_exit(&xhcip->xhci_lock);
1511 
1512 	usba_hcdi_cb(xep->xep_pipe, urp, cr);
1513 	if (periodic == B_TRUE) {
1514 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1515 	} else {
1516 		xhci_transfer_free(xhcip, xt);
1517 	}
1518 
1519 	return (B_TRUE);
1520 }
1521 
/*
 * Completion handling for isochronous endpoints. Entered with
 * xhci_t`xhci_lock held; it is dropped on every return path. Returns
 * B_FALSE only when a fatal FM error has forced a runtime reset.
 */
static boolean_t
xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
{
	int code;
	usb_cr_t cr;
	xhci_transfer_t *rem;
	usb_isoc_pkt_descr_t *desc;
	usb_isoc_req_t *usrp;

	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
	ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);

	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));

	/*
	 * The descriptors that we copy the data from are set up to assume that
	 * everything was OK and we transferred all the requested data.
	 */
	desc = &xt->xt_isoc[off];
	if (code == XHCI_CODE_SHORT_XFER) {
		/* Trim the optimistic length by the untransferred residue. */
		int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
		desc->isoc_pkt_actual_length -= residue;
	}

	/*
	 * We don't perform the callback until the very last TRB is returned
	 * here. If we have a TRB report on something else, that means that we
	 * had a short transfer.
	 */
	if (off < xt->xt_ntrbs - 1) {
		mutex_exit(&xhcip->xhci_lock);
		return (B_TRUE);
	}

	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
	rem = list_remove_head(&xep->xep_transfers);
	VERIFY3P(rem, ==, xt);
	mutex_exit(&xhcip->xhci_lock);

	cr = USB_CR_OK;

	if (xt->xt_data_tohost == B_TRUE) {
		usb_opaque_t urp;
		/* IN polling: duplicate the request for the next round. */
		urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
		usrp = (usb_isoc_req_t *)urp;

		if (cr == USB_CR_OK) {
			mblk_t *mp;
			size_t len;
			if (xhci_transfer_sync(xhcip, xt,
			    DDI_DMA_SYNC_FORCPU) != DDI_FM_OK) {
				xhci_error(xhcip, "failed to process "
				    "isochronous transfer callback for "
				    "endpoint %u of device on slot %d and port "
				    "%d: encountered fatal FM error "
				    "synchronizing DMA memory, resetting "
				    "device",
				    xep->xep_num, xd->xd_slot, xd->xd_port);
				xhci_fm_runtime_reset(xhcip);
				mutex_exit(&xhcip->xhci_lock);
				return (B_FALSE);
			}

			mp = usrp->isoc_data;
			len = xt->xt_buffer.xdb_len;
			xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
			mp->b_wptr += len;
		}
	} else {
		usrp = (usb_isoc_req_t *)xt->xt_usba_req;
	}

	if (cr == USB_CR_OK) {
		/* Hand the per-packet results back to the framework. */
		bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
		    sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
	}

	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
	if (xt->xt_data_tohost == B_TRUE) {
		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
	} else {
		xhci_transfer_free(xhcip, xt);
	}

	return (B_TRUE);
}
1609 
/*
 * Dispatch a transfer event TRB from the event ring to the appropriate
 * per-endpoint-type completion handler. Returns B_TRUE when event
 * processing may continue, B_FALSE after a fatal error has forced a
 * runtime reset of the controller.
 */
boolean_t
xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
{
	boolean_t ret;
	int slot, endpoint, code;
	uint_t off;
	xhci_device_t *xd;
	xhci_endpoint_t *xep;
	xhci_transfer_t *xt;
	boolean_t transfer_done;

	endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
	slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));

	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
	case XHCI_CODE_RING_OVERRUN:
		/*
		 * If we have an ISOC overrun or underrun then there will be no
		 * valid data pointer in the TRB associated with it. Just drive
		 * on.
		 */
		return (B_TRUE);
	case XHCI_CODE_UNDEFINED:
		xhci_error(xhcip, "received transfer trb with undefined fatal "
		    "error: resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
	case XHCI_CODE_XFER_STOPSHORT:
		/*
		 * This causes us to transition the endpoint to a stopped state.
		 * Each of these indicate a different possible state that we
		 * have to deal with. Effectively we're going to drop it and
		 * leave it up to the consumers to figure out what to do. For
		 * the moment, that's generally okay because stops are only used
		 * in cases where we're cleaning up outstanding reqs, etc.
		 *
		 * We do this before we check for the corresponding transfer as
		 * this will generally be generated by a command issued that's
		 * stopping the ring.
		 */
		return (B_TRUE);
	default:
		break;
	}

	mutex_enter(&xhcip->xhci_lock);
	xd = xhci_device_lookup_by_slot(xhcip, slot);
	if (xd == NULL) {
		xhci_error(xhcip, "received transfer trb with code %d for "
		    "unknown slot %d and endpoint %d: resetting device", code,
		    slot, endpoint);
		mutex_exit(&xhcip->xhci_lock);
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	/*
	 * Endpoint IDs are indexed based on their Device Context Index, which
	 * means that we need to subtract one to get the actual ID that we use.
	 */
	xep = xd->xd_endpoints[endpoint - 1];
	if (xep == NULL) {
		xhci_error(xhcip, "received transfer trb with code %d, slot "
		    "%d, and unknown endpoint %d: resetting device", code,
		    slot, endpoint);
		mutex_exit(&xhcip->xhci_lock);
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	/*
	 * The TRB that we received may be an event data TRB for a bulk
	 * endpoint, a normal or short completion for any other endpoint or an
	 * error. In all cases, we need to figure out what transfer this
	 * corresponds to. If this is an error, then we need to make sure that
	 * the generating ring has been cleaned up.
	 *
	 * TRBs should be delivered in order, based on the ring. If for some
	 * reason we find something that doesn't add up here, then we need to
	 * assume that something has gone horribly wrong in the system and issue
	 * a runtime reset. We issue the runtime reset rather than just trying
	 * to stop and flush the ring, because it's unclear if we could stop
	 * the ring in time.
	 */
	if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
	    NULL) {
		xhci_error(xhcip, "received transfer trb with code %d, slot "
		    "%d, and endpoint %d, but does not match current transfer "
		    "for endpoint: resetting device", code, slot, endpoint);
		mutex_exit(&xhcip->xhci_lock);
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	transfer_done = B_FALSE;

	/*
	 * Translate the completion code into a USBA completion reason and
	 * note whether the endpoint is now halted. Codes that fully resolve
	 * the transfer here set transfer_done; success/short completions are
	 * left to the per-type handlers below.
	 */
	switch (code) {
	case XHCI_CODE_SUCCESS:
	case XHCI_CODE_SHORT_XFER:
		/* Handled by endpoint logic */
		break;
	case XHCI_CODE_STALL:
		/*
		 * This causes us to transition to the halted state;
		 * however, downstream clients are able to handle this just
		 * fine.
		 */
		xep->xep_state |= XHCI_ENDPOINT_HALTED;
		xt->xt_cr = USB_CR_STALL;
		transfer_done = B_TRUE;
		break;
	case XHCI_CODE_BABBLE:
		transfer_done = B_TRUE;
		xt->xt_cr = USB_CR_DATA_OVERRUN;
		xep->xep_state |= XHCI_ENDPOINT_HALTED;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		transfer_done = B_TRUE;
		xt->xt_cr = USB_CR_DEV_NOT_RESP;
		xep->xep_state |= XHCI_ENDPOINT_HALTED;
		break;
	case XHCI_CODE_BW_OVERRUN:
		transfer_done = B_TRUE;
		xt->xt_cr = USB_CR_DATA_OVERRUN;
		break;
	case XHCI_CODE_DATA_BUF:
		transfer_done = B_TRUE;
		if (xt->xt_data_tohost)
			xt->xt_cr = USB_CR_DATA_OVERRUN;
		else
			xt->xt_cr = USB_CR_DATA_UNDERRUN;
		break;
	default:
		/*
		 * Treat these as general unspecified errors that don't cause a
		 * stop of the ring. Even if it does, a subsequent timeout
		 * should occur which causes us to end up dropping a pipe reset
		 * or at least issuing a reset of the device as part of
		 * quiescing.
		 */
		transfer_done = B_TRUE;
		xt->xt_cr = USB_CR_HC_HARDWARE_ERR;
		break;
	}

	if (transfer_done == B_TRUE) {
		xhci_transfer_t *alt;

		alt = list_remove_head(&xep->xep_transfers);
		VERIFY3P(alt, ==, xt);
		mutex_exit(&xhcip->xhci_lock);
		if (xt->xt_usba_req == NULL) {
			usb_opaque_t urp;

			/* Periodic transfer: obtain a request to fail. */
			urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
			usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
		} else {
			usba_hcdi_cb(xep->xep_pipe,
			    (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
			xhci_transfer_free(xhcip, xt);
		}
		return (B_TRUE);
	}

	/*
	 * Process the transfer callback based on the type of endpoint. Each of
	 * these callback functions will end up calling back into USBA via
	 * usba_hcdi_cb() to return transfer information (whether successful or
	 * not). Because we can't hold any locks across a call to that function,
	 * all of these callbacks will drop the xhci_t`xhci_lock by the time
	 * they return. This is why there's no mutex_exit() call before we
	 * return.
	 */
	switch (xep->xep_type) {
	case USB_EP_ATTR_CONTROL:
		ret = xhci_endpoint_control_callback(xhcip, xd, xep, xt, off,
		    trb);
		break;
	case USB_EP_ATTR_BULK:
		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off, trb);
		break;
	case USB_EP_ATTR_INTR:
		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off,
		    trb);
		break;
	case USB_EP_ATTR_ISOCH:
		ret = xhci_endpoint_isoch_callback(xhcip, xd, xep, xt, off,
		    trb);
		break;
	default:
		panic("bad endpoint type: %u", xep->xep_type);
	}

	return (ret);
}
1810