xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci_event.c (revision ec82ef794c304d675af6962e1428b3b12ca2be8b)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Joyent, Inc.
 * Copyright (c) 2019 by Western Digital Corporation
 */

/*
 * Event Ring Management
 *
 * All activity in xHCI is reported to an event ring, which corresponds
 * directly with an interrupt. Whether a command is issued or an I/O is issued
 * to a given device endpoint, it will end up being acknowledged, positively or
 * negatively, on an event ring.
 *
 * Unlike other rings, the OS is a consumer of the event rings, not a producer.
 * For more information on how the ring is used, see xhci_ring.c. For more
 * information generally, see xhci.c.
 *
 * All of the rings are described in the ERST -- the Event Ring Segment Table.
 * As we only have a single interrupt and a single event ring, we only write a
 * single entry here.
 */
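
/*
 * For reference, each entry in the ERST is a 16-byte structure defined by the
 * xHCI specification (section 6.5). This is only a sketch of its layout; the
 * authoritative definition of xhci_event_segment_t lives in xhci.h:
 *
 *	uint64_t	xes_addr	Ring Segment Base Address (64-byte
 *					aligned, low bits reserved)
 *	uint16_t	xes_size	Ring Segment Size, in TRBs
 *	6 bytes				reserved
 *
 * xhci_event_init() below fills in exactly one such entry, describing the
 * single segment that backs our event ring.
 */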

#include <sys/usb/hcd/xhci/xhci.h>


void
xhci_event_fini(xhci_t *xhcip)
{
	xhci_event_ring_t *xev = &xhcip->xhci_event;
	xhci_ring_free(&xev->xev_ring);
	if (xev->xev_segs != NULL)
		xhci_dma_free(&xev->xev_dma);
	xev->xev_segs = NULL;
}

/*
 * Make sure that if we leave here we either have both the ring and table
 * addresses initialized or neither.
 */
static int
xhci_event_alloc(xhci_t *xhcip, xhci_event_ring_t *xev)
{
	int ret;
	ddi_dma_attr_t attr;
	ddi_device_acc_attr_t acc;

	/*
	 * This is allocating the segment table. It doesn't have any particular
	 * requirements. Though it could be larger, we can get away with our
	 * default data structure attributes unless we add a lot more entries.
	 */
	xhci_dma_acc_attr(xhcip, &acc);
	xhci_dma_dma_attr(xhcip, &attr);
	if (!xhci_dma_alloc(xhcip, &xev->xev_dma, &attr, &acc, B_FALSE,
	    sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS, B_FALSE))
		return (ENOMEM);
	if ((ret = xhci_ring_alloc(xhcip, &xev->xev_ring)) != 0) {
		xhci_dma_free(&xev->xev_dma);
		return (ret);
	}

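	/*
	 * Set xev_segs last: a non-NULL xev_segs is how xhci_event_init() and
	 * xhci_event_fini() tell that this allocation has already been done.
	 */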
	xev->xev_segs = (void *)xev->xev_dma.xdb_va;
	return (0);
}

int
xhci_event_init(xhci_t *xhcip)
{
	int ret;
	uint32_t reg;
	xhci_event_ring_t *xev = &xhcip->xhci_event;

	if (xev->xev_segs == NULL) {
		if ((ret = xhci_event_alloc(xhcip, xev)) != 0)
			return (ret);
	}

	if ((ret = xhci_ring_reset(xhcip, &xev->xev_ring)) != 0) {
		xhci_event_fini(xhcip);
		return (ret);
	}

	bzero(xev->xev_segs, sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS);
	xev->xev_segs[0].xes_addr = LE_64(xhci_dma_pa(&xev->xev_ring.xr_dma));
	xev->xev_segs[0].xes_size = LE_16(xev->xev_ring.xr_ntrb);

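	/*
	 * Program interrupter zero's event ring registers. The xHCI
	 * specification has software set the segment table size (ERSTSZ) and
	 * the dequeue pointer (ERDP) before the segment table base address
	 * (ERSTBA), as the ERSTBA write is what prompts the controller to
	 * consume the table.
	 */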
	reg = xhci_get32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0));
	reg &= ~XHCI_ERSTS_MASK;
	reg |= XHCI_ERSTS_SET(XHCI_EVENT_NSEGS);
	xhci_put32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0), reg);

	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0),
	    xhci_dma_pa(&xev->xev_ring.xr_dma));
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERSTBA(0),
	    xhci_dma_pa(&xev->xev_dma));
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_event_fini(xhcip);
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	return (0);
}

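/*
 * A Port Status Change event indicates that one of the root hub's ports has
 * changed state. After validating the port number the controller handed us,
 * we simply notify the root hub logic, which rescans the ports itself; the
 * specific port is not passed along.
 */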
static boolean_t
xhci_event_process_psc(xhci_t *xhcip, xhci_trb_t *trb)
{
	uint32_t port;

	if (XHCI_TRB_GET_CODE(LE_32(trb->trb_status)) != XHCI_CODE_SUCCESS) {
		return (B_TRUE);
	}

	port = XHCI_TRB_PORTID(LE_64(trb->trb_addr));
	if (port < 1 || port > xhcip->xhci_caps.xcap_max_ports) {
		/*
		 * At some point we may want to send a DDI_FM_DEVICE_INVAL_STATE
		 * ereport as part of this.
		 */
		return (B_FALSE);
	}

	xhci_root_hub_psc_callback(xhcip);
	return (B_TRUE);
}

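/*
 * Dispatch an event TRB based on the type encoded in its flags word. Event
 * types that we don't explicitly handle below (e.g. bandwidth request or host
 * controller events) are deliberately ignored. A B_FALSE return tells our
 * caller to stop processing, generally because a fatal error was encountered
 * and we are resetting the device.
 */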
boolean_t
xhci_event_process_trb(xhci_t *xhcip, xhci_trb_t *trb)
{
	uint32_t type;

	type = LE_32(trb->trb_flags) & XHCI_TRB_TYPE_MASK;
	switch (type) {
	case XHCI_EVT_PORT_CHANGE:
		if (!xhci_event_process_psc(xhcip, trb))
			return (B_FALSE);
		break;
	case XHCI_EVT_CMD_COMPLETE:
		if (!xhci_command_event_callback(xhcip, trb))
			return (B_FALSE);
		break;
	case XHCI_EVT_DOORBELL:
		/*
		 * Because we don't have any VF hardware, this event
		 * should never happen. If it does, that probably means
		 * something bad has happened and we should reset the
		 * device.
		 */
		xhci_error(xhcip, "received xHCI VF interrupt even "
		    "though virtual functions are not supported, "
		    "resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	case XHCI_EVT_XFER:
		if (!xhci_endpoint_transfer_callback(xhcip, trb))
			return (B_FALSE);
		break;
	/*
	 * Ignore other events that come in.
	 */
	default:
		break;
	}

	return (B_TRUE);
}

/*
 * Process the event ring. Note that we're in interrupt context while doing
 * this.
 */
boolean_t
xhci_event_process(xhci_t *xhcip)
{
	int nevents;
	uint64_t addr;
	xhci_ring_t *xrp = &xhcip->xhci_event.xev_ring;

	/*
	 * By design, we don't hold the xhci_t's lock for the duration of the
	 * interrupt (it doesn't protect any of the event ring's data), which
	 * means we may transition to an error state at any time. We still do
	 * an initial check here so that we don't go too far down the path
	 * before noticing.
	 */
	mutex_enter(&xhcip->xhci_lock);
	if (xhcip->xhci_state & XHCI_S_ERROR) {
		mutex_exit(&xhcip->xhci_lock);
		return (B_FALSE);
	}
	mutex_exit(&xhcip->xhci_lock);

	/*
	 * We've seen a few cases, particularly with controllers where BIOS
	 * takeover is involved, in which an interrupt gets injected into the
	 * system before we've actually finished setting things up. If that
	 * happens and we don't have a ring yet, don't try to do anything.
	 */
	if (xhcip->xhci_event.xev_segs == NULL)
		return (B_TRUE);

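	/*
	 * The controller delivers event TRBs to this ring via DMA, so before
	 * looking at anything we must synchronize our view of the memory for
	 * the kernel and verify that the DMA handle is still healthy.
	 */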
	XHCI_DMA_SYNC(xrp->xr_dma, DDI_DMA_SYNC_FORKERNEL);
	if (xhci_check_dma_handle(xhcip, &xrp->xr_dma) != DDI_FM_OK) {
		xhci_error(xhcip, "encountered fatal FM error trying to "
		    "synchronize event ring: resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	/*
	 * Process at most a full ring's worth of events so that a controller
	 * continually producing events can't keep us here indefinitely.
	 * xhci_ring_event_advance() returns NULL once it reaches a TRB that
	 * the controller hasn't published yet (tracked via the ring's cycle
	 * bit; see xhci_ring.c).
	 */
	for (nevents = 0; nevents < xrp->xr_ntrb; nevents++) {
		xhci_trb_t *trb;

		if ((trb = xhci_ring_event_advance(xrp)) == NULL)
			break;

		if (!xhci_event_process_trb(xhcip, trb))
			return (B_FALSE);
	}

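	/*
	 * Let the hardware know how far we got by writing our current dequeue
	 * pointer (derived from xr_tail) back to the Event Ring Dequeue
	 * Pointer register. XHCI_ERDP_BUSY corresponds to the Event Handler
	 * Busy (EHB) bit, which is write-one-to-clear; clearing it indicates
	 * that we have caught up and lets the interrupter signal subsequent
	 * events.
	 */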
	addr = xhci_dma_pa(&xrp->xr_dma) + sizeof (xhci_trb_t) * xrp->xr_tail;
	addr |= XHCI_ERDP_BUSY;
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0), addr);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write to event ring dequeue "
		    "pointer: encountered fatal FM error, resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	return (B_TRUE);
}