xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci_event.c (revision 458f44a49dc56cd17a39815122214e7a1b4793e3)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2016 Joyent, Inc.
14  */
15 
16 /*
17  * Event Ring Management
18  *
19  * All activity in xHCI is reported to an event ring, which corresponds directly
20  * with an interrupt. Whether a command is issued or an I/O is issued to a given
21  * device endpoint, it will end up being acknowledged, positively or negatively,
22  * on an event ring.
23  *
24  * Unlike other rings, the OS is a consumer of the event rings, not a producer.
25  * For more information on how the ring is used, see xhci_ring.c. For more
26  * information generally, see xhci.c.
27  *
28  * All of the rings are described in the ERST -- Event Ring Segment Table. As we
29  * only have a single interrupt and a single event ring, we only write a single
30  * entry here.
31  */
32 
33 #include <sys/usb/hcd/xhci/xhci.h>
34 
35 
36 void
37 xhci_event_fini(xhci_t *xhcip)
38 {
39 	xhci_event_ring_t *xev = &xhcip->xhci_event;
40 	xhci_ring_free(&xev->xev_ring);
41 	if (xev->xev_segs != NULL)
42 		xhci_dma_free(&xev->xev_dma);
43 	xev->xev_segs = NULL;
44 }
45 
46 /*
47  * Make sure that if we leave here we either have both the ring and table
48  * addresses initialized or neither.
49  */
50 static int
51 xhci_event_alloc(xhci_t *xhcip, xhci_event_ring_t *xev)
52 {
53 	int ret;
54 	ddi_dma_attr_t attr;
55 	ddi_device_acc_attr_t acc;
56 
57 	/*
58 	 * This is allocating the segment table. It doesn't have any particular
59 	 * requirements. Though it could be larger, we can get away with our
60 	 * default data structure attributes unless we add a lot more entries.
61 	 */
62 	xhci_dma_acc_attr(xhcip, &acc);
63 	xhci_dma_dma_attr(xhcip, &attr);
64 	if (!xhci_dma_alloc(xhcip, &xev->xev_dma, &attr, &acc, B_FALSE,
65 	    sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS, B_FALSE))
66 		return (ENOMEM);
67 	if ((ret = xhci_ring_alloc(xhcip, &xev->xev_ring)) != 0) {
68 		xhci_dma_free(&xev->xev_dma);
69 		return (ret);
70 	}
71 
72 	xev->xev_segs = (void *)xev->xev_dma.xdb_va;
73 	return (0);
74 }
75 
/*
 * Program the controller's primary interrupter (index 0) with our single
 * event ring: reset the ring's software state, describe the ring in the
 * first (and only) segment table entry, then write the segment count,
 * dequeue pointer, and segment table base address registers.
 *
 * Returns 0 on success or an errno value. On failure, all event state is
 * torn down via xhci_event_fini() so initialization may be retried.
 */
int
xhci_event_init(xhci_t *xhcip)
{
	int ret;
	uint32_t reg;
	xhci_event_ring_t *xev = &xhcip->xhci_event;

	/*
	 * Only allocate the first time through: xev_segs stays non-NULL
	 * until xhci_event_fini() frees it, so re-initialization reuses the
	 * existing DMA memory.
	 */
	if (xev->xev_segs == NULL) {
		if ((ret = xhci_event_alloc(xhcip, xev)) != 0)
			return (ret);
	}

	if ((ret = xhci_ring_reset(xhcip, &xev->xev_ring)) != 0) {
		xhci_event_fini(xhcip);
		return (ret);
	}

	/*
	 * We use a single segment: entry zero points at the ring's physical
	 * base and records its size in TRBs. Fields are little-endian as the
	 * controller consumes this table directly from DMA memory.
	 */
	bzero(xev->xev_segs, sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS);
	xev->xev_segs[0].xes_addr = LE_64(xhci_dma_pa(&xev->xev_ring.xr_dma));
	xev->xev_segs[0].xes_size = LE_16(xev->xev_ring.xr_ntrb);

	/* Tell the controller how many segment table entries are valid. */
	reg = xhci_get32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0));
	reg &= ~XHCI_ERSTS_MASK;
	reg |= XHCI_ERSTS_SET(XHCI_EVENT_NSEGS);
	xhci_put32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0), reg);

	/*
	 * Point the dequeue pointer at the start of the ring, then write the
	 * segment table base address. NOTE(review): ERSTBA appears to be
	 * deliberately written last — presumably the point at which the
	 * controller begins using the table — confirm against xHCI spec
	 * section 5.5.2 before reordering.
	 */
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0),
	    xhci_dma_pa(&xev->xev_ring.xr_dma));
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERSTBA(0),
	    xhci_dma_pa(&xev->xev_dma));
	/* FMA: verify that the register accesses above actually landed. */
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_event_fini(xhcip);
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	return (0);
}
114 
115 static boolean_t
116 xhci_event_process_psc(xhci_t *xhcip, xhci_trb_t *trb)
117 {
118 	uint32_t port;
119 
120 	if (XHCI_TRB_GET_CODE(LE_32(trb->trb_status)) != XHCI_CODE_SUCCESS) {
121 		return (B_TRUE);
122 	}
123 
124 	port = XHCI_TRB_PORTID(LE_64(trb->trb_addr));
125 	if (port < 1 || port > xhcip->xhci_caps.xcap_max_ports) {
126 		/*
127 		 * At some point we may want to send a DDI_FM_DEVICE_INVAL_STATE
128 		 * ereport as part of this.
129 		 */
130 		return (B_FALSE);
131 	}
132 
133 	xhci_root_hub_psc_callback(xhcip);
134 	return (B_TRUE);
135 }
136 
/*
 * Process the event ring, note we're in interrupt context while doing this.
 *
 * Drains up to one full ring's worth of event TRBs, dispatching each by
 * type, and finally advances the controller's event ring dequeue pointer
 * (ERDP). Returns B_TRUE when processing completed normally and B_FALSE
 * when a fatal condition was hit (in which case a runtime reset may already
 * have been initiated).
 */
boolean_t
xhci_event_process(xhci_t *xhcip)
{
	int nevents;
	uint64_t addr;
	xhci_ring_t *xrp = &xhcip->xhci_event.xev_ring;

	/*
	 * While it may be possible for us to transition to an error state at
	 * any time because we are reasonably not holding the xhci_t's lock
	 * during the entire interrupt (as it doesn't protect any of the event
	 * ring's data), we still do an initial test to ensure that we don't go
	 * too far down the path.
	 */
	mutex_enter(&xhcip->xhci_lock);
	if (xhcip->xhci_state & XHCI_S_ERROR) {
		mutex_exit(&xhcip->xhci_lock);
		return (B_FALSE);
	}
	mutex_exit(&xhcip->xhci_lock);

	/*
	 * We've seen a few cases, particularly when dealing with controllers
	 * where BIOS takeover is involved, that an interrupt gets injected into
	 * the system before we've actually finished setting things up. If for
	 * some reason that happens, and we don't actually have a ring yet,
	 * don't try and do anything.
	 */
	if (xhcip->xhci_event.xev_segs == NULL)
		return (B_TRUE);

	/* Pull the device-written TRBs into view before reading them. */
	XHCI_DMA_SYNC(xrp->xr_dma, DDI_DMA_SYNC_FORKERNEL);
	if (xhci_check_dma_handle(xhcip, &xrp->xr_dma) != DDI_FM_OK) {
		xhci_error(xhcip, "encountered fatal FM error trying to "
		    "synchronize event ring: resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	/*
	 * Process at most a full ring worth of events.
	 */
	for (nevents = 0; nevents < xrp->xr_ntrb; nevents++) {
		xhci_trb_t *trb;
		uint32_t type;

		/* NULL means the ring has no further valid events for us. */
		if ((trb = xhci_ring_event_advance(xrp)) == NULL)
			break;

		type = LE_32(trb->trb_flags) & XHCI_TRB_TYPE_MASK;
		switch (type) {
		case XHCI_EVT_PORT_CHANGE:
			if (!xhci_event_process_psc(xhcip, trb))
				return (B_FALSE);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			if (!xhci_command_event_callback(xhcip, trb))
				return (B_FALSE);
			break;
		case XHCI_EVT_DOORBELL:
			/*
			 * Because we don't have any VF hardware, this event
			 * should never happen. If it does, that probably means
			 * something bad has happened and we should reset the
			 * device.
			 */
			xhci_error(xhcip, "received xHCI VF interrupt even "
			    "though virtual functions are not supported, "
			    "resetting device");
			xhci_fm_runtime_reset(xhcip);
			return (B_FALSE);
		case XHCI_EVT_XFER:
			if (!xhci_endpoint_transfer_callback(xhcip, trb))
				return (B_FALSE);
			break;
		/*
		 * Ignore other events that come in.
		 */
		default:
			break;
		}
	}

	/*
	 * Write back the dequeue pointer so the controller knows how far we
	 * have consumed; XHCI_ERDP_BUSY also clears the Event Handler Busy
	 * bit the controller set when it raised the interrupt.
	 */
	addr = xhci_dma_pa(&xrp->xr_dma) + sizeof (xhci_trb_t) * xrp->xr_tail;
	addr |= XHCI_ERDP_BUSY;
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0), addr);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write to event ring dequeue "
		    "pointer: encountered fatal FM error, resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	return (B_TRUE);
}
235