/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2019 by Western Digital Corporation
 */

#ifndef _SYS_USB_XHCI_XHCI_H
#define	_SYS_USB_XHCI_XHCI_H

/*
 * Extensible Host Controller Interface (xHCI) USB Driver
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq_impl.h>
#include <sys/sysmacros.h>
#include <sys/usb/hcd/xhci/xhcireg.h>

#include <sys/usb/usba.h>
#include <sys/usb/usba/hcdi.h>
#include <sys/usb/hubd/hub.h>
#include <sys/usb/usba/hubdi.h>
#include <sys/usb/hubd/hubdvar.h>


#ifdef __cplusplus
extern "C" {
#endif

/*
 * The base segment for DMA attributes was determined to be 4k based on xHCI 1.1
 * / table 54: Data Structure Max Size, Boundary, and Alignment Requirement
 * Summary.  This indicates that the required alignment for most things is
 * PAGESIZE, which in our current implementation is required to be 4K. We
 * provide the ring segment value below for the things which need 64K alignment.
 *
 * Similarly, in the same table, the maximum required alignment is 64 bytes,
 * hence we use that for everything.
 *
 * Next are the scatter/gather lengths. For most of the data structures, we only
 * want to have a single SGL entry, i.e. just a simple flat mapping. For many of
 * our transfers, we use the same logic to simplify the implementation of the
 * driver. However, for bulk transfers, which are the largest by far, we want to
 * be able to leverage SGLs to give us more DMA flexibility.
 *
 * We can transfer up to 64K in one transfer request block (TRB), which
 * corresponds to a single SGL entry. Each ring we create is a single page in
 * size and will support at most 256 TRBs. To try to give the operating system
 * flexibility when allocating DMA transfers, we've opted to allow up to 63
 * SGLs. Because there isn't a good way to support DMA windows with the xHCI
 * controller design, if this number is too small then DMA allocations and
 * binding might fail. If the DMA binding fails, the transfer will fail.
 *
 * The reason that we use 63 SGLs and not the expected 64 is that we always need
 * to allocate an additional TRB for the event data. This leaves us with a
 * nicely divisible number of entries.
 *
 * The final piece of this is the maximum-sized transfer that the driver
 * advertises to the broader framework. This is currently sized at 512 KiB. For
 * reference, the ehci driver sized this value at 640 KiB. It's important to
 * understand that this isn't reflected in the DMA attribute limitation, because
 * it's not an attribute of the hardware. Experimentally, this has proven to be
 * sufficient for most of the drivers that we support today. When considering
 * increasing this number, please note the impact that might have on the number
 * of DMA SGL entries required to satisfy the allocation.
 *
 * The value of 512 KiB was originally based on the number of SGLs we supported
 * multiplied by the maximum transfer size. The original number of
 * XHCI_TRANSFER_DMA_SGL was 8. The 512 KiB value was based upon taking the
 * number of SGLs and assuming that each TRB used its maximum transfer size of
 * 64 KiB.
 */
#define	XHCI_TRB_MAX_TRANSFER	65536	/* 64 KiB */
#define	XHCI_DMA_ALIGN		64
#define	XHCI_DEF_DMA_SGL	1
#define	XHCI_TRANSFER_DMA_SGL	63
#define	XHCI_MAX_TRANSFER	524288	/* 512 KiB */
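
/*
 * The following is a minimal, illustrative sketch (it is not the driver's
 * xhci_dma_transfer_attr() implementation) of how the constants above could
 * feed a DDI DMA attribute structure for bulk transfer buffers. The address
 * range and burstsizes values here are assumptions for the example only.
 */
static const ddi_dma_attr_t xhci_example_transfer_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0,
	.dma_attr_addr_hi = 0xffffffffffffffffULL,
	.dma_attr_count_max = XHCI_TRB_MAX_TRANSFER - 1,
	.dma_attr_align = XHCI_DMA_ALIGN,
	.dma_attr_burstsizes = 0xfff,
	.dma_attr_minxfer = 0x1,
	.dma_attr_maxxfer = XHCI_MAX_TRANSFER,
	/* A single cookie must not cross the 64 KiB TRB boundary. */
	.dma_attr_seg = XHCI_TRB_MAX_TRANSFER - 1,
	.dma_attr_sgllen = XHCI_TRANSFER_DMA_SGL,
	.dma_attr_granular = 0x1,
	.dma_attr_flags = 0
};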

/*
 * Properties and values for rerouting ehci ports to xhci.
 */
#define	XHCI_PROP_REROUTE_DISABLE	0
#define	XHCI_PROP_REROUTE_DEFAULT	1

/*
 * This number is a bit made up. Truthfully, the API here isn't the most useful
 * for what we need to define, as it should really be based on the endpoint
 * that we're interested in rather than the device as a whole.
 *
 * We're basically being asked how many TRBs we're willing to schedule in one
 * go. There's no great way to come up with this number, so we basically are
 * making up something such that we use up a good portion of a ring, but not
 * too much of it.
 */
#define	XHCI_ISOC_MAX_TRB	64

#ifdef	DEBUG
#define	XHCI_DMA_SYNC(dma, flag)	VERIFY0(ddi_dma_sync( \
					    (dma).xdb_dma_handle, 0, 0, \
					    (flag)))
#else
#define	XHCI_DMA_SYNC(dma, flag)	((void) ddi_dma_sync( \
					    (dma).xdb_dma_handle, 0, 0, \
					    (flag)))
#endif
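
/*
 * Illustrative usage only (xt is a hypothetical xhci_transfer_t pointer):
 * flush driver-written state out to the device before handing a buffer to
 * the controller, or pull it back before the CPU reads a completed transfer:
 *
 *	XHCI_DMA_SYNC(xt->xt_buffer, DDI_DMA_SYNC_FORDEV);
 *	XHCI_DMA_SYNC(xt->xt_buffer, DDI_DMA_SYNC_FORCPU);
 */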

/*
 * TRBs need to indicate the number of remaining USB packets in the overall
 * transfer. This is a 5-bit value, which means that the maximum value we can
 * store in that TRB field (the TD Size field) is 31.
 */
#define	XHCI_MAX_TDSIZE		31
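
/*
 * Minimal illustrative sketch (an assumption, not the driver's transfer
 * code): the value placed in the TD Size field is the number of packets
 * still outstanding after the current TRB, clamped to what 5 bits can hold.
 */
static inline uint_t
xhci_example_td_size(uint_t packets_remaining)
{
	return (MIN(packets_remaining, XHCI_MAX_TDSIZE));
}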

/*
 * This defines a time in 2-ms ticks that is required to wait for the
 * controller to be ready to go. Section 5.4.8 of the xHCI specification, in
 * the description of the PORTSC register, indicates that the upper bound is
 * 20 ms. Therefore the number of ticks is 10.
 */
#define	XHCI_POWER_GOOD	10

/*
 * Definitions to determine the default number of interrupts. Note that we only
 * bother with a single interrupt at this time, though we've arranged the driver
 * to make it possible to request more if, for some unlikely reason, it becomes
 * necessary.
 */
#define	XHCI_NINTR	1

/*
 * Default interrupt moderation value. This allows up to roughly 4000
 * interrupts per second: the moderation interval is expressed in 250 ns
 * ticks, and 0x3F8 (1016) ticks is an interval of about 254 us. This is
 * supposed to be the default value of the controller. See xHCI 1.1 / 4.17.2
 * for more information.
 */
#define	XHCI_IMOD_DEFAULT	0x000003F8U

/*
 * Definitions that surround the default values used in various contexts. These
 * come from various parts of the xHCI specification. In general, see xHCI 1.1 /
 * 4.8.2. Note that the MPS_MASK is used for ISOCH and INTR endpoints, which
 * have different sizes.
 *
 * The burst member is a bit more complicated. By default for USB 2 devices, it
 * only matters for ISOCH and INTR endpoints and so we use the masks below to
 * pull it out of the endpoint descriptor's max packet field. For USB 3, it
 * matters for non-control endpoints. However, it comes out of a companion
 * descriptor.
 *
 * By default the mult member is zero for all cases except for super speed
 * ISOCH endpoints, where it comes from the companion descriptor.
 */
#define	XHCI_CONTEXT_DEF_CERR		3
#define	XHCI_CONTEXT_ISOCH_CERR		0
#define	XHCI_CONTEXT_MPS_MASK		0x07ff
#define	XHCI_CONTEXT_BURST_MASK		0x1800
#define	XHCI_CONTEXT_BURST_SHIFT	11
#define	XHCI_CONTEXT_DEF_MULT		0
#define	XHCI_CONTEXT_DEF_MAX_ESIT	0
#define	XHCI_CONTEXT_DEF_CTRL_ATL	8
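
/*
 * Illustrative sketch (an assumption about usage, not the driver's endpoint
 * code): for a USB 2 periodic endpoint, bits 10:0 of wMaxPacketSize are the
 * max packet size and bits 12:11 carry the additional transaction
 * opportunities per microframe, which maps to the context's burst value.
 */
static inline uint_t
xhci_example_ep2_burst(const usb_ep_descr_t *ep)
{
	return ((ep->wMaxPacketSize & XHCI_CONTEXT_BURST_MASK) >>
	    XHCI_CONTEXT_BURST_SHIFT);
}

static inline uint_t
xhci_example_ep2_mps(const usb_ep_descr_t *ep)
{
	return (ep->wMaxPacketSize & XHCI_CONTEXT_MPS_MASK);
}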

/*
 * This number represents the number of transfers that we'll set up for a given
 * interrupt IN polling request. The idea is to allocate enough transfers that
 * one is always available to be scheduled, even if the system gets behind in
 * processing completions and for some reason we can't field the interrupt
 * promptly. In other words, we want enough outstanding that even at the
 * fastest polling interval (125 us) we don't run dry. So in this case we
 * choose 8, on the assumption that we should be able to process at least one
 * transfer in a given millisecond. Note that this is not based in fact and is
 * really as much a guess as a hope.
 *
 * While we could then use fewer resources for other interrupt transfers that
 * are slower, starting with uniform resource usage will make things a bit
 * easier.
 */
#define	XHCI_INTR_IN_NTRANSFERS	8

/*
 * This number represents the number of xhci_transfer_t structures that we'll
 * set up for a given isochronous transfer polling request. A given isochronous
 * transfer may actually have multiple units of time associated with it. As
 * such, we basically want to treat this like a case of classic double
 * buffering: we have one transfer ready to go while the other is being filled
 * up. This compensates for additional latency in the system. This is smaller
 * than the interrupt IN case above, as many callers may ask for multiple
 * intervals in a single request.
 */
#define	XHCI_ISOC_IN_NTRANSFERS	2

#define	XHCI_PERIODIC_IN_NTRANSFERS					\
	MAX(XHCI_ISOC_IN_NTRANSFERS, XHCI_INTR_IN_NTRANSFERS)

/*
 * Mask for a route string, which is a 20-bit value.
 */
#define	XHCI_ROUTE_MASK(x)	((x) & 0xfffff)

/*
 * This is the default tick that we use for timeouts while endpoints have
 * outstanding, active, non-periodic transfers. We choose one second as the USBA
 * specifies timeouts in units of seconds. Note that this is in microseconds, so
 * it can be fed into drv_usectohz().
 */
#define	XHCI_TICK_TIMEOUT_US	(MICROSEC)

/*
 * Set of bits, at least one of which must be set to indicate that this port
 * has something interesting on it.
 */
#define	XHCI_HUB_INTR_CHANGE_MASK	(XHCI_PS_CSC | XHCI_PS_PEC | \
    XHCI_PS_WRC | XHCI_PS_OCC | XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC)
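
/*
 * Illustrative sketch (an assumption about usage, not the root hub code
 * itself): a PORTSC value is interesting to the hub driver when any of the
 * change bits above is asserted.
 */
static inline boolean_t
xhci_example_port_has_change(uint32_t portsc)
{
	return ((portsc & XHCI_HUB_INTR_CHANGE_MASK) != 0 ? B_TRUE : B_FALSE);
}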

/*
 * These represent known issues with various xHCI controllers.
 *
 *	XHCI_QUIRK_NO_MSI	MSI support on this controller is known to be
 *				broken.
 *
 *	XHCI_QUIRK_32_ONLY	Only use 32-bit DMA addresses with this
 *				controller.
 *
 *	XHCI_QUIRK_INTC_EHCI	This is an Intel platform which supports
 *				rerouting ports between EHCI and xHCI
 *				controllers on the platform.
 */
typedef enum xhci_quirk {
	XHCI_QUIRK_NO_MSI	= 0x01,
	XHCI_QUIRK_32_ONLY	= 0x02,
	XHCI_QUIRK_INTC_EHCI	= 0x04
} xhci_quirk_t;

/*
 * xHCI capability parameter flags. These are documented in xHCI 1.1 / 5.3.6.
 */
typedef enum xhci_cap_flags {
	XCAP_AC64	= 0x001,
	XCAP_BNC	= 0x002,
	XCAP_CSZ	= 0x004,
	XCAP_PPC	= 0x008,
	XCAP_PIND	= 0x010,
	XCAP_LHRC	= 0x020,
	XCAP_LTC	= 0x040,
	XCAP_NSS	= 0x080,
	XCAP_PAE	= 0x100,
	XCAP_SPC	= 0x200,
	XCAP_SEC	= 0x400,
	XCAP_CFC	= 0x800
} xhci_cap_flags_t;

/*
 * Second set of capabilities; these are documented in xHCI 1.1 / 5.3.9.
 */
typedef enum xhci_cap2_flags {
	XCAP2_U3C	= 0x01,
	XCAP2_CMC	= 0x02,
	XCAP2_FMC	= 0x04,
	XCAP2_CTC	= 0x08,
	XCAP2_LEC	= 0x10,
	XCAP2_CIC	= 0x20
} xhci_cap2_flags_t;

/*
 * These represent and store the various capability registers that we'll need to
 * use. In addition, we stash a few other versioning related bits here. Note
 * that we cache more information than we might need so that we have it for
 * debugging purposes.
 */
typedef struct xhci_capability {
	uint8_t			xcap_usb_vers;
	uint16_t		xcap_hci_vers;
	uint32_t		xcap_pagesize;
	uint8_t			xcap_max_slots;
	uint16_t		xcap_max_intrs;
	uint8_t			xcap_max_ports;
	boolean_t		xcap_ist_micro;
	uint8_t			xcap_ist;
	uint16_t		xcap_max_esrt;
	boolean_t		xcap_scratch_restore;
	uint16_t		xcap_max_scratch;
	uint8_t			xcap_u1_lat;
	uint16_t		xcap_u2_lat;
	xhci_cap_flags_t	xcap_flags;
	uint8_t			xcap_max_psa;
	uint16_t		xcap_xecp_off;
	xhci_cap2_flags_t	xcap_flags2;
	int			xcap_intr_types;
} xhci_capability_t;

/*
 * This represents a single logical DMA allocation. For the vast majority of
 * non-transfer cases, it only represents a single DMA buffer and not a
 * scatter-gather list.
 */
typedef struct xhci_dma_buffer {
	caddr_t			xdb_va;		/* Buffer VA */
	size_t			xdb_len;	/* Buffer logical len */
	ddi_acc_handle_t	xdb_acc_handle;	/* Access handle */
	ddi_dma_handle_t	xdb_dma_handle;	/* DMA handle */
	int			xdb_ncookies;	/* Number of actual cookies */
	ddi_dma_cookie_t	xdb_cookies[XHCI_TRANSFER_DMA_SGL];
} xhci_dma_buffer_t;
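
/*
 * Illustrative sketch (an assumption about usage): pulling the physical
 * address out of one of a bound buffer's cookies. Callers are expected to
 * pass idx < xdb_ncookies; non-transfer buffers generally have a single
 * cookie, i.e. a flat mapping.
 */
static inline uint64_t
xhci_example_cookie_pa(const xhci_dma_buffer_t *xdb, int idx)
{
	return (xdb->xdb_cookies[idx].dmac_laddress);
}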

/*
 * This is a single transfer descriptor. It's packed to match the hardware
 * layout.
 */
#pragma pack(1)
typedef struct xhci_trb {
	uint64_t	trb_addr;
	uint32_t	trb_status;
	uint32_t	trb_flags;
} xhci_trb_t;
#pragma pack()
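
/*
 * Illustrative sketch: TRBs are kept in host byte order in driver-owned
 * memory and only converted to the controller's little-endian layout when
 * copied onto a ring (see the comment above xhci_command_t below). This
 * example assumes the LE_64()/LE_32() macros from <sys/byteorder.h> are
 * available to the including file.
 */
static inline void
xhci_example_trb_to_ring(xhci_trb_t *dst, const xhci_trb_t *src)
{
	dst->trb_addr = LE_64(src->trb_addr);
	dst->trb_status = LE_32(src->trb_status);
	dst->trb_flags = LE_32(src->trb_flags);
}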

/*
 * This represents a single transfer that we want to allocate and perform.
 */
typedef struct xhci_transfer {
	list_node_t		xt_link;
	hrtime_t		xt_sched_time;
	xhci_dma_buffer_t	xt_buffer;
	uint_t			xt_ntrbs;
	uint_t			xt_short;
	uint_t			xt_timeout;
	usb_cr_t		xt_cr;
	boolean_t		xt_data_tohost;
	xhci_trb_t		*xt_trbs;
	uint64_t		*xt_trbs_pa;
	usb_isoc_pkt_descr_t	*xt_isoc;
	usb_opaque_t		xt_usba_req;
} xhci_transfer_t;

/*
 * This represents a ring in xHCI, upon which event, transfer, and command TRBs
 * are scheduled.
 */
typedef struct xhci_ring {
	xhci_dma_buffer_t	xr_dma;
	uint_t			xr_ntrb;
	xhci_trb_t		*xr_trb;
	uint_t			xr_head;
	uint_t			xr_tail;
	uint8_t			xr_cycle;
} xhci_ring_t;
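
/*
 * Illustrative sketch (an assumption about usage, not one of the ring
 * primitives declared later in this header): recovering the index of a TRB
 * within a ring from its pointer, for example when matching a completion
 * back to a slot in the ring. The TRB is expected to lie inside xr_trb.
 */
static inline uint_t
xhci_example_ring_trb_index(const xhci_ring_t *xrp, const xhci_trb_t *trb)
{
	return ((uint_t)(trb - xrp->xr_trb));
}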

/*
 * This structure is used to represent the xHCI Device Context Base Address
 * Array (DCBAA). It's defined in section 6.1 of the specification and is
 * required for the controller to start.
 *
 * The DCBAA has at most 256 entries: entry 0 is reserved for the scratchpad
 * array and the remaining entries cover the device slots. We always size this
 * structure at that maximum.
 */
#define	XHCI_MAX_SLOTS	256
#define	XHCI_DCBAA_SCRATCHPAD_INDEX	0

typedef struct xhci_dcbaa {
	uint64_t		*xdc_base_addrs;
	xhci_dma_buffer_t	xdc_dma;
} xhci_dcbaa_t;

typedef struct xhci_scratchpad {
	uint64_t		*xsp_addrs;
	xhci_dma_buffer_t	xsp_addr_dma;
	xhci_dma_buffer_t	*xsp_scratch_dma;
} xhci_scratchpad_t;
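
/*
 * Illustrative sketch (an assumption about usage): hanging a context off the
 * DCBAA. Entry XHCI_DCBAA_SCRATCHPAD_INDEX holds the physical address of the
 * scratchpad array, while a device slot's output context goes in the entry
 * matching its slot ID (slot < XHCI_MAX_SLOTS). Byte swapping for big-endian
 * hosts is elided here.
 */
static inline void
xhci_example_dcbaa_set(xhci_dcbaa_t *dcb, uint_t slot, uint64_t pa)
{
	dcb->xdc_base_addrs[slot] = pa;
}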

/*
 * Contexts. These structures are inserted into the DCBAA above and are used
 * for describing the state of the system. Note that while many of these are
 * 32 bytes in size, the xHCI specification defines that they'll be extended to
 * 64 bytes, with all the extra bytes as zeros, if the CSZ flag is set in the
 * HCCPARAMS1 register, i.e. we have the flag XCAP_CSZ set.
 *
 * The device context covers the slot context and 31 endpoints.
 */
#define	XHCI_DEVICE_CONTEXT_32	1024
#define	XHCI_DEVICE_CONTEXT_64	2048
#define	XHCI_NUM_ENDPOINTS	31
#define	XHCI_DEFAULT_ENDPOINT	0

#pragma pack(1)
typedef struct xhci_slot_context {
	uint32_t	xsc_info;
	uint32_t	xsc_info2;
	uint32_t	xsc_tt;
	uint32_t	xsc_state;
	uint32_t	xsc_reserved[4];
} xhci_slot_context_t;

typedef struct xhci_endpoint_context {
	uint32_t	xec_info;
	uint32_t	xec_info2;
	uint64_t	xec_dequeue;
	uint32_t	xec_txinfo;
	uint32_t	xec_reserved[3];
} xhci_endpoint_context_t;

typedef struct xhci_input_context {
	uint32_t	xic_drop_flags;
	uint32_t	xic_add_flags;
	uint32_t	xic_reserved[6];
} xhci_input_context_t;
#pragma pack()
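
/*
 * Illustrative sketch (an assumption about usage): the total device context
 * size depends on whether the controller advertises 64-byte contexts via
 * HCCPARAMS1.CSZ, which we cache as XCAP_CSZ.
 */
static inline size_t
xhci_example_device_context_size(const xhci_capability_t *xcap)
{
	return ((xcap->xcap_flags & XCAP_CSZ) != 0 ?
	    XHCI_DEVICE_CONTEXT_64 : XHCI_DEVICE_CONTEXT_32);
}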

/*
 * Definitions and structures for maintaining the event ring.
 */
#define	XHCI_EVENT_NSEGS	1

#pragma pack(1)
typedef struct xhci_event_segment {
	uint64_t	xes_addr;
	uint16_t	xes_size;
	uint16_t	xes_rsvd0;
	uint32_t	xes_rsvd1;
} xhci_event_segment_t;
#pragma pack()

typedef struct xhci_event_ring {
	xhci_event_segment_t	*xev_segs;
	xhci_dma_buffer_t	xev_dma;
	xhci_ring_t		xev_ring;
} xhci_event_ring_t;

typedef enum xhci_command_ring_state {
	XHCI_COMMAND_RING_IDLE		= 0x00,
	XHCI_COMMAND_RING_RUNNING	= 0x01,
	XHCI_COMMAND_RING_ABORTING	= 0x02,
	XHCI_COMMAND_RING_ABORT_DONE	= 0x03
} xhci_command_ring_state_t;

typedef struct xhci_command_ring {
	xhci_ring_t			xcr_ring;
	kmutex_t			xcr_lock;
	kcondvar_t			xcr_cv;
	list_t				xcr_commands;
	timeout_id_t			xcr_timeout;
	xhci_command_ring_state_t	xcr_state;
} xhci_command_ring_t;

/*
 * Individual command states.
 *
 * XHCI_COMMAND_S_INIT		The command has yet to be inserted into the
 *				command ring.
 *
 * XHCI_COMMAND_S_QUEUED	The command is queued in the command ring.
 *
 * XHCI_COMMAND_S_RECEIVED	A command completion for this was received.
 *
 * XHCI_COMMAND_S_DONE		The command has been executed. Note that it may
 *				have been aborted.
 *
 * XHCI_COMMAND_S_RESET		The ring is being reset due to a fatal error and
 *				this command has been removed from the ring.
 *				This means it has been aborted, but it was not
 *				the cause of the abort.
 *
 * Note, when adding states: any state after XHCI_COMMAND_S_DONE implies that
 * the command is no longer in the ring once it reaches that state.
 */
typedef enum xhci_command_state {
	XHCI_COMMAND_S_INIT	= 0x00,
	XHCI_COMMAND_S_QUEUED	= 0x01,
	XHCI_COMMAND_S_RECEIVED = 0x02,
	XHCI_COMMAND_S_DONE	= 0x03,
	XHCI_COMMAND_S_RESET	= 0x04
} xhci_command_state_t;

/*
 * The TRB contents here are always kept in host byte order and are transformed
 * to little endian when actually scheduled on the ring.
 */
typedef struct xhci_command {
	list_node_t		xco_link;
	kcondvar_t		xco_cv;
	xhci_trb_t		xco_req;
	xhci_trb_t		xco_res;
	xhci_command_state_t	xco_state;
} xhci_command_t;

typedef enum xhci_endpoint_state {
	XHCI_ENDPOINT_PERIODIC		= 0x01,
	XHCI_ENDPOINT_HALTED		= 0x02,
	XHCI_ENDPOINT_QUIESCE		= 0x04,
	XHCI_ENDPOINT_TIMED_OUT		= 0x08,
	/*
	 * This is a composite of states (QUIESCE | TIMED_OUT) that we need to
	 * watch for. We don't want to allow ourselves to set one of these
	 * flags while another of them is currently active.
	 */
	XHCI_ENDPOINT_SERIALIZE		= 0x0c,
	/*
	 * This is a composite of states (HALTED | QUIESCE | TIMED_OUT); if
	 * any of them is set, we must not schedule activity on the ring.
	 */
	XHCI_ENDPOINT_DONT_SCHEDULE	= 0x0e,
	/*
	 * This endpoint is being torn down and should make sure it
	 * de-schedules itself.
	 */
	XHCI_ENDPOINT_TEARDOWN		= 0x10,
	/*
	 * This endpoint is currently used in polled I/O mode by the
	 * kernel debugger.
	 */
	XHCI_ENDPOINT_POLLED		= 0x20
} xhci_endpoint_state_t;

/*
 * Forwards required for the endpoint
 */
struct xhci_device;
struct xhci;

typedef struct xhci_endpoint {
	struct xhci		*xep_xhci;
	struct xhci_device	*xep_xd;
	uint_t			xep_num;
	uint_t			xep_type;
	xhci_endpoint_state_t	xep_state;
	kcondvar_t		xep_state_cv;
	timeout_id_t		xep_timeout;
	list_t			xep_transfers;
	usba_pipe_handle_data_t	*xep_pipe;
	xhci_ring_t		xep_ring;
} xhci_endpoint_t;

typedef struct xhci_device {
	list_node_t		xd_link;
	usb_port_t		xd_port;
	uint8_t			xd_slot;
	boolean_t		xd_addressed;
	usba_device_t		*xd_usbdev;
	xhci_dma_buffer_t	xd_ictx;
	kmutex_t		xd_imtx;	/* Protects input contexts */
	xhci_input_context_t	*xd_input;
	xhci_slot_context_t	*xd_slotin;
	xhci_endpoint_context_t	*xd_endin[XHCI_NUM_ENDPOINTS];
	xhci_dma_buffer_t	xd_octx;
	xhci_slot_context_t	*xd_slotout;
	xhci_endpoint_context_t	*xd_endout[XHCI_NUM_ENDPOINTS];
	xhci_endpoint_t		*xd_endpoints[XHCI_NUM_ENDPOINTS];
} xhci_device_t;

typedef enum xhci_periodic_state {
	XHCI_PERIODIC_POLL_IDLE	= 0x0,
	XHCI_PERIODIC_POLL_ACTIVE,
	XHCI_PERIODIC_POLL_NOMEM,
	XHCI_PERIODIC_POLL_STOPPING
} xhci_periodic_state_t;

typedef struct xhci_periodic_pipe {
	xhci_periodic_state_t	xpp_poll_state;
	usb_opaque_t		xpp_usb_req;
	size_t			xpp_tsize;
	uint_t			xpp_ntransfers;
	xhci_transfer_t		*xpp_transfers[XHCI_PERIODIC_IN_NTRANSFERS];
} xhci_periodic_pipe_t;

typedef struct xhci_pipe {
	list_node_t		xp_link;
	hrtime_t		xp_opentime;
	usba_pipe_handle_data_t	*xp_pipe;
	xhci_endpoint_t		*xp_ep;
	xhci_periodic_pipe_t	xp_periodic;
} xhci_pipe_t;

typedef struct xhci_usba {
	usba_hcdi_ops_t		*xa_ops;
	ddi_dma_attr_t		xa_dma_attr;
	usb_dev_descr_t		xa_dev_descr;
	usb_ss_hub_descr_t	xa_hub_descr;
	usba_pipe_handle_data_t	*xa_intr_cb_ph;
	usb_intr_req_t		*xa_intr_cb_req;
	list_t			xa_devices;
	list_t			xa_pipes;
} xhci_usba_t;

typedef enum xhci_attach_seq {
	XHCI_ATTACH_FM		= 0x1 << 0,
	XHCI_ATTACH_PCI_CONFIG	= 0x1 << 1,
	XHCI_ATTACH_REGS_MAP	= 0x1 << 2,
	XHCI_ATTACH_INTR_ALLOC	= 0x1 << 3,
	XHCI_ATTACH_INTR_ADD	= 0x1 << 4,
	XHCI_ATTACH_SYNCH	= 0x1 << 5,
	XHCI_ATTACH_INTR_ENABLE	= 0x1 << 6,
	XHCI_ATTACH_STARTED	= 0x1 << 7,
	XHCI_ATTACH_USBA	= 0x1 << 8,
	XHCI_ATTACH_ROOT_HUB	= 0x1 << 9
} xhci_attach_seq_t;

typedef enum xhci_state_flags {
	XHCI_S_ERROR		= 0x1 << 0
} xhci_state_flags_t;

typedef struct xhci {
	dev_info_t		*xhci_dip;
	xhci_attach_seq_t	xhci_seq;
	int			xhci_fm_caps;
	ddi_acc_handle_t	xhci_cfg_handle;
	uint16_t		xhci_vendor_id;
	uint16_t		xhci_device_id;
	caddr_t			xhci_regs_base;
	ddi_acc_handle_t	xhci_regs_handle;
	uint_t			xhci_regs_capoff;
	uint_t			xhci_regs_operoff;
	uint_t			xhci_regs_runoff;
	uint_t			xhci_regs_dooroff;
	xhci_capability_t	xhci_caps;
	xhci_quirk_t		xhci_quirks;
	ddi_intr_handle_t	xhci_intr_hdl;
	int			xhci_intr_num;
	int			xhci_intr_type;
	uint_t			xhci_intr_pri;
	int			xhci_intr_caps;
	xhci_dcbaa_t		xhci_dcbaa;
	xhci_scratchpad_t	xhci_scratchpad;
	xhci_command_ring_t	xhci_command;
	xhci_event_ring_t	xhci_event;
	taskq_ent_t		xhci_tqe;
	kmutex_t		xhci_lock;
	kcondvar_t		xhci_statecv;
	xhci_state_flags_t	xhci_state;
	xhci_usba_t		xhci_usba;
} xhci_t;

/*
 * The xHCI memory-mapped registers come in four different categories, and the
 * offset to each group is variable. These values represent the register set
 * that we're after.
 */
typedef enum xhci_reg_type {
	XHCI_R_CAP,
	XHCI_R_OPER,
	XHCI_R_RUN,
	XHCI_R_DOOR
} xhci_reg_type_t;
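
/*
 * Illustrative sketch (an assumption, not the driver's xhci_get*()/xhci_put*()
 * implementation): each access combines the cached offset of the selected
 * register group with the caller-supplied register offset.
 */
static inline uintptr_t
xhci_example_reg_offset(const xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
{
	switch (rtt) {
	case XHCI_R_CAP:
		return (xhcip->xhci_regs_capoff + off);
	case XHCI_R_OPER:
		return (xhcip->xhci_regs_operoff + off);
	case XHCI_R_RUN:
		return (xhcip->xhci_regs_runoff + off);
	case XHCI_R_DOOR:
		return (xhcip->xhci_regs_dooroff + off);
	default:
		return (off);
	}
}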

/*
 * Polled I/O data structure
 */
typedef struct xhci_polled {
	/*
	 * Pointer to the xhci_t structure for the device that is to be
	 * used as input in polled mode.
	 */
	xhci_t			*xhci_polled_xhci;

	/*
	 * Pipe handle for the pipe that is to be used as the input device
	 * in POLLED mode.
	 */
	usba_pipe_handle_data_t	*xhci_polled_input_pipe_handle;

	/* Endpoint for the above */
	xhci_endpoint_t		*xhci_polled_endpoint;

	/*
	 * The buffer that the USB HID scan codes are copied into.
	 * A USB keyboard will report up to 8 bytes consisting of the
	 * modifier status, a reserved byte, and up to 6 key presses.
	 * This buffer is sized to be large enough for one such report.
	 */
	uchar_t			xhci_polled_buf[8];

	/*
	 * Track how many times xhci_polled_input_enter() and
	 * xhci_polled_input_exit() have been called so that the host
	 * controller isn't switched back to OS mode prematurely.
	 */
	uint_t			xhci_polled_entry;

	/*
	 * Remember persistent errors that will prevent us from reading
	 * further input, to avoid repeatedly polling to no avail.
	 */
	int			xhci_polled_persistent_error;
} xhci_polled_t;

/*
 * Helper functions
 */
extern xhci_t *xhci_hcdi_get_xhcip_from_dev(usba_device_t *);
extern xhci_device_t *xhci_device_lookup_by_slot(xhci_t *, int);

/*
 * Quirks related functions
 */
extern void xhci_quirks_populate(xhci_t *);
extern void xhci_reroute_intel(xhci_t *);

/*
 * Interrupt related functions
 */
extern uint_t xhci_intr(caddr_t, caddr_t);
extern boolean_t xhci_ddi_intr_disable(xhci_t *);
extern boolean_t xhci_ddi_intr_enable(xhci_t *);
extern int xhci_intr_conf(xhci_t *);

/*
 * DMA related functions
 */
extern int xhci_check_dma_handle(xhci_t *, xhci_dma_buffer_t *);
extern void xhci_dma_acc_attr(xhci_t *, ddi_device_acc_attr_t *);
extern void xhci_dma_dma_attr(xhci_t *, ddi_dma_attr_t *);
extern void xhci_dma_scratchpad_attr(xhci_t *, ddi_dma_attr_t *);
extern void xhci_dma_transfer_attr(xhci_t *, ddi_dma_attr_t *, uint_t);
extern void xhci_dma_free(xhci_dma_buffer_t *);
extern boolean_t xhci_dma_alloc(xhci_t *, xhci_dma_buffer_t *, ddi_dma_attr_t *,
    ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
extern uint64_t xhci_dma_pa(xhci_dma_buffer_t *);

/*
 * DMA Transfer Ring functions
 */
extern xhci_transfer_t *xhci_transfer_alloc(xhci_t *, xhci_endpoint_t *, size_t,
    uint_t, int);
extern void xhci_transfer_free(xhci_t *, xhci_transfer_t *);
extern void xhci_transfer_copy(xhci_transfer_t *, void *, size_t, boolean_t);
extern int xhci_transfer_sync(xhci_t *, xhci_transfer_t *, uint_t);
extern void xhci_transfer_trb_fill_data(xhci_endpoint_t *, xhci_transfer_t *,
    int, boolean_t);
extern void xhci_transfer_calculate_isoc(xhci_device_t *, xhci_endpoint_t *,
    uint_t, uint_t *, uint_t *);

/*
 * Context (DCBAA, Scratchpad, Slot) functions
 */
extern int xhci_context_init(xhci_t *);
extern void xhci_context_fini(xhci_t *);
extern boolean_t xhci_context_slot_output_init(xhci_t *, xhci_device_t *);
extern void xhci_context_slot_output_fini(xhci_t *, xhci_device_t *);

/*
 * Command Ring Functions
 */
extern int xhci_command_ring_init(xhci_t *);
extern void xhci_command_ring_fini(xhci_t *);
extern boolean_t xhci_command_event_callback(xhci_t *, xhci_trb_t *trb);

extern void xhci_command_init(xhci_command_t *);
extern void xhci_command_fini(xhci_command_t *);

extern int xhci_command_enable_slot(xhci_t *, uint8_t *);
extern int xhci_command_disable_slot(xhci_t *, uint8_t);
extern int xhci_command_set_address(xhci_t *, xhci_device_t *, boolean_t);
extern int xhci_command_configure_endpoint(xhci_t *, xhci_device_t *);
extern int xhci_command_evaluate_context(xhci_t *, xhci_device_t *);
extern int xhci_command_reset_endpoint(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);
extern int xhci_command_set_tr_dequeue(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);
extern int xhci_command_stop_endpoint(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);

/*
 * Event Ring Functions
 */
extern int xhci_event_init(xhci_t *);
extern void xhci_event_fini(xhci_t *);
extern boolean_t xhci_event_process_trb(xhci_t *, xhci_trb_t *);
extern boolean_t xhci_event_process(xhci_t *);

/*
 * General Ring functions
 */
extern void xhci_ring_free(xhci_ring_t *);
extern int xhci_ring_reset(xhci_t *, xhci_ring_t *);
extern int xhci_ring_alloc(xhci_t *, xhci_ring_t *);

/*
 * Event Ring (Consumer) oriented functions.
 */
extern xhci_trb_t *xhci_ring_event_advance(xhci_ring_t *);


/*
 * Command and Transfer Ring (Producer) oriented functions.
 */
extern boolean_t xhci_ring_trb_tail_valid(xhci_ring_t *, uint64_t);
extern int xhci_ring_trb_valid_range(xhci_ring_t *, uint64_t, uint_t);

extern boolean_t xhci_ring_trb_space(xhci_ring_t *, uint_t);
extern void xhci_ring_trb_fill(xhci_ring_t *, uint_t, xhci_trb_t *, uint64_t *,
    boolean_t);
extern void xhci_ring_trb_produce(xhci_ring_t *, uint_t);
extern boolean_t xhci_ring_trb_consumed(xhci_ring_t *, uint64_t);
extern void xhci_ring_trb_put(xhci_ring_t *, xhci_trb_t *);
extern void xhci_ring_skip(xhci_ring_t *);
extern void xhci_ring_skip_transfer(xhci_ring_t *, xhci_transfer_t *);

/*
 * MMIO related functions. Note callers are responsible for checking with FM
 * after accessing registers.
 */
extern int xhci_check_regs_acc(xhci_t *);

extern uint8_t xhci_get8(xhci_t *, xhci_reg_type_t, uintptr_t);
extern uint16_t xhci_get16(xhci_t *, xhci_reg_type_t, uintptr_t);
extern uint32_t xhci_get32(xhci_t *, xhci_reg_type_t, uintptr_t);
extern uint64_t xhci_get64(xhci_t *, xhci_reg_type_t, uintptr_t);

extern void xhci_put8(xhci_t *, xhci_reg_type_t, uintptr_t, uint8_t);
extern void xhci_put16(xhci_t *, xhci_reg_type_t, uintptr_t, uint16_t);
extern void xhci_put32(xhci_t *, xhci_reg_type_t, uintptr_t, uint32_t);
extern void xhci_put64(xhci_t *, xhci_reg_type_t, uintptr_t, uint64_t);

/*
 * Runtime FM related functions
 */
extern void xhci_fm_runtime_reset(xhci_t *);

/*
 * Endpoint related functions
 */
extern int xhci_endpoint_init(xhci_t *, xhci_device_t *,
    usba_pipe_handle_data_t *);
extern void xhci_endpoint_fini(xhci_device_t *, int);
extern int xhci_endpoint_update_default(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);

extern int xhci_endpoint_setup_default_context(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);

extern uint_t xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *);
extern boolean_t xhci_endpoint_is_periodic_in(xhci_endpoint_t *);

extern int xhci_endpoint_quiesce(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
extern int xhci_endpoint_schedule(xhci_t *, xhci_device_t *, xhci_endpoint_t *,
    xhci_transfer_t *, boolean_t);
extern int xhci_endpoint_ring(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
extern boolean_t xhci_endpoint_transfer_callback(xhci_t *, xhci_trb_t *);

extern xhci_transfer_t *xhci_endpoint_determine_transfer(xhci_t *,
    xhci_endpoint_t *, xhci_trb_t *, uint_t *);

/*
 * USB Framework related functions
 */
extern int xhci_hcd_init(xhci_t *);
extern void xhci_hcd_fini(xhci_t *);

/*
 * Root hub related functions
 */
extern int xhci_root_hub_init(xhci_t *);
extern int xhci_root_hub_fini(xhci_t *);
extern int xhci_root_hub_ctrl_req(xhci_t *, usba_pipe_handle_data_t *,
    usb_ctrl_req_t *);
extern void xhci_root_hub_psc_callback(xhci_t *);
extern int xhci_root_hub_intr_root_enable(xhci_t *, usba_pipe_handle_data_t *,
    usb_intr_req_t *);
extern void xhci_root_hub_intr_root_disable(xhci_t *);

/*
 * Polled I/O functions
 */
extern int xhci_hcdi_console_input_init(usba_pipe_handle_data_t *, uchar_t **,
    usb_console_info_impl_t *);
extern int xhci_hcdi_console_input_fini(usb_console_info_impl_t *);
extern int xhci_hcdi_console_input_enter(usb_console_info_impl_t *);
extern int xhci_hcdi_console_read(usb_console_info_impl_t *, uint_t *);
extern int xhci_hcdi_console_input_exit(usb_console_info_impl_t *);
extern int xhci_hcdi_console_output_init(usba_pipe_handle_data_t *,
    usb_console_info_impl_t *);
extern int xhci_hcdi_console_output_fini(usb_console_info_impl_t *);
extern int xhci_hcdi_console_output_enter(usb_console_info_impl_t *);
extern int xhci_hcdi_console_write(usb_console_info_impl_t *, uchar_t *,
    uint_t, uint_t *);
extern int xhci_hcdi_console_output_exit(usb_console_info_impl_t *);

/*
 * Logging functions
 */
extern void xhci_log(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);
extern void xhci_error(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);

/*
 * Misc. data
 */
extern void *xhci_soft_state;

#ifdef __cplusplus
}
#endif

#endif /* _SYS_USB_XHCI_XHCI_H */