xref: /illumos-gate/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h (revision ce326879a41b052db3abafb44e551f9d9c40cdba)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2016 Joyent, Inc.
14  */
15 
16 #ifndef _SYS_USB_XHCI_XHCI_H
17 #define	_SYS_USB_XHCI_XHCI_H
18 
19 /*
20  * Extensible Host Controller Interface (xHCI) USB Driver
21  */
22 
23 #include <sys/conf.h>
24 #include <sys/ddi.h>
25 #include <sys/sunddi.h>
26 #include <sys/taskq_impl.h>
27 #include <sys/sysmacros.h>
28 #include <sys/usb/hcd/xhci/xhcireg.h>
29 
30 #include <sys/usb/usba.h>
31 #include <sys/usb/usba/hcdi.h>
32 #include <sys/usb/hubd/hub.h>
33 #include <sys/usb/usba/hubdi.h>
34 #include <sys/usb/hubd/hubdvar.h>
35 
36 
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40 
41 /*
42  * The base segment for DMA attributes was determined to be 4k based on xHCI 1.1
43  * / table 54: Data Structure Max Size, Boundary, and Alignment Requirement
44  * Summary.  This indicates that the required alignment for most things is
45  * PAGESIZE, which in our current implementation is required to be 4K. We
46  * provide the ring segment value below for the things which need 64K alignment
47  *
48  * Similarly, in the same table, the maximum required alignment is 64 bytes,
49  * hence we use that for everything.
50  *
51  * Next is the scatter/gather lengths. For most of the data structures, we only
52  * want to have a single SGL entry, e.g. just a simple flat mapping. For many of
53  * our transfers, we use the same logic to simplify the implementation of the
54  * driver. However, for bulk transfers, which are the largest by far, we want to
55  * be able to leverage SGLs to give us more DMA flexibility.
56  *
57  * We can transfer up to 64K in one transfer request block (TRB) which
58  * corresponds to a single SGL entry. Each ring we create is a single page in
59  * size and will support at most 256 TRBs. We've selected to use up to 8 SGLs
60  * for these transfer cases. This allows us to put up to 512 KiB in a given
61  * transfer request and in the worst case, we can have about 30 of them
62  * outstanding. Experimentally, this has proven to be sufficient for most of the
63  * drivers that we support today.
64  */
#define	XHCI_TRB_MAX_TRANSFER	65536	/* Max bytes per TRB (64 KiB) */
#define	XHCI_DMA_ALIGN		64	/* Max alignment required (bytes) */
#define	XHCI_DEF_DMA_SGL	1	/* Default: single flat cookie */
#define	XHCI_TRANSFER_DMA_SGL	8	/* SGL entries for bulk transfers */
#define	XHCI_MAX_TRANSFER	(XHCI_TRB_MAX_TRANSFER * XHCI_TRANSFER_DMA_SGL)
#define	XHCI_DMA_STRUCT_SIZE	4096	/* Segment boundary for structures */
71 
72 /*
73  * Properties and values for rerouting ehci ports to xhci.
74  */
75 #define	XHCI_PROP_REROUTE_DISABLE	0
76 #define	XHCI_PROP_REROUTE_DEFAULT	1
77 
78 /*
79  * This number is a bit made up. Truthfully, the API here isn't the most useful
80  * for what we need to define as it should really be based on the endpoint that
81  * we're interested in rather than the device as a whole.
82  *
83  * We're basically being asked how many TRBs we're willing to schedule in one
84  * go. There's no great way to come up with this number, so we basically are
85  * making up something such that we use up a good portion of a ring, but not too
86  * much of it.
87  */
88 #define	XHCI_ISOC_MAX_TRB	64
89 
#ifdef	DEBUG
/*
 * Synchronize an entire xhci_dma_buffer_t for the given direction (e.g.
 * DDI_DMA_SYNC_FORKERNEL or DDI_DMA_SYNC_FORDEV). The offset and length of
 * zero request a sync of the whole buffer. Under DEBUG we verify the call
 * succeeds; otherwise the return value is explicitly discarded.
 */
#define	XHCI_DMA_SYNC(dma, flag)	VERIFY0(ddi_dma_sync( \
					    (dma).xdb_dma_handle, 0, 0, \
					    (flag)))
#else
#define	XHCI_DMA_SYNC(dma, flag)	((void) ddi_dma_sync( \
					    (dma).xdb_dma_handle, 0, 0, \
					    (flag)))
#endif
99 
100 /*
101  * This defines a time in 2-ms ticks that is required to wait for the controller
102  * to be ready to go. Section 5.4.8 of the XHCI specification in the description
103  * of the PORTSC register indicates that the upper bound is 20 ms. Therefore the
104  * number of ticks is 10.
105  */
106 #define	XHCI_POWER_GOOD	10
107 
108 /*
109  * Definitions to determine the default number of interrupts. Note that we only
110  * bother with a single interrupt at this time, though we've arranged the driver
111  * to make it possible to request more if, for some unlikely reason, it becomes
112  * necessary.
113  */
114 #define	XHCI_NINTR	1
115 
116 /*
117  * Default interrupt modulation value. This enables us to have 4000 interrupts /
118  * second. This is supposed to be the default value of the controller. See xHCI
119  * 1.1 / 4.17.2 for more information.
120  */
121 #define	XHCI_IMOD_DEFAULT 	0x000003F8U
122 
123 /*
124  * Definitions that surround the default values used in various contexts. These
125  * come from various parts of the xHCI specification. In general, see xHCI 1.1 /
126  * 4.8.2. Note that the MPS_MASK is used for ISOCH and INTR endpoints which have
127  * different sizes.
128  *
129  * The burst member is a bit more complicated. By default for USB 2 devices, it
130  * only matters for ISOCH and INTR endpoints and so we use the macros below to
131  * pull it out of the endpoint description's max packet field. For USB 3, it
132  * matters for non-control endpoints. However, it comes out of a companion
133  * description.
134  *
135  * By default the mult member is zero for all cases except for super speed
136  * ISOCH endpoints, where it comes from the companion descriptor.
137  */
#define	XHCI_CONTEXT_DEF_CERR		3	/* Default error count (CErr) */
#define	XHCI_CONTEXT_ISOCH_CERR		0	/* Isoch endpoints use CErr 0 */
#define	XHCI_CONTEXT_MPS_MASK		0x07ff	/* wMaxPacketSize: size bits */
#define	XHCI_CONTEXT_BURST_MASK		0x1800	/* wMaxPacketSize: burst bits */
#define	XHCI_CONTEXT_BURST_SHIFT	11	/* Shift to extract burst */
#define	XHCI_CONTEXT_DEF_MULT		0	/* Default Mult value */
#define	XHCI_CONTEXT_DEF_MAX_ESIT	0	/* Default Max ESIT payload */
#define	XHCI_CONTEXT_DEF_CTRL_ATL	8	/* Ctrl EP average TRB length */
146 
147 /*
148  * This number represents the number of transfers that we'll set up for a given
149  * interrupt transfer. Note that the idea here is that we'll want to allocate a
150  * certain number of transfers to basically ensure that we'll always be able to
151  * have a transfer available, even if the system is a bit caught up in trying to
152  * process it and for some reason we can't fire the interrupt. As such, we
153  * basically want to have enough available that at the fastest interval (125 us)
154  * that we have enough. So in this case we choose 8, with the assumption that we
155  * should be able to process at least one in a given millisecond. Note that this
156  * is not based in fact and is really just as much a guess and a hope.
157  *
158  * While we could then use less resources for other interrupt transfers that are
159  * slower, starting with uniform resource usage will make things a bit easier.
160  */
161 #define	XHCI_INTR_IN_NTRANSFERS	8
162 
163 /*
164  * This number represents the number of xhci_transfer_t structures that we'll
165  * set up for a given isochronous transfer polling request. A given isochronous
166  * transfer may actually have multiple units of time associated with it. As
167  * such, we basically want to treat this like a case of classic double
168  * buffering. We have one ready to go while the other is being filled up. This
169  * will compensate for additional latency in the system. This is smaller than
170  * the Interrupt IN transfer case above as many callers may ask for multiple
171  * intervals in a single request.
172  */
173 #define	XHCI_ISOC_IN_NTRANSFERS	2
174 
175 #define	XHCI_PERIODIC_IN_NTRANSFERS					\
176 	MAX(XHCI_ISOC_IN_NTRANSFERS, XHCI_INTR_IN_NTRANSFERS)
177 
178 /*
179  * Mask for a route string which is a 20-bit value.
180  */
181 #define	XHCI_ROUTE_MASK(x)	((x) & 0xfffff)
182 
183 /*
184  * This is the default tick that we use for timeouts while endpoints have
185  * outstanding, active, non-periodic transfers. We choose one second as the USBA
186  * specifies timeouts in units of seconds. Note that this is in microseconds, so
187  * it can be fed into drv_usectohz().
188  */
189 #define	XHCI_TICK_TIMEOUT_US	(MICROSEC)
190 
191 /*
192  * Set of bits that we need one of to indicate that this port has something
193  * interesting on it.
194  */
195 #define	XHCI_HUB_INTR_CHANGE_MASK	(XHCI_PS_CSC | XHCI_PS_PEC | \
196     XHCI_PS_WRC | XHCI_PS_OCC | XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC)
197 
198 /*
199  * These represent known issues with various xHCI controllers.
200  *
201  * 	XHCI_QUIRK_NO_MSI	MSI support on this controller is known to be
202  * 				broken.
203  *
 * 	XHCI_QUIRK_32_ONLY	Only use 32-bit DMA addresses with this
205  * 				controller.
206  *
207  * 	XHCI_QUIRK_INTC_EHCI	This is an Intel platform which supports
208  * 				rerouting ports between EHCI and xHCI
209  * 				controllers on the platform.
210  */
typedef enum xhci_quirk {
	XHCI_QUIRK_NO_MSI	= 0x01,	/* MSI support is broken */
	XHCI_QUIRK_32_ONLY	= 0x02,	/* Only 32-bit DMA addresses */
	XHCI_QUIRK_INTC_EHCI	= 0x04	/* Intel EHCI/xHCI port rerouting */
} xhci_quirk_t;
216 
217 /*
218  * xHCI capability parameter flags. These are documented in xHCI 1.1 / 5.3.6.
219  */
/*
 * These flags correspond to bits in the HCCPARAMS1 register; see xHCI 1.1 /
 * 5.3.6 for the authoritative definitions of each capability bit.
 */
typedef enum xhci_cap_flags {
	XCAP_AC64	= 0x001,	/* 64-bit addressing capability */
	XCAP_BNC	= 0x002,	/* BW negotiation capability */
	XCAP_CSZ	= 0x004,	/* 64-byte context size */
	XCAP_PPC	= 0x008,	/* Port power control */
	XCAP_PIND	= 0x010,	/* Port indicators */
	XCAP_LHRC	= 0x020,	/* Light HC reset capability */
	XCAP_LTC	= 0x040,	/* Latency tolerance messaging */
	XCAP_NSS	= 0x080,	/* No secondary SID support */
	XCAP_PAE	= 0x100,	/* Parse all event data */
	XCAP_SPC	= 0x200,	/* Stopped - short packet capability */
	XCAP_SEC	= 0x400,	/* Stopped EDTLA capability */
	XCAP_CFC	= 0x800	/* Contiguous frame ID capability */
} xhci_cap_flags_t;

/*
 * The original typedef name transposed the 'h' and the 'c'. The misspelled
 * name is kept as an alias for source compatibility with existing users
 * (e.g. the xcap_flags member of xhci_capability_t).
 */
typedef xhci_cap_flags_t xchi_cap_flags_t;
234 
235 /*
236  * Second set of capabilities, these are documented in xHCI 1.1 / 5.3.9.
237  */
typedef enum xhci_cap2_flags {
	XCAP2_U3C	= 0x01,	/* U3 entry capability */
	XCAP2_CMC	= 0x02,	/* Configure endpoint command MEL capability */
	XCAP2_FMC	= 0x04,	/* Force save context capability */
	XCAP2_CTC	= 0x08,	/* Compliance transition capability */
	XCAP2_LEC	= 0x10,	/* Large ESIT payload capability */
	XCAP2_CIC	= 0x20	/* Configuration information capability */
} xhci_cap2_flags_t;
246 
247 /*
248  * These represent and store the various capability registers that we'll need to
249  * use. In addition, we stash a few other versioning related bits here. Note
250  * that we cache more information than we might need so that we have it for
251  * debugging purposes.
252  */
typedef struct xhci_capability {
	uint8_t			xcap_usb_vers;	/* Supported USB version */
	uint16_t		xcap_hci_vers;	/* HCIVERSION value */
	uint32_t		xcap_pagesize;	/* PAGESIZE register value */
	uint8_t			xcap_max_slots;	/* Max device slots */
	uint16_t		xcap_max_intrs;	/* Max interrupters */
	uint8_t			xcap_max_ports;	/* Max root hub ports */
	boolean_t		xcap_ist_micro;	/* IST in microframes? */
	uint8_t			xcap_ist;	/* Isoch sched. threshold */
	uint16_t		xcap_max_esrt;	/* Max event ring segments */
	boolean_t		xcap_scratch_restore;	/* Restore scratchpad? */
	uint16_t		xcap_max_scratch;	/* Scratchpad buf count */
	uint8_t			xcap_u1_lat;	/* U1 exit latency */
	uint16_t		xcap_u2_lat;	/* U2 exit latency */
	xchi_cap_flags_t	xcap_flags;	/* HCCPARAMS1 flags */
	uint8_t			xcap_max_psa;	/* Max primary stream array */
	uint16_t		xcap_xecp_off;	/* Extended cap. offset */
	xhci_cap2_flags_t	xcap_flags2;	/* HCCPARAMS2 flags */
	int			xcap_intr_types;	/* DDI intr types avail */
} xhci_capability_t;
273 
274 /*
275  * This represents a single logical DMA allocation. For the vast majority of
276  * non-transfer cases, it only represents a single DMA buffer and not a
277  * scatter-gather list.
278  */
typedef struct xhci_dma_buffer {
	caddr_t			xdb_va;		/* Buffer VA */
	size_t			xdb_len;	/* Buffer logical len */
	ddi_acc_handle_t	xdb_acc_handle;	/* Access handle */
	ddi_dma_handle_t	xdb_dma_handle;	/* DMA handle */
	int			xdb_ncookies;	/* Number of actual cookies */
	ddi_dma_cookie_t	xdb_cookies[XHCI_TRANSFER_DMA_SGL];	/* Cookies */
} xhci_dma_buffer_t;
287 
288 /*
289  * This is a single transfer descriptor. It's packed to match the hardware
290  * layout.
291  */
#pragma pack(1)
typedef struct xhci_trb {
	uint64_t	trb_addr;	/* Data buffer pointer / parameters */
	uint32_t	trb_status;	/* Status fields (e.g. length) */
	uint32_t	trb_flags;	/* TRB type, cycle bit, flags */
} xhci_trb_t;
#pragma pack()
299 
300 /*
301  * This represents a single transfer that we want to allocate and perform.
302  */
typedef struct xhci_transfer {
	list_node_t		xt_link;	/* Endpoint transfer list link */
	hrtime_t		xt_sched_time;	/* Time transfer was scheduled */
	xhci_dma_buffer_t	xt_buffer;	/* Transfer data buffer */
	uint_t			xt_ntrbs;	/* Number of entries in xt_trbs */
	uint_t			xt_short;	/* NOTE(review): appears to track
						 * short-transfer residue; confirm
						 * against ring consumers. */
	uint_t			xt_timeout;	/* Transfer timeout */
	usb_cr_t		xt_cr;		/* Completion reason for USBA */
	boolean_t		xt_data_tohost;	/* B_TRUE for device-to-host */
	xhci_trb_t		*xt_trbs;	/* TRB array for this transfer */
	usb_isoc_pkt_descr_t	*xt_isoc;	/* Isoc packet descriptors */
	usb_opaque_t		xt_usba_req;	/* Associated USBA request */
} xhci_transfer_t;
316 
317 /*
318  * This represents a ring in xHCI, upon which event, transfer, and command TRBs
319  * are scheduled.
320  */
typedef struct xhci_ring {
	xhci_dma_buffer_t	xr_dma;		/* Ring's backing DMA memory */
	uint_t			xr_ntrb;	/* Number of TRBs in the ring */
	xhci_trb_t		*xr_trb;	/* KVA of the TRB array */
	uint_t			xr_head;	/* Head (producer) index */
	uint_t			xr_tail;	/* Tail (consumer) index */
	uint8_t			xr_cycle;	/* Current cycle bit value */
} xhci_ring_t;
329 
330 /*
331  * This structure is used to represent the xHCI Device Context Base Address
332  * Array. It's defined in section 6.1 of the specification and is required for
333  * the controller to start.
334  *
335  * The maximum number of slots supported is always 256, therefore we size this
336  * structure at its maximum.
337  */
338 #define	XHCI_MAX_SLOTS	256
339 #define	XHCI_DCBAA_SCRATCHPAD_INDEX	0
340 
typedef struct xhci_dcbaa {
	uint64_t		*xdc_base_addrs;	/* KVA of address array */
	xhci_dma_buffer_t	xdc_dma;		/* Backing DMA memory */
} xhci_dcbaa_t;
345 
typedef struct xhci_scratchpad {
	uint64_t		*xsp_addrs;	/* KVA of scratchpad ptr array */
	xhci_dma_buffer_t	xsp_addr_dma;	/* DMA memory for ptr array */
	xhci_dma_buffer_t	*xsp_scratch_dma;	/* Per-buffer DMA memory */
} xhci_scratchpad_t;
351 
352 /*
353  * Contexts. These structures are inserted into the DCBAA above and are used for
354  * describing the state of the system. Note, that while many of these are
355  * 32-bytes in size, the xHCI specification defines that they'll be extended to
356  * 64-bytes with all the extra bytes as zeros if the CSZ flag is set in the
357  * HCCPARAMS1 register, e.g. we have the flag XCAP_CSZ set.
358  *
359  * The device context covers the slot context and 31 endpoints.
360  */
#define	XHCI_DEVICE_CONTEXT_32	1024	/* 32 contexts * 32 bytes */
#define	XHCI_DEVICE_CONTEXT_64	2048	/* 32 contexts * 64 bytes */
#define	XHCI_NUM_ENDPOINTS	31	/* Endpoint contexts per device */
#define	XHCI_DEFAULT_ENDPOINT	0	/* Index of default control EP */
365 
#pragma pack(1)
/*
 * Slot context, xHCI 1.1 / 6.2.2.
 */
typedef struct xhci_slot_context {
	uint32_t	xsc_info;
	uint32_t	xsc_info2;
	uint32_t	xsc_tt;		/* Transaction translator fields */
	uint32_t	xsc_state;	/* Slot state / device address */
	uint32_t	xsc_reserved[4];
} xhci_slot_context_t;

/*
 * Endpoint context, xHCI 1.1 / 6.2.3.
 */
typedef struct xhci_endpoint_context {
	uint32_t	xec_info;
	uint32_t	xec_info2;
	uint64_t	xec_dequeue;	/* TR dequeue pointer */
	uint32_t	xec_txinfo;	/* Average TRB len / Max ESIT payload */
	uint32_t	xec_reserved[3];
} xhci_endpoint_context_t;

/*
 * Input control context, xHCI 1.1 / 6.2.5.
 */
typedef struct xhci_input_context {
	uint32_t	xic_drop_flags;	/* Contexts to drop */
	uint32_t	xic_add_flags;	/* Contexts to add / evaluate */
	uint32_t	xic_reserved[6];
} xhci_input_context_t;
#pragma pack()
389 
390 /*
391  * Definitions and structures for maintaining the event ring.
392  */
393 #define	XHCI_EVENT_NSEGS	1
394 
#pragma pack(1)
/*
 * Event ring segment table entry, xHCI 1.1 / 6.5.
 */
typedef struct xhci_event_segment {
	uint64_t	xes_addr;	/* Segment base address */
	uint16_t	xes_size;	/* Segment size in TRBs */
	uint16_t	xes_rsvd0;
	uint32_t	xes_rsvd1;
} xhci_event_segment_t;
#pragma pack()
403 
typedef struct xhci_event_ring {
	xhci_event_segment_t	*xev_segs;	/* KVA of segment table */
	xhci_dma_buffer_t	xev_dma;	/* Segment table DMA memory */
	xhci_ring_t		xev_ring;	/* The event ring itself */
} xhci_event_ring_t;
409 
typedef enum xhci_command_ring_state {
	XHCI_COMMAND_RING_IDLE		= 0x00,	/* No commands outstanding */
	XHCI_COMMAND_RING_RUNNING	= 0x01,	/* Commands outstanding */
	XHCI_COMMAND_RING_ABORTING	= 0x02,	/* Abort in progress */
	XHCI_COMMAND_RING_ABORT_DONE	= 0x03	/* Abort has completed */
} xhci_command_ring_state_t;
416 
typedef struct xhci_command_ring {
	xhci_ring_t			xcr_ring;	/* Underlying TRB ring */
	kmutex_t			xcr_lock;	/* Protects this struct */
	kcondvar_t			xcr_cv;		/* Completion signaling */
	list_t				xcr_commands;	/* Queued commands */
	timeout_id_t			xcr_timeout;	/* timeout(9F) id */
	xhci_command_ring_state_t	xcr_state;	/* Current ring state */
} xhci_command_ring_t;
425 
426 /*
427  * Individual command states.
428  *
429  * XHCI_COMMAND_S_INIT		The command has yet to be inserted into the
430  * 				command ring.
431  *
432  * XHCI_COMMAND_S_QUEUED	The command is queued in the command ring.
433  *
434  * XHCI_COMMAND_S_RECEIVED	A command completion for this was received.
435  *
436  * XHCI_COMMAND_S_DONE		The command has been executed. Note that it may
437  * 				have been aborted.
438  *
439  * XHCI_COMMAND_S_RESET		The ring is being reset due to a fatal error and
440  * 				this command has been removed from the ring.
441  * 				This means it has been aborted, but it was not
442  * 				the cause of the abort.
443  *
444  * Note, when adding states, anything after XHCI_COMMAND_S_DONE implies that
445  * upon reaching this state, it is no longer in the ring.
446  */
typedef enum xhci_command_state {
	XHCI_COMMAND_S_INIT	= 0x00,	/* Not yet on the ring */
	XHCI_COMMAND_S_QUEUED	= 0x01,	/* On the command ring */
	XHCI_COMMAND_S_RECEIVED = 0x02,	/* Completion event received */
	XHCI_COMMAND_S_DONE	= 0x03,	/* Executed (possibly aborted) */
	XHCI_COMMAND_S_RESET	= 0x04	/* Removed due to fatal ring reset */
} xhci_command_state_t;
454 
455 /*
456  * The TRB contents here are always kept in host byte order and are transformed
457  * to little endian when actually scheduled on the ring.
458  */
typedef struct xhci_command {
	list_node_t		xco_link;	/* Link on xcr_commands */
	kcondvar_t		xco_cv;		/* Signals this cmd's completion */
	xhci_trb_t		xco_req;	/* Request TRB (host order) */
	xhci_trb_t		xco_res;	/* Completion TRB (host order) */
	xhci_command_state_t	xco_state;	/* Current command state */
} xhci_command_t;
466 
typedef enum xhci_endpoint_state {
	XHCI_ENDPOINT_PERIODIC		= 0x01,	/* Periodic polling active */
	XHCI_ENDPOINT_HALTED		= 0x02,	/* Endpoint is halted */
	XHCI_ENDPOINT_QUIESCE		= 0x04,	/* Quiesce in progress */
	XHCI_ENDPOINT_TIMED_OUT		= 0x08,	/* A transfer timed out */
	/*
	 * This is a composite of states that we need to watch for. We don't
	 * want to allow ourselves to set one of these flags while one of them
	 * is currently active.
	 */
	XHCI_ENDPOINT_SERIALIZE		= 0x0c,
	/*
	 * This is a composite of states that we need to make sure that if set,
	 * we do not schedule activity on the ring.
	 */
	XHCI_ENDPOINT_DONT_SCHEDULE	= 0x0e,
	/*
	 * This endpoint is being torn down and should make sure it de-schedules
	 * itself.
	 */
	XHCI_ENDPOINT_TEARDOWN		= 0x10
} xhci_endpoint_state_t;
489 
490 /*
491  * Forwards required for the endpoint
492  */
493 struct xhci_device;
494 struct xhci;
495 
typedef struct xhci_endpoint {
	struct xhci		*xep_xhci;	/* Back-pointer to controller */
	struct xhci_device	*xep_xd;	/* Owning device */
	uint_t			xep_num;	/* Endpoint index */
	uint_t			xep_type;	/* USB endpoint type */
	xhci_endpoint_state_t	xep_state;	/* Endpoint state flags */
	kcondvar_t		xep_state_cv;	/* Signals state transitions */
	timeout_id_t		xep_timeout;	/* timeout(9F) id */
	list_t			xep_transfers;	/* Outstanding transfers */
	usba_pipe_handle_data_t	*xep_pipe;	/* Associated USBA pipe */
	xhci_ring_t		xep_ring;	/* Transfer ring */
} xhci_endpoint_t;
508 
typedef struct xhci_device {
	list_node_t		xd_link;	/* Link on xa_devices */
	usb_port_t		xd_port;	/* USB port number */
	uint8_t			xd_slot;	/* Controller-assigned slot ID */
	boolean_t		xd_addressed;	/* Device has been addressed? */
	usba_device_t		*xd_usbdev;	/* USBA device handle */
	xhci_dma_buffer_t	xd_ictx;	/* Input context DMA memory */
	kmutex_t		xd_imtx;	/* Protects input contexts */
	xhci_input_context_t	*xd_input;	/* Input control context */
	xhci_slot_context_t	*xd_slotin;	/* Input slot context */
	xhci_endpoint_context_t	*xd_endin[XHCI_NUM_ENDPOINTS];	/* Input EPs */
	xhci_dma_buffer_t	xd_octx;	/* Output context DMA memory */
	xhci_slot_context_t	*xd_slotout;	/* Output slot context */
	xhci_endpoint_context_t	*xd_endout[XHCI_NUM_ENDPOINTS];	/* Output EPs */
	xhci_endpoint_t		*xd_endpoints[XHCI_NUM_ENDPOINTS];	/* EPs */
} xhci_device_t;
525 
typedef enum xhci_periodic_state {
	XHCI_PERIODIC_POLL_IDLE	= 0x0,	/* No polling in progress */
	XHCI_PERIODIC_POLL_ACTIVE,	/* Polling is active */
	XHCI_PERIODIC_POLL_NOMEM,	/* Paused; presumably awaiting memory */
	XHCI_PERIODIC_POLL_STOPPING	/* Polling is being stopped */
} xhci_periodic_state_t;
532 
typedef struct xhci_periodic_pipe {
	xhci_periodic_state_t	xpp_poll_state;	/* Current polling state */
	usb_opaque_t		xpp_usb_req;	/* Original USBA request */
	size_t			xpp_tsize;	/* Size of each transfer */
	uint_t			xpp_ntransfers;	/* Entries in xpp_transfers */
	xhci_transfer_t		*xpp_transfers[XHCI_PERIODIC_IN_NTRANSFERS];
} xhci_periodic_pipe_t;
540 
typedef struct xhci_pipe {
	list_node_t		xp_link;	/* Link on xa_pipes */
	hrtime_t		xp_opentime;	/* Time the pipe was opened */
	usba_pipe_handle_data_t	*xp_pipe;	/* USBA pipe handle */
	xhci_endpoint_t		*xp_ep;		/* Backing endpoint */
	xhci_periodic_pipe_t	xp_periodic;	/* Periodic (intr/isoc) state */
} xhci_pipe_t;
548 
typedef struct xhci_usba {
	usba_hcdi_ops_t		*xa_ops;	/* HCDI ops for USBA */
	ddi_dma_attr_t		xa_dma_attr;	/* DMA attrs handed to USBA */
	usb_dev_descr_t		xa_dev_descr;	/* Root hub device descriptor */
	usb_ss_hub_descr_t	xa_hub_descr;	/* Root hub SS hub descriptor */
	usba_pipe_handle_data_t	*xa_intr_cb_ph;	/* Root hub intr pipe */
	usb_intr_req_t		*xa_intr_cb_req;	/* Outstanding intr req */
	list_t			xa_devices;	/* All xhci_device_t's */
	list_t			xa_pipes;	/* All open xhci_pipe_t's */
} xhci_usba_t;
559 
/*
 * Attach progress flags. Each flag records a step of attach(9E) that
 * completed successfully, so failure and detach paths know exactly which
 * steps must be unwound.
 */
typedef enum xhci_attach_seq {
	XHCI_ATTACH_FM		= 0x1 << 0,
	XHCI_ATTACH_PCI_CONFIG	= 0x1 << 1,
	XHCI_ATTACH_REGS_MAP	= 0x1 << 2,
	XHCI_ATTACH_INTR_ALLOC	= 0x1 << 3,
	XHCI_ATTACH_INTR_ADD	= 0x1 << 4,
	XHCI_ATTACH_SYNCH	= 0x1 << 5,
	XHCI_ATTACH_INTR_ENABLE	= 0x1 << 6,
	XHCI_ATTACH_STARTED	= 0x1 << 7,
	XHCI_ATTACH_USBA	= 0x1 << 8,
	XHCI_ATTACH_ROOT_HUB	= 0x1 << 9
} xhci_attach_seq_t;
572 
typedef enum xhci_state_flags {
	XHCI_S_ERROR		= 0x1 << 0	/* Fatal error encountered */
} xhci_state_flags_t;
576 
/*
 * Per-instance soft state for the xHCI driver.
 */
typedef struct xhci {
	dev_info_t		*xhci_dip;	/* dev_info for this instance */
	xhci_attach_seq_t	xhci_seq;	/* Attach progress */
	int			xhci_fm_caps;	/* FM capabilities */
	ddi_acc_handle_t	xhci_cfg_handle;	/* PCI config handle */
	uint16_t		xhci_vendor_id;	/* PCI vendor ID */
	uint16_t		xhci_device_id;	/* PCI device ID */
	caddr_t			xhci_regs_base;	/* Base of mapped registers */
	ddi_acc_handle_t	xhci_regs_handle;	/* Register acc handle */
	uint_t			xhci_regs_capoff;	/* Capability offset */
	uint_t			xhci_regs_operoff;	/* Operational offset */
	uint_t			xhci_regs_runoff;	/* Runtime offset */
	uint_t			xhci_regs_dooroff;	/* Doorbell offset */
	xhci_capability_t	xhci_caps;	/* Cached capabilities */
	xhci_quirk_t		xhci_quirks;	/* Known controller quirks */
	ddi_intr_handle_t	xhci_intr_hdl;	/* Interrupt handle */
	int			xhci_intr_num;	/* Interrupt number */
	int			xhci_intr_type;	/* DDI interrupt type */
	uint_t			xhci_intr_pri;	/* Interrupt priority */
	int			xhci_intr_caps;	/* Interrupt capabilities */
	xhci_dcbaa_t		xhci_dcbaa;	/* Device context base array */
	xhci_scratchpad_t	xhci_scratchpad;	/* Scratchpad buffers */
	xhci_command_ring_t	xhci_command;	/* Command ring */
	xhci_event_ring_t	xhci_event;	/* Event ring */
	taskq_ent_t		xhci_tqe;	/* Taskq entry for async work */
	kmutex_t		xhci_lock;	/* Protects driver state */
	kcondvar_t		xhci_statecv;	/* Signals state changes */
	xhci_state_flags_t	xhci_state;	/* Driver state flags */
	xhci_usba_t		xhci_usba;	/* USBA framework state */
} xhci_t;
607 
608 /*
609  * The xHCI memory mapped registers come in four different categories. The
610  * offset to them is variable. These represent the given register set that we're
611  * after.
612  */
typedef enum xhci_reg_type {
	XHCI_R_CAP,	/* Capability registers */
	XHCI_R_OPER,	/* Operational registers */
	XHCI_R_RUN,	/* Runtime registers */
	XHCI_R_DOOR	/* Doorbell array */
} xhci_reg_type_t;
619 
620 /*
621  * Quirks related functions
622  */
623 extern void xhci_quirks_populate(xhci_t *);
624 extern void xhci_reroute_intel(xhci_t *);
625 
626 /*
627  * Interrupt related functions
628  */
629 extern uint_t xhci_intr(caddr_t, caddr_t);
630 extern boolean_t xhci_ddi_intr_disable(xhci_t *);
631 extern boolean_t xhci_ddi_intr_enable(xhci_t *);
632 extern int xhci_intr_conf(xhci_t *);
633 
634 /*
635  * DMA related functions
636  */
637 extern int xhci_check_dma_handle(xhci_t *, xhci_dma_buffer_t *);
638 extern void xhci_dma_acc_attr(xhci_t *, ddi_device_acc_attr_t *);
639 extern void xhci_dma_dma_attr(xhci_t *, ddi_dma_attr_t *);
640 extern void xhci_dma_scratchpad_attr(xhci_t *, ddi_dma_attr_t *);
641 extern void xhci_dma_transfer_attr(xhci_t *, ddi_dma_attr_t *, uint_t);
642 extern void xhci_dma_free(xhci_dma_buffer_t *);
643 extern boolean_t xhci_dma_alloc(xhci_t *, xhci_dma_buffer_t *, ddi_dma_attr_t *,
644     ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
645 extern uint64_t xhci_dma_pa(xhci_dma_buffer_t *);
646 
647 /*
648  * DMA Transfer Ring functions
649  */
650 extern xhci_transfer_t *xhci_transfer_alloc(xhci_t *, xhci_endpoint_t *, size_t,
651     int, int);
652 extern void xhci_transfer_free(xhci_t *, xhci_transfer_t *);
653 extern void xhci_transfer_copy(xhci_transfer_t *, void *, size_t, boolean_t);
654 extern int xhci_transfer_sync(xhci_t *, xhci_transfer_t *, uint_t);
655 extern void xhci_transfer_trb_fill_data(xhci_endpoint_t *, xhci_transfer_t *,
656     int, boolean_t);
657 extern void xhci_transfer_calculate_isoc(xhci_device_t *, xhci_endpoint_t *,
658     uint_t, uint_t *, uint_t *);
659 
660 /*
661  * Context (DCBAA, Scratchpad, Slot) functions
662  */
663 extern int xhci_context_init(xhci_t *);
664 extern void xhci_context_fini(xhci_t *);
665 extern boolean_t xhci_context_slot_output_init(xhci_t *, xhci_device_t *);
666 extern void xhci_context_slot_output_fini(xhci_t *, xhci_device_t *);
667 
668 /*
669  * Command Ring Functions
670  */
671 extern int xhci_command_ring_init(xhci_t *);
672 extern void xhci_command_ring_fini(xhci_t *);
673 extern boolean_t xhci_command_event_callback(xhci_t *, xhci_trb_t *trb);
674 
675 extern void xhci_command_init(xhci_command_t *);
676 extern void xhci_command_fini(xhci_command_t *);
677 
678 extern int xhci_command_enable_slot(xhci_t *, uint8_t *);
679 extern int xhci_command_disable_slot(xhci_t *, uint8_t);
680 extern int xhci_command_set_address(xhci_t *, xhci_device_t *, boolean_t);
681 extern int xhci_command_configure_endpoint(xhci_t *, xhci_device_t *);
682 extern int xhci_command_evaluate_context(xhci_t *, xhci_device_t *);
683 extern int xhci_command_reset_endpoint(xhci_t *, xhci_device_t *,
684     xhci_endpoint_t *);
685 extern int xhci_command_set_tr_dequeue(xhci_t *, xhci_device_t *,
686     xhci_endpoint_t *);
687 extern int xhci_command_stop_endpoint(xhci_t *, xhci_device_t *,
688     xhci_endpoint_t *);
689 
690 /*
691  * Event Ring Functions
692  */
693 extern int xhci_event_init(xhci_t *);
694 extern void xhci_event_fini(xhci_t *);
695 extern boolean_t xhci_event_process(xhci_t *);
696 
697 /*
698  * General Ring functions
699  */
700 extern void xhci_ring_free(xhci_ring_t *);
701 extern int xhci_ring_reset(xhci_t *, xhci_ring_t *);
702 extern int xhci_ring_alloc(xhci_t *, xhci_ring_t *);
703 
704 /*
705  * Event Ring (Consumer) oriented functions.
706  */
707 extern xhci_trb_t *xhci_ring_event_advance(xhci_ring_t *);
708 
709 
710 /*
711  * Command and Transfer Ring (Producer) oriented functions.
712  */
713 extern boolean_t xhci_ring_trb_tail_valid(xhci_ring_t *, uint64_t);
714 extern int xhci_ring_trb_valid_range(xhci_ring_t *, uint64_t, uint_t);
715 
716 extern boolean_t xhci_ring_trb_space(xhci_ring_t *, uint_t);
717 extern void xhci_ring_trb_fill(xhci_ring_t *, uint_t, xhci_trb_t *, boolean_t);
718 extern void xhci_ring_trb_produce(xhci_ring_t *, uint_t);
719 extern boolean_t xhci_ring_trb_consumed(xhci_ring_t *, uint64_t);
720 extern void xhci_ring_trb_put(xhci_ring_t *, xhci_trb_t *);
721 extern void xhci_ring_skip(xhci_ring_t *);
722 extern void xhci_ring_skip_transfer(xhci_ring_t *, xhci_transfer_t *);
723 
724 /*
725  * MMIO related functions. Note callers are responsible for checking with FM
726  * after accessing registers.
727  */
728 extern int xhci_check_regs_acc(xhci_t *);
729 
730 extern uint8_t xhci_get8(xhci_t *, xhci_reg_type_t, uintptr_t);
731 extern uint16_t xhci_get16(xhci_t *, xhci_reg_type_t, uintptr_t);
732 extern uint32_t xhci_get32(xhci_t *, xhci_reg_type_t, uintptr_t);
733 extern uint64_t xhci_get64(xhci_t *, xhci_reg_type_t, uintptr_t);
734 
735 extern void xhci_put8(xhci_t *, xhci_reg_type_t, uintptr_t, uint8_t);
736 extern void xhci_put16(xhci_t *, xhci_reg_type_t, uintptr_t, uint16_t);
737 extern void xhci_put32(xhci_t *, xhci_reg_type_t, uintptr_t, uint32_t);
738 extern void xhci_put64(xhci_t *, xhci_reg_type_t, uintptr_t, uint64_t);
739 
740 /*
741  * Runtime FM related functions
742  */
743 extern void xhci_fm_runtime_reset(xhci_t *);
744 
745 /*
746  * Endpoint related functions
747  */
748 extern int xhci_endpoint_init(xhci_t *, xhci_device_t *,
749     usba_pipe_handle_data_t *);
750 extern void xhci_endpoint_fini(xhci_device_t *, int);
751 extern int xhci_endpoint_update_default(xhci_t *, xhci_device_t *,
752     xhci_endpoint_t *);
753 
754 extern int xhci_endpoint_setup_default_context(xhci_t *, xhci_device_t *,
755     xhci_endpoint_t *);
756 
757 extern uint_t xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *);
758 extern boolean_t xhci_endpoint_is_periodic_in(xhci_endpoint_t *);
759 
760 extern int xhci_endpoint_quiesce(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
761 extern int xhci_endpoint_schedule(xhci_t *, xhci_device_t *, xhci_endpoint_t *,
762     xhci_transfer_t *, boolean_t);
763 extern int xhci_endpoint_ring(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
764 extern boolean_t xhci_endpoint_transfer_callback(xhci_t *, xhci_trb_t *);
765 
766 /*
767  * USB Framework related functions
768  */
769 extern int xhci_hcd_init(xhci_t *);
770 extern void xhci_hcd_fini(xhci_t *);
771 
772 /*
773  * Root hub related functions
774  */
775 extern int xhci_root_hub_init(xhci_t *);
776 extern int xhci_root_hub_fini(xhci_t *);
777 extern int xhci_root_hub_ctrl_req(xhci_t *, usba_pipe_handle_data_t *,
778     usb_ctrl_req_t *);
779 extern void xhci_root_hub_psc_callback(xhci_t *);
780 extern int xhci_root_hub_intr_root_enable(xhci_t *, usba_pipe_handle_data_t *,
781     usb_intr_req_t *);
782 extern void xhci_root_hub_intr_root_disable(xhci_t *);
783 
784 /*
785  * Logging functions
786  */
787 extern void xhci_log(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);
788 extern void xhci_error(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);
789 
790 /*
791  * Misc. data
792  */
793 extern void *xhci_soft_state;
794 
795 #ifdef __cplusplus
796 }
797 #endif
798 
799 #endif /* _SYS_USB_XHCI_XHCI_H */
800