xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci.c (revision 28ab0ca48b3e331cbbb231b1c8325f9f24f9af95)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2019, Joyent, Inc.
14  */
15 
16 /*
17  * Extensible Host Controller Interface (xHCI) USB Driver
18  *
19  * The xhci driver is an HCI driver for USB that bridges the gap between client
20  * device drivers and the hardware, implementing how we actually talk to devices.
21  * The xhci specification provides access to USB 3.x capable devices, as well as
22  * all prior generations. Like other host controllers, it both provides the way
23  * to talk to devices and also is treated like a hub (often called the root hub).
24  *
25  * This driver is part of the USBA (USB Architecture). It implements the HCDI
26  * (host controller device interface) end of USBA. These entry points are used
27  * by the USBA on behalf of client device drivers to access their devices. The
28  * driver also provides notifications to deal with hot plug events, which are
29  * quite common in USB.
30  *
31  * ----------------
32  * USB Introduction
33  * ----------------
34  *
35  * To properly understand the xhci driver and the design of the USBA HCDI
36  * interfaces it implements, it helps to have a bit of background on how USB
37  * devices are structured and how they work at a high level.
38  *
39  * USB devices, like PCI devices, are broken down into different classes of
40  * device. For example, with USB you have hubs, human-input devices (keyboards,
41  * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
42  * Many client drivers bind to an entire class of device, for example, the hubd
43  * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
44  * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
45  *
46  * USB SPEEDS AND VERSIONS
47  *
48  * USB devices are often referred to in two different ways. One way they're
49  * described is with the USB version that they conform to. In the wild, you're
50  * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
51  * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
52  * devices.
53  *
54  * The latter description describes the maximum theoretical speed of a given
55  * device. For example, a super-speed device theoretically caps out around 5
56  * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
57  *
58  * In general, each speed usually corresponds to a specific USB protocol
59  * generation. For example, all USB 3.0 devices are super-speed devices. All
60  * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
61  * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
62  * only a USB 1.x thing; they did not jump the fire line to USB 2.x.
63  *
64  * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
65  * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
66  * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
67  * device.
68  *
69  * USB ENDPOINTS
70  *
71  * A given USB device is made up of endpoints. A request, or transfer, is made
72  * to a specific USB endpoint. These endpoints can provide different services
73  * and have different expectations around the size of the data that'll be used
74  * in a given request and the periodicity of requests. Endpoints themselves are
75  * either used to make one-shot requests, for example, making requests to a mass
76  * storage device for a given sector, or for making periodic requests where you
77  * end up polling on the endpoint, for example, polling on a USB keyboard for
78  * keystrokes.
79  *
80  * Each endpoint encodes two different pieces of information: a direction and a
81  * type. There are two different directions: IN and OUT. These refer to the
82  * general direction that data moves relative to the operating system. For
83  * example, an IN transfer transfers data in to the operating system, from the
84  * device. An OUT transfer transfers data from the operating system, out to the
85  * device.
86  *
87  * There are four different kinds of endpoints:
88  *
89  *	BULK		These transfers are large transfers of data to or from
90  *			a device. The most common use for bulk transfers is for
91  *			mass storage devices, though they are often also used by
92  *			network devices and more. Bulk endpoints do not have an
93  *			explicit time component to them. They are always used
94  *			for one-shot transfers.
95  *
96  *	CONTROL		These transfers are used to manipulate devices
97  *			themselves and are used for USB protocol level
98  *			operations (whether device-specific, class-specific, or
99  *			generic across all of USB). Unlike other transfers,
100  *			control transfers are always bi-directional and are made
101  *			up of multiple stages (setup, optional data, and status).
102  *
103  *	INTERRUPT	Interrupt transfers are used for small transfers that
104  *			happen infrequently, but need reasonable latency. A good
105  *			example of interrupt transfers is to receive input from
106  *			a USB keyboard. Interrupt-IN transfers are generally
107  *			polled, meaning that a client (device driver) opens up
108  *			an interrupt-IN pipe to poll on it, and receives
109  *			periodic updates whenever there is information
110  *			available. However, Interrupt transfers can be used
111  *			as one-shot transfers both going IN and OUT.
112  *
113  *	ISOCHRONOUS	These transfers are things that happen once per
114  *			time-interval at a very regular rate. A good example of
115  *			these transfers are for audio and video. A device may
116  *			describe an interval as 10ms at which point it will read
117  *			or write the next batch of data every 10ms and transform
118  *			it for the user. There are no one-shot Isochronous-IN
119  *			transfers. There are one-shot Isochronous-OUT transfers,
120  *			but these are used by device drivers to always provide
121  *			the system with sufficient data.
122  *
123  * To find out information about the endpoints, USB devices have a series of
124  * descriptors that cover different aspects of the device. For example, there
125  * are endpoint descriptors which cover the properties of endpoints such as the
126  * maximum packet size or polling interval.
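 *
 * As an aside, the type and direction discussed above are encoded in the
 * endpoint descriptor itself. A purely illustrative sketch of decoding them,
 * where 'ep' is assumed to point at a usb_ep_descr(9S) structure obtained
 * from the configuration cloud and the mask constants come from
 * <sys/usb/usbai.h>, might look like:
 *
 *	boolean_t dir_in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
 *	    USB_EP_DIR_IN;
 *	uchar_t type = ep->bmAttributes & USB_EP_ATTR_MASK;
 *
 * where the resulting type is one of USB_EP_ATTR_CONTROL, USB_EP_ATTR_ISOCH,
 * USB_EP_ATTR_BULK, or USB_EP_ATTR_INTR. The driver itself relies on USBA to
 * hand it these descriptors rather than parsing them off the device.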
127  *
128  * Descriptors exist at all levels of USB. For example, there are general
129  * descriptors for every device. The USB device descriptor is described in
130  * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
131  * that they program the device correctly; however, they are more often used by
132  * client device drivers. There are also descriptors that exist at a class
133  * level. For example, the hub class has a class-specific descriptor which
134  * describes properties of the hub. That information is requested and used by
135  * the hub driver.
136  *
137  * All of the different descriptors are gathered by the system and placed into a
138  * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
139  * drivers gain access to this cloud and then use it to open endpoints, which
140  * are called pipes in USBA (and some revisions of the USB specification).
141  *
142  * Each pipe gives access to a specific endpoint on the device which can be used
143  * to perform transfers of a specific type and direction. For example, a mass
144  * storage device often has three different endpoints, the default control
145  * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
146  * endpoint. The device driver ends up with three open pipes. One to the default
147  * control endpoint to configure the device, and then the other two are used to
148  * perform I/O.
149  *
150  * These pipe operations translate more or less directly into calls to a host
151  * controller driver. A request to open a pipe takes an endpoint descriptor that
152  * describes the properties of the pipe, and the host controller driver (this
153  * driver) goes through and does any work necessary to allow the client device
154  * driver to access it. Once the pipe is open, it either makes one-shot
155  * transfers specific to the transfer type or it starts performing a periodic
156  * poll of an endpoint.
157  *
158  * All of these different actions translate into requests to the host
159  * controller. The host controller driver itself is in charge of making sure
160  * that all of the required resources for polling are allocated when the request
161  * is made, and of then delivering the client driver's periodic callbacks.
162  *
163  * HUBS AND HOST CONTROLLERS
164  *
165  * Every device is always plugged into a hub, even if the device is itself a
166  * hub. This continues until we reach what we call the root-hub. The root-hub is
167  * special in that it is not an actual USB hub, but is integrated into the host
168  * controller and is manipulated in its own way. For example, the host
169  * controller is used to turn on and off a given port's power. This may happen
170  * over any interface, though the most common way is through PCI.
171  *
172  * In addition to the normal character device that exists for a host controller
173  * driver, as part of attaching, the host controller binds to an instance of the
174  * hubd driver. While the root-hub is a bit of a fiction, everyone models the
175  * root-hub as the same as any other hub that's plugged in. The hub kernel
176  * module doesn't know that the hub isn't a physical device that's been plugged
177  * in. The host controller driver simulates that view by taking hub requests
178  * that are made and translating them into corresponding requests that are
179  * understood by the host controller, for example, reading and writing to a
180  * memory mapped register.
181  *
182  * The hub driver polls for changes in device state using an Interrupt-IN
183  * request, which is the same as is done for the root-hub. This allows the host
184  * controller driver to not have to know about the implementation of device hot
185  * plug, merely react to requests from a hub, the same as if it were an external
186  * device. When the hub driver detects a change, it will go through the
187  * corresponding state machine and attach or detach the corresponding client
188  * device driver, depending if the device was inserted or removed.
189  *
190  * We detect the changes for the Interrupt-IN primarily based on the port state
191  * change events that are delivered to the event ring. Whenever any event is
192  * fired, we use this to update the hub driver about _all_ ports with
193  * outstanding events. This more closely matches how a hub is supposed to behave
194  * and makes it less likely that the hub driver ends up leaving a flag on a port
195  * uncleared.
196  *
197  * PACKET SIZES AND BURSTING
198  *
199  * A given USB endpoint has an explicit packet size and a number of packets that
200  * can be sent per time interval. These concepts are abstracted away from client
201  * device drivers usually, though they sometimes inform the upper bounds of what
202  * a device can perform.
203  *
204  * The host controller uses this information to transform arbitrary transfer
205  * requests into USB protocol packets. One of the nice things about the host
206  * controllers is that they abstract away all of the signaling and semantics of
207  * the actual USB protocols, allowing for life to be slightly easier in the
208  * operating system.
209  *
210  * That said, if the host controller is not programmed correctly, this can end
211  * up causing transaction errors and other problems in response to the data that
212  * the host controller is trying to send or receive.
213  *
214  * ------------
215  * Organization
216  * ------------
217  *
218  * The driver is made up of the following files. Many of these have their own
219  * theory statements to describe what they do. Here, we touch on the purpose of
220  * each of these files.
221  *
222  * xhci_command.c:	This file contains the logic to issue commands to the
223  *			controller as well as the actual functions that the
224  *			other parts of the driver use to invoke those commands.
225  *
226  * xhci_context.c:	This file manages the context data structures used by
227  *			the controller, both controller-wide and per-device.
228  *			See the xHCI Overview and Structure Layout section for
229  *			more information.
230  *
231  * xhci_dma.c:		This manages the allocation of DMA memory and DMA
232  *			attributes for the controller, whether memory is for a
233  *			transfer or something else. This file also deals with
234  *			all the logic of getting data in and out of DMA buffers.
235  *
236  * xhci_endpoint.c:	This manages all of the logic of handling endpoints or
237  *			pipes. It deals with endpoint configuration, I/O
238  *			scheduling, timeouts, and callbacks to USBA.
239  *
240  * xhci_event.c:	This manages callbacks from the hardware to the driver.
241  *			This covers command completion notifications and I/O
242  *			notifications.
243  *
244  * xhci_hub.c:		This manages the virtual root-hub. It basically
245  *			implements and translates all of the USB level requests
246  *			into xhci-specific operations. It also contains the
247  *			functions to register this hub with USBA.
248  *
249  * xhci_intr.c:		This manages the underlying interrupt allocation,
250  *			interrupt moderation, and interrupt routines.
251  *
252  * xhci_quirks.c:	This manages information about buggy hardware that's
253  *			been collected from experience, primarily on other
254  *			operating systems.
255  *
256  * xhci_ring.c:		This manages the abstraction of a ring in xhci, which is
257  *			the primary means of communication between the driver and
258  *			the hardware, whether for the controller or a device.
259  *
260  * xhci_usba.c:		This implements all of the HCDI functions required by
261  *			USBA. This is the main entry point that drivers and the
262  *			kernel frameworks will reach to start any operation.
263  *			Many functions here will end up in the command and
264  *			endpoint code.
265  *
266  * xhci.c:		This provides the main kernel DDI interfaces and
267  *			performs device initialization.
268  *
269  * xhci_polled.c:	This provides the polled I/O functions that the
270  *			kernel debugger can use.
271  *
272  * xhci.h:		This is the primary header file which defines
273  *			illumos-specific data structures and constants to manage
274  *			the system.
275  *
276  * xhcireg.h:		This header file defines all of the register offsets,
277  *			masks, and related macros. It also contains all of the
278  *			constants that are used in various structures as defined
279  *			by the specification, such as command offsets, etc.
280  *
281  * xhci_ioctl.h:	This contains a few private ioctls that are used by a
282  *			private debugging command. These are private.
283  *
284  * cmd/xhci/xhci_portsc:	This is a private utility that can be useful for
285  *				debugging xhci state. It is the only consumer of
286  *				xhci_ioctl.h and the private ioctls.
287  *
288  * ----------------------------------
289  * xHCI Overview and Structure Layout
290  * ----------------------------------
291  *
292  * The design and structure of this driver follow from the way that the xHCI
293  * specification tells us that we have to work with hardware. First we'll give a
294  * rough summary of how that works, though the xHCI 1.1 specification should be
295  * referenced when going through this.
296  *
297  * There are three primary parts of the hardware -- registers, contexts, and
298  * rings. The registers are memory mapped registers that come in four sets,
299  * though all are found within the first BAR. These are used to program and
300  * control the hardware and aspects of the devices. Beyond more traditional
301  * device programming there are two primary sets of registers that are
302  * important:
303  *
304  *   o Port Status and Control Registers (XHCI_PORTSC)
305  *   o Doorbell Array (XHCI_DOORBELL)
306  *
307  * The port status and control registers are used to get and manipulate the
308  * status of a given device. For example, turning on and off the power to it.
309  * The Doorbell Array is used to kick off I/O operations and start the
310  * processing of an I/O ring.
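 *
 * As a small, concrete example of the latter, ringing the doorbell for a
 * device is a single 32-bit register write using the helpers later in this
 * file. Assuming the doorbell offset macro and target encoding provided by
 * xhcireg.h, a sketch would be:
 *
 *	xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(slot), epid);
 *
 * where 'slot' is the device's slot number and 'epid' names which of that
 * device's endpoint rings the controller should start processing.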
311  *
312  * The contexts are data structures that represent various pieces of information
313  * in the controller. These contexts are generally filled out by the driver and
314  * then acknowledged and consumed by the hardware. There are controller-wide
315  * contexts (mostly managed in xhci_context.c) that are used to point to the
316  * contexts that exist for each device in the system. The primary context is
317  * called the Device Context Base Address Array (DCBAA).
318  *
319  * Each device in the system is allocated a 'slot', which is used to index into
320  * the DCBAA. Slots are assigned based on issuing commands to the controller.
321  * There are a fixed number of slots that determine the maximum number of
322  * devices that can end up being supported in the system. Note this includes all
323  * the devices plugged into the USB device tree, not just devices plugged into
324  * ports on the chassis.
325  *
326  * For each device, there is a context structure that describes properties of
327  * the device. For example, what speed is the device, is it a hub, etc. The
328  * context has slots for the device and for each endpoint on the device. As
329  * endpoints are enabled, their context information, which describes things like
330  * the maximum packet size, is filled in and enabled. The mapping between these
331  * contexts looks like:
332  *
333  *
334  *      DCBAA
335  *    +--------+                    Device Context
336  *    | Slot 0 |------------------>+--------------+
337  *    +--------+                   | Slot Context |
338  *    |  ...   |                   +--------------+       +----------+
339  *    +--------+   +------+        |  Endpoint 0  |------>| I/O Ring |
340  *    | Slot n |-->| NULL |        | Context (Bi) |       +----------+
341  *    +--------+   +------+        +--------------+
342  *                                 |  Endpoint 1  |
343  *                                 | Context (Out)|
344  *                                 +--------------+
345  *                                 |  Endpoint 1  |
346  *                                 | Context (In) |
347  *                                 +--------------+
348  *                                 |      ...     |
349  *                                 +--------------+
350  *                                 | Endpoint 15  |
351  *                                 | Context (In) |
352  *                                 +--------------+
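 *
 * In other words, associating a device's context with its slot boils down to
 * writing the physical address of that context into the slot's DCBAA entry.
 * A rough sketch (the variable names here are illustrative, not the driver's
 * actual fields) would be:
 *
 *	dcbaa_kva[slot] = LE_64(devctx_cookie.dmac_laddress);
 *
 * followed by syncing the DCBAA's DMA buffer so the controller sees the
 * update.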
353  *
354  * These contexts are always owned by the controller, though we can read them
355  * after various operations complete. Commands that toggle device state use a
356  * specific input context, which is a variant of the device context. The only
357  * difference is that it has an input context structure ahead of it to say which
358  * sections of the device context should be evaluated.
359  *
360  * Each active endpoint points us to an I/O ring, which leads us to the third
361  * main data structure that's used by the device: rings. Rings are made up of
362  * transfer request blocks (TRBs), which are joined together to form a given
363  * transfer description (TD) which represents a single I/O request.
364  *
365  * These rings are used to issue I/O to individual endpoints, to issue commands
366  * to the controller, and to receive notification of changes and completions.
367  * Issued commands go on the special ring called the command ring while the
368  * change and completion notifications go on the event ring.  More details are
369  * available in xhci_ring.c. Each of these structures is represented by an
370  * xhci_ring_t.
371  *
372  * Each ring can be made up of one or more disjoint regions of DMA; however, we
373  * only use a single one. This also impacts some additional registers and
374  * structures that exist. The event ring has an indirection table called the
375  * Event Ring Segment Table (ERST). Each entry in the table (a segment)
376  * describes a chunk of the event ring.
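 *
 * Each ERST entry is a small structure fixed by the specification: a 64-bit
 * base address for the segment and a count of how many TRBs it holds. A
 * sketch of that layout (member names here are illustrative; the driver's
 * real definition lives in its headers) is roughly:
 *
 *	typedef struct xhci_erst_entry {
 *		uint64_t	erst_base;
 *		uint32_t	erst_size;
 *		uint32_t	erst_rsvd;
 *	} xhci_erst_entry_t;
 *
 * with erst_base holding the 64-byte aligned physical address of the segment
 * and the low 16 bits of erst_size holding the number of TRBs within it.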
377  *
378  * One other thing worth calling out is the scratchpad. The scratchpad is a way
379  * for the controller to be given arbitrary memory by the OS that it can use.
380  * There are two parts to the scratchpad. The first part is an array whose
381  * entries contain pointers to the actual addresses for the pages. The second
382  * part is the actual pages themselves, which we also allocate.
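 *
 * Per the specification, the controller finds the scratchpad through the
 * DCBAA: entry zero of the DCBAA holds the physical address of the scratchpad
 * array, and each array entry holds the page-aligned physical address of one
 * scratchpad page. A rough sketch of setting that up (names illustrative):
 *
 *	for (i = 0; i < nscratch; i++)
 *		sp_array_kva[i] = LE_64(sp_page_pa[i]);
 *	dcbaa_kva[0] = LE_64(sp_array_pa);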
383  *
384  * -----------------------------
385  * Endpoint State and Management
386  * -----------------------------
387  *
388  * Endpoint management is one of the key parts to the xhci driver as every
389  * endpoint is a pipe that a device driver uses, so they are our primary
390  * currency. Endpoints are enabled and disabled when the client device drivers
391  * open and close a pipe. When an endpoint is enabled, we have to fill in an
392  * endpoint's context structure with information about the endpoint. These
393  * basically tell the controller important properties which it uses to ensure
394  * that there is adequate bandwidth for the device.
395  *
396  * Each endpoint has its own ring as described in the previous section. We place
397  * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
398  * Responses are placed on the event ring; in other words, the rings associated
399  * with an endpoint are purely for producing I/O.
400  *
401  * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
402  * These states generally describe the endpoint's ability to process I/O and
403  * how we handle timeouts. The driver basically follows a similar state machine
404  * as described there. There are some deviations. For example, what they
405  * describe as 'running' we break into both the Idle and Running states below.
406  * We also have a notion of timed out and quiescing. The following image
407  * summarizes the states and transitions:
408  *
409  *     +------+                                +-----------+
410  *     | Idle |---------*--------------------->|  Running  |<-+
411  *     +------+         . I/O queued on        +-----------+  |
412  *        ^               ring and timeout        |  |  |     |
413  *        |               scheduled.              |  |  |     |
414  *        |                                       |  |  |     |
415  *        +-----*---------------------------------+  |  |     |
416  *        |     . No I/Os remain                     |  |     |
417  *        |                                          |  |     |
418  *        |                +------*------------------+  |     |
419  *        |                |      . Timeout             |     |
420  *        |                |        fires for           |     |
421  *        |                |        I/O                 |     |
422  *        |                v                            v     |
423  *        |          +-----------+                +--------+  |
424  *        |          | Timed Out |                | Halted |  |
425  *        |          +-----------+                +--------+  |
426  *        |             |                           |         |
427  *        |             |   +-----------+           |         |
428  *        |             +-->| Quiescing |<----------+         |
429  *        |                 +-----------+                     |
430  *        |   No TRBs.           |                . TRBs      |
431  *        |   remain .           |                . Remain    |
432  *        +----------*----<------+-------->-------*-----------+
433  *
434  * Normally, a given endpoint will oscillate between having TRBs scheduled and
435  * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
436  * making sure that we're processing the ring, presuming that the endpoint isn't
437  * in one of the error states.
438  *
439  * To detect device hangs, we have an active timeout(9F) per active endpoint
440  * that ticks at a one second rate while we still have TRBs outstanding on an
441  * endpoint. Once all outstanding TRBs have been processed, the timeout will
442  * stop itself and there will be no active checking until the endpoint has I/O
443  * scheduled on it again.
444  *
445  * There are two primary ways that things can go wrong on the endpoint. We can
446  * either have a timeout or an event that transitions the endpoint to the Halted
447  * state. In the halted state, we need to issue explicit commands to reset the
448  * endpoint before removing the I/O.
449  *
450  * The way we handle both a timeout and a halted condition is similar, but the
451  * way they are triggered is different. When we detect a halted condition, we
452  * don't immediately clean it up, and wait for the client device driver (or USBA
453  * on its behalf) to issue a pipe reset. When we detect a timeout, we
454  * immediately take action (assuming no other action is ongoing).
455  *
456  * In both cases, we quiesce the device, which takes the endpoint from whatever
457  * state it may be in and performs the appropriate
458  * actions based on the state machine in xHCI 1.1 / 4.8.3. The end of quiescing
459  * leaves the device stopped, which allows us to update the ring's pointer and
460  * remove any TRBs that are causing problems.
461  *
462  * As part of all this, we ensure that we can only be quiescing the device from
463  * a given path at a time. Any requests to schedule I/O during this time will
464  * generally fail.
465  *
466  * The following image describes the state machine for the timeout logic. It
467  * ties into the image above.
468  *
469  *         +----------+                            +---------+
470  *         | Disabled |-----*--------------------->| Enabled |<--+
471  *         +----------+     . TRBs scheduled       +---------+   *. 1 sec timer
472  *             ^              and no active          |  |  |     |  fires and
473  *             |              timer.                 |  |  |     |  another
474  *             |                                     |  |  +--+--+  quiesce, in
475  *             |                                     |  |     |     a bad state,
476  *             +------*------------------------------+  |     ^     or decrement
477  *             |      . 1 sec timer                     |     |     I/O timeout
478  *             |        fires and                       |     |
479  *             |        no TRBs or                      |     +--------------+
480  *             |        endpoint shutdown               |                    |
481  *             |                                        *. . timer counter   |
482  *             ^                                        |    reaches zero    |
483  *             |                                        v                    |
484  *             |                                +--------------+             |
485  *             +-------------*---------------<--| Quiesce ring |->---*-------+
486  *                           . No more          | and fail I/O |     . restart
487  *                             I/Os             +--------------+       timer as
488  *                                                                     more I/Os
489  *
490  * As we described above, when there are active TRBs and I/Os, a 1 second
491  * timeout(9F) will be active. Each second, we decrement a counter on the
492  * current, active I/O until either a new I/O takes the head, or the counter
493  * reaches zero. If the counter reaches zero, then we go through, quiesce the
494  * ring, and then clean things up.
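 *
 * Put as a sketch (the structure, field, and function names below are
 * illustrative rather than the driver's actual definitions), each tick of the
 * per-endpoint timeout does roughly the following:
 *
 *	xt = list_head(&xep->xep_transfers);
 *	if (xt == NULL)
 *		return;				(let the timer lapse)
 *	if (--xt->xt_timeout <= 0) {
 *		xhci_endpoint_timeout(xep);	(quiesce and fail the I/O)
 *	} else {
 *		xep->xep_timer = timeout(xhci_endpoint_tick, xep,
 *		    drv_usectohz(MICROSEC));
 *	}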
495  *
496  * ------------------
497  * Periodic Endpoints
498  * ------------------
499  *
500  * It's worth calling out periodic endpoints explicitly, as they operate
501  * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
502  * Isochronous-IN. The USBA often uses the term polling for these. That's
503  * because the client only needs to make a single API call; however, it will
504  * receive multiple callbacks until either an error occurs or polling is
505  * requested to be terminated.
506  *
507  * When we have one of these periodic requests, we end up always rescheduling
508  * I/O requests, as well as having a specific number of pre-existing I/O
509  * requests to cover the periodic needs in case of latency spikes. Normally,
510  * when replying to a request, we use the request handle that we were given.
511  * However, when we have a periodic request, we're required to duplicate the
512  * handle before giving data back.
513  *
514  * However, the duplication is a bit tricky. For everything that was duplicated,
515  * the framework expects us to submit data. Because of that, we don't duplicate
516  * them until they are needed. This minimizes the likelihood that we have
517  * outstanding requests to deal with when we encounter a fatal polling failure.
518  *
519  * Most of the polling setup logic happens in xhci_usba.c in
520  * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
521  * xhci_endpoint.c.
522  *
523  * ----------------
524  * Structure Layout
525  * ----------------
526  *
527  * The following images relate the core data structures. The primary structure
528  * in the system is the xhci_t. This is the per-controller data structure that
529  * exists for each instance of the driver. From there, each device in the system
530  * is represented by an xhci_device_t and each endpoint is represented by an
531  * xhci_endpoint_t. For each client that opens a given endpoint, there is an
532  * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
533  * system.
534  *
535  *     +------------------------+
536  *     | Per-Controller         |
537  *     | Structure              |
538  *     | xhci_t                 |
539  *     |                        |
540  *     | uint_t              ---+--> Capability regs offset
541  *     | uint_t              ---+--> Operational regs offset
542  *     | uint_t              ---+--> Runtime regs offset
543  *     | uint_t              ---+--> Doorbell regs offset
544  *     | xhci_state_flags_t  ---+--> Device state flags
545  *     | xhci_quirks_t       ---+--> Device quirk flags
546  *     | xhci_capability_t   ---+--> Controller capability structure
547  *     | xhci_dcbaa_t        ---+----------------------------------+
548  *     | xhci_scratchpad_t   ---+---------+                        |
549  *     | xhci_command_ring_t ---+------+  |                        v
550  *     | xhci_event_ring_t   ---+----+ |  |              +---------------------+
551  *     | xhci_usba_t         ---+--+ | |  |              | Device Context      |
552  *     +------------------------+  | | |  |              | Base Address        |
553  *                                 | | |  |              | Array Structure     |
554  *                                 | | |  |              | xhci_dcbaa_t        |
555  * +-------------------------------+ | |  |              |                     |
556  * | +-------------------------------+ |  |  DCBAA KVA <-+--        uint64_t * |
557  * | |    +----------------------------+  | DMA Buffer <-+-- xhci_dma_buffer_t |
558  * | |    v                               |              +---------------------+
559  * | | +--------------------------+       +-----------------------+
560  * | | | Event Ring               |                               |
561  * | | | Management               |                               |
562  * | | | xhci_event_ring_t        |                               v
563  * | | |                          |   Event Ring        +----------------------+
564  * | | | xhci_event_segment_t * --|-> Segment VA        |   Scratchpad (Extra  |
565  * | | | xhci_dma_buffer_t      --|-> Segment DMA Buf.  |   Controller Memory) |
566  * | | | xhci_ring_t            --|--+                  |    xhci_scratchpad_t |
567  * | | +--------------------------+  |      Scratchpad  |                      |
568  * | |                               | Base Array KVA <-+-          uint64_t * |
569  * | +------------+                  | Array DMA Buf. <-+-   xhci_dma_buffer_t |
570  * |              v                  | Scratchpad DMA <-+- xhci_dma_buffer_t * |
571  * |   +---------------------------+ | Buffer per page  +----------------------+
572  * |   | Command Ring              | |
573  * |   | xhci_command_ring_t       | +------------------------------+
574  * |   |                           |                                |
575  * |   | xhci_ring_t             --+-> Command Ring --->------------+
576  * |   | list_t                  --+-> Command List                 v
577  * |   | timeout_id_t            --+-> Timeout State     +---------------------+
578  * |   | xhci_command_ring_state_t +-> State Flags       | I/O Ring            |
579  * |   +---------------------------+                     | xhci_ring_t         |
580  * |                                                     |                     |
581  * |                                     Ring DMA Buf. <-+-- xhci_dma_buffer_t |
582  * |                                       Ring Length <-+--            uint_t |
583  * |                                    Ring Entry KVA <-+--      xhci_trb_t * |
584  * |    +---------------------------+        Ring Head <-+--            uint_t |
585  * +--->| USBA State                |        Ring Tail <-+--            uint_t |
586  *      | xhci_usba_t               |       Ring Cycle <-+--            uint_t |
587  *      |                           |                    +---------------------+
588  *      | usba_hcdi_ops_t *        -+-> USBA Ops Vector                       ^
589  *      | usb_dev_dscr_t           -+-> USB Virtual Device Descriptor         |
590  *      | usb_ss_hub_descr_t       -+-> USB Virtual Hub Descriptor            |
591  *      | usba_pipe_handle_data_t * +-> Interrupt polling client              |
592  *      | usb_intr_req_t           -+-> Interrupt polling request             |
593  *      | uint32_t                --+-> Interrupt polling device mask         |
594  *      | list_t                  --+-> Pipe List (Active Users)              |
595  *      | list_t                  --+-------------------+                     |
596  *      +---------------------------+                   |                     ^
597  *                                                      |                     |
598  *                                                      v                     |
599  *     +-------------------------------+             +---------------+        |
600  *     | USB Device                    |------------>| USB Device    |--> ... |
601  *     | xhci_device_t                 |             | xhci_device_t |        |
602  *     |                               |             +---------------+        |
603  *     | usb_port_t                  --+-> USB Port plugged into              |
604  *     | uint8_t                     --+-> Slot Number                        |
605  *     | boolean_t                   --+-> Address Assigned                   |
606  *     | usba_device_t *             --+-> USBA Device State                  |
607  *     | xhci_dma_buffer_t           --+-> Input Context DMA Buffer           |
608  *     | xhci_input_context_t *      --+-> Input Context KVA                  |
609  *     | xhci_slot_context_t *       --+-> Input Slot Context KVA             |
610  *     | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA         |
611  *     | xhci_dma_buffer_t           --+-> Output Context DMA Buffer          |
612  *     | xhci_slot_context_t *       --+-> Output Slot Context KVA            ^
613  *     | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA        |
614  *     | xhci_endpoint_t *[]         --+-> Endpoint Tracking ---+             |
615  *     +-------------------------------+                        |             |
616  *                                                              |             |
617  *                                                              v             |
618  *     +------------------------------+            +-----------------+        |
619  *     | Endpoint Data                |----------->| Endpoint Data   |--> ... |
620  *     | xhci_endpoint_t              |            | xhci_endpoint_t |        |
621  *     |                              |            +-----------------+        |
622  *     | int                        --+-> Endpoint Number                     |
623  *     | int                        --+-> Endpoint Type                       |
624  *     | xhci_endpoint_state_t      --+-> Endpoint State                      |
625  *     | timeout_id_t               --+-> Endpoint Timeout State              |
626  *     | usba_pipe_handle_data_t *  --+-> USBA Client Handle                  |
627  *     | xhci_ring_t                --+-> Endpoint I/O Ring  -------->--------+
628  *     | list_t                     --+-> Transfer List --------+
629  *     +------------------------------+                         |
630  *                                                              v
631  *     +-------------------------+                  +--------------------+
632  *     | Transfer Structure      |----------------->| Transfer Structure |-> ...
633  *     | xhci_transfer_t         |                  | xhci_transfer_t    |
634  *     |                         |                  +--------------------+
635  *     | xhci_dma_buffer_t     --+-> I/O DMA Buffer
636  *     | uint_t                --+-> Number of TRBs
637  *     | uint_t                --+-> Short transfer data
638  *     | uint_t                --+-> Timeout seconds remaining
639  *     | usb_cr_t              --+-> USB Transfer return value
640  *     | boolean_t             --+-> Data direction
641  *     | xhci_trb_t *          --+-> Host-order transfer requests for I/O
642  *     | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
643  *     | usb_opaque_t          --+-> USBA Request Handle
644  *     +-------------------------+
645  *
646  * -------------
647  * Lock Ordering
648  * -------------
649  *
650  * There are three different tiers of locks that exist in the driver. First,
651  * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
652  * data for that instance of the controller. If there are multiple instances of
653  * the xHCI controller in the system, each one is independent and protected
654  * separately. Different instances do not share any data.
655  *
656  * From there, there are two other, specific locks in the system:
657  *
658  *   o xhci_command_ring_t`xcr_lock
659  *   o xhci_device_t`xd_imtx
660  *
661  * There is only one xcr_lock per controller, like the xhci_lock. It protects
662  * the state of the command ring. However, there is one xd_imtx per device.
663  * Recall that each device is scoped to a given controller. This protects the
664  * input slot context for a given device.
665  *
666  * There are a few important rules to keep in mind here that are true
667  * universally throughout the driver:
668  *
669  * 1) Always grab the xhci_t`xhci_lock, before grabbing any of the other locks.
670  * 2) A given xhci_device_t`xd_imtx must be taken before grabbing the
671  *    xhci_command_ring_t`xcr_lock.
672  * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
673  *    at a given time. In other words, we should never be manipulating the input
674  *    context of two different devices at once.
675  * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
676  *    endpoint timer. Conversely, the endpoint specific logic should never enter
677  *    this lock.
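 *
 * Putting those rules together, a code path that needs both a device's input
 * context and the command ring must acquire the locks in the following order
 * (a sketch; the pointer names are illustrative):
 *
 *	mutex_enter(&xhcip->xhci_lock);		(rule 1)
 *	mutex_enter(&xd->xd_imtx);		(rule 2)
 *	mutex_enter(&xcr->xcr_lock);
 *	... work that needs all three ...
 *	mutex_exit(&xcr->xcr_lock);
 *	mutex_exit(&xd->xd_imtx);
 *	mutex_exit(&xhcip->xhci_lock);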
678  *
679  * ----------
680  * Polled I/O
681  * ----------
682  *
683  * There is limited support for polled I/O in this driver for use by
684  * the kernel debugger. The driver currently only supports input from
685  * interrupt endpoints which is good enough for USB HID keyboard devices.
686  * Input from bulk endpoints and output are not supported which prevents
687  * using a serial console over USB for kernel debugging.
688  *
689  * --------------------
690  * Relationship to EHCI
691  * --------------------
692  *
693  * On some Intel chipsets, a given physical port on the system may be routed to
694  * one of the EHCI or xHCI controllers. This association can be dynamically
695  * changed by writing to platform specific registers as handled by the quirk
696  * logic in xhci_quirk.c.
697  *
698  * As these ports may support USB 3.x speeds, we always route all such ports to
699  * the xHCI controller, when supported. In addition, to minimize disruptions
700  * from devices being enumerated and attached to the EHCI driver and then
701  * disappearing, we generally attempt to load the xHCI controller before the
702  * EHCI controller. This logic is not done in the driver; however, it is done in
703  * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
704  * function consconfig_load_drivers().
705  *
706  * -----------
707  * Future Work
708  * -----------
709  *
710  * The primary future work in this driver spans two different, but related
711  * areas. The first area is around controller resets and how they tie into FM.
712  * Presently, we do not have a good way to handle controllers coming and going
713  * in the broader USB stack or properly reconfigure the device after a reset.
714  * Secondly, we don't handle the suspend and resume of devices and drivers.
715  */
716 
717 #include <sys/param.h>
718 #include <sys/modctl.h>
719 #include <sys/conf.h>
720 #include <sys/devops.h>
721 #include <sys/ddi.h>
722 #include <sys/sunddi.h>
723 #include <sys/cmn_err.h>
724 #include <sys/ddifm.h>
725 #include <sys/pci.h>
726 #include <sys/class.h>
727 #include <sys/policy.h>
728 
729 #include <sys/usb/hcd/xhci/xhci.h>
730 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
731 
732 /*
733  * We want to use the first BAR to access its registers. The regs[] array is
734  * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
735  * will always be the first BAR.
736  */
737 #define	XHCI_REG_NUMBER	1
738 
739 /*
740  * This task queue exists as a global taskq that is used for resetting the
741  * device in the face of FM or runtime errors. Each instance of the device
742  * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
743  * know that we should always be able to dispatch such an event.
744  */
745 static taskq_t *xhci_taskq;
746 
747 /*
748  * Global soft state for per-instance data. Note that we must use the soft state
749  * routines and cannot use the ddi_set_driver_private() routines. The USB
750  * framework presumes that it can use the dip's private data.
751  */
752 void *xhci_soft_state;
753 
754 /*
755  * This is the time in us that we wait after a controller resets before we
756  * consider reading any register. There are some controllers that want at least
757  * 1 ms, so we default to 10 ms.
758  */
759 clock_t xhci_reset_delay = 10000;
760 
761 void
762 xhci_error(xhci_t *xhcip, const char *fmt, ...)
763 {
764 	va_list ap;
765 
766 	va_start(ap, fmt);
767 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
768 		vdev_err(xhcip->xhci_dip, CE_WARN, fmt, ap);
769 	} else {
770 		vcmn_err(CE_WARN, fmt, ap);
771 	}
772 	va_end(ap);
773 }
774 
775 void
776 xhci_log(xhci_t *xhcip, const char *fmt, ...)
777 {
778 	va_list ap;
779 
780 	va_start(ap, fmt);
781 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
782 		vdev_err(xhcip->xhci_dip, CE_NOTE, fmt, ap);
783 	} else {
784 		vcmn_err(CE_NOTE, fmt, ap);
785 	}
786 	va_end(ap);
787 }
788 
789 /*
790  * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
791  * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
792  * things here. A simple bitwise-and will take care of this. And hey, it could
793  * always be more complex, USBA could clone!
794  */
795 static dev_info_t *
796 xhci_get_dip(dev_t dev)
797 {
798 	xhci_t *xhcip;
799 	int instance = getminor(dev) & ~HUBD_IS_ROOT_HUB;
800 
801 	xhcip = ddi_get_soft_state(xhci_soft_state, instance);
802 	if (xhcip != NULL)
803 		return (xhcip->xhci_dip);
804 	return (NULL);
805 }
806 
807 uint8_t
808 xhci_get8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
809 {
810 	uintptr_t addr, roff;
811 
812 	switch (rtt) {
813 	case XHCI_R_CAP:
814 		roff = xhcip->xhci_regs_capoff;
815 		break;
816 	case XHCI_R_OPER:
817 		roff = xhcip->xhci_regs_operoff;
818 		break;
819 	case XHCI_R_RUN:
820 		roff = xhcip->xhci_regs_runoff;
821 		break;
822 	case XHCI_R_DOOR:
823 		roff = xhcip->xhci_regs_dooroff;
824 		break;
825 	default:
826 		panic("called %s with bad reg type: %d", __func__, rtt);
827 	}
828 	ASSERT(roff != PCI_EINVAL32);
829 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
830 
831 	return (ddi_get8(xhcip->xhci_regs_handle, (void *)addr));
832 }
833 
834 uint16_t
835 xhci_get16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
836 {
837 	uintptr_t addr, roff;
838 
839 	switch (rtt) {
840 	case XHCI_R_CAP:
841 		roff = xhcip->xhci_regs_capoff;
842 		break;
843 	case XHCI_R_OPER:
844 		roff = xhcip->xhci_regs_operoff;
845 		break;
846 	case XHCI_R_RUN:
847 		roff = xhcip->xhci_regs_runoff;
848 		break;
849 	case XHCI_R_DOOR:
850 		roff = xhcip->xhci_regs_dooroff;
851 		break;
852 	default:
853 		panic("called %s with bad reg type: %d", __func__, rtt);
854 	}
855 	ASSERT(roff != PCI_EINVAL32);
856 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
857 
858 	return (ddi_get16(xhcip->xhci_regs_handle, (void *)addr));
859 }
860 
861 uint32_t
862 xhci_get32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
863 {
864 	uintptr_t addr, roff;
865 
866 	switch (rtt) {
867 	case XHCI_R_CAP:
868 		roff = xhcip->xhci_regs_capoff;
869 		break;
870 	case XHCI_R_OPER:
871 		roff = xhcip->xhci_regs_operoff;
872 		break;
873 	case XHCI_R_RUN:
874 		roff = xhcip->xhci_regs_runoff;
875 		break;
876 	case XHCI_R_DOOR:
877 		roff = xhcip->xhci_regs_dooroff;
878 		break;
879 	default:
880 		panic("called %s with bad reg type: %d", __func__, rtt);
881 	}
882 	ASSERT(roff != PCI_EINVAL32);
883 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
884 
885 	return (ddi_get32(xhcip->xhci_regs_handle, (void *)addr));
886 }
887 
888 uint64_t
889 xhci_get64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
890 {
891 	uintptr_t addr, roff;
892 
893 	switch (rtt) {
894 	case XHCI_R_CAP:
895 		roff = xhcip->xhci_regs_capoff;
896 		break;
897 	case XHCI_R_OPER:
898 		roff = xhcip->xhci_regs_operoff;
899 		break;
900 	case XHCI_R_RUN:
901 		roff = xhcip->xhci_regs_runoff;
902 		break;
903 	case XHCI_R_DOOR:
904 		roff = xhcip->xhci_regs_dooroff;
905 		break;
906 	default:
907 		panic("called %s with bad reg type: %d", __func__, rtt);
908 	}
909 	ASSERT(roff != PCI_EINVAL32);
910 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
911 
912 	return (ddi_get64(xhcip->xhci_regs_handle, (void *)addr));
913 }
914 
915 void
916 xhci_put8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint8_t val)
917 {
918 	uintptr_t addr, roff;
919 
920 	switch (rtt) {
921 	case XHCI_R_CAP:
922 		roff = xhcip->xhci_regs_capoff;
923 		break;
924 	case XHCI_R_OPER:
925 		roff = xhcip->xhci_regs_operoff;
926 		break;
927 	case XHCI_R_RUN:
928 		roff = xhcip->xhci_regs_runoff;
929 		break;
930 	case XHCI_R_DOOR:
931 		roff = xhcip->xhci_regs_dooroff;
932 		break;
933 	default:
934 		panic("called %s with bad reg type: %d", __func__, rtt);
935 	}
936 	ASSERT(roff != PCI_EINVAL32);
937 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
938 
939 	ddi_put8(xhcip->xhci_regs_handle, (void *)addr, val);
940 }
941 
942 void
943 xhci_put16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint16_t val)
944 {
945 	uintptr_t addr, roff;
946 
947 	switch (rtt) {
948 	case XHCI_R_CAP:
949 		roff = xhcip->xhci_regs_capoff;
950 		break;
951 	case XHCI_R_OPER:
952 		roff = xhcip->xhci_regs_operoff;
953 		break;
954 	case XHCI_R_RUN:
955 		roff = xhcip->xhci_regs_runoff;
956 		break;
957 	case XHCI_R_DOOR:
958 		roff = xhcip->xhci_regs_dooroff;
959 		break;
960 	default:
961 		panic("called %s with bad reg type: %d", __func__, rtt);
962 	}
963 	ASSERT(roff != PCI_EINVAL32);
964 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
965 
966 	ddi_put16(xhcip->xhci_regs_handle, (void *)addr, val);
967 }
968 
969 void
970 xhci_put32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint32_t val)
971 {
972 	uintptr_t addr, roff;
973 
974 	switch (rtt) {
975 	case XHCI_R_CAP:
976 		roff = xhcip->xhci_regs_capoff;
977 		break;
978 	case XHCI_R_OPER:
979 		roff = xhcip->xhci_regs_operoff;
980 		break;
981 	case XHCI_R_RUN:
982 		roff = xhcip->xhci_regs_runoff;
983 		break;
984 	case XHCI_R_DOOR:
985 		roff = xhcip->xhci_regs_dooroff;
986 		break;
987 	default:
988 		panic("called %s with bad reg type: %d", __func__, rtt);
989 	}
990 	ASSERT(roff != PCI_EINVAL32);
991 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
992 
993 	ddi_put32(xhcip->xhci_regs_handle, (void *)addr, val);
994 }
995 
996 void
997 xhci_put64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint64_t val)
998 {
999 	uintptr_t addr, roff;
1000 
1001 	switch (rtt) {
1002 	case XHCI_R_CAP:
1003 		roff = xhcip->xhci_regs_capoff;
1004 		break;
1005 	case XHCI_R_OPER:
1006 		roff = xhcip->xhci_regs_operoff;
1007 		break;
1008 	case XHCI_R_RUN:
1009 		roff = xhcip->xhci_regs_runoff;
1010 		break;
1011 	case XHCI_R_DOOR:
1012 		roff = xhcip->xhci_regs_dooroff;
1013 		break;
1014 	default:
1015 		panic("called %s with bad reg type: %d", __func__, rtt);
1016 	}
1017 	ASSERT(roff != PCI_EINVAL32);
1018 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1019 
1020 	ddi_put64(xhcip->xhci_regs_handle, (void *)addr, val);
1021 }
1022 
1023 int
1024 xhci_check_regs_acc(xhci_t *xhcip)
1025 {
1026 	ddi_fm_error_t de;
1027 
1028 	/*
1029 	 * Treat the case where we can't check as fine so that the calling
1030 	 * code stays simple.
1031 	 */
1032 	if (!DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps))
1033 		return (DDI_FM_OK);
1034 
1035 	ddi_fm_acc_err_get(xhcip->xhci_regs_handle, &de, DDI_FME_VERSION);
1036 	ddi_fm_acc_err_clear(xhcip->xhci_regs_handle, DDI_FME_VERSION);
1037 	return (de.fme_status);
1038 }
1039 
1040 /*
1041  * As a leaf PCIe driver, we just post the ereport and continue on.
1042  */
1043 /* ARGSUSED */
1044 static int
1045 xhci_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
1046 {
1047 	pci_ereport_post(dip, err, NULL);
1048 	return (err->fme_status);
1049 }
1050 
1051 static void
1052 xhci_fm_fini(xhci_t *xhcip)
1053 {
1054 	if (xhcip->xhci_fm_caps == 0)
1055 		return;
1056 
1057 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1058 		ddi_fm_handler_unregister(xhcip->xhci_dip);
1059 
1060 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1061 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1062 		pci_ereport_teardown(xhcip->xhci_dip);
1063 
1064 	ddi_fm_fini(xhcip->xhci_dip);
1065 }
1066 
1067 static void
1068 xhci_fm_init(xhci_t *xhcip)
1069 {
1070 	ddi_iblock_cookie_t iblk;
1071 	int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1072 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
1073 
1074 	xhcip->xhci_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
1075 	    DDI_PROP_DONTPASS, "fm_capable", def);
1076 
1077 	if (xhcip->xhci_fm_caps < 0) {
1078 		xhcip->xhci_fm_caps = 0;
1079 	} else if (xhcip->xhci_fm_caps & ~def) {
1080 		xhcip->xhci_fm_caps &= def;
1081 	}
1082 
1083 	if (xhcip->xhci_fm_caps == 0)
1084 		return;
1085 
1086 	ddi_fm_init(xhcip->xhci_dip, &xhcip->xhci_fm_caps, &iblk);
1087 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1088 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1089 		pci_ereport_setup(xhcip->xhci_dip);
1090 	}
1091 
1092 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1093 		ddi_fm_handler_register(xhcip->xhci_dip,
1094 		    xhci_fm_error_cb, xhcip);
1095 	}
1096 }
1097 
1098 static int
1099 xhci_reg_poll(xhci_t *xhcip, xhci_reg_type_t rt, int reg, uint32_t mask,
1100     uint32_t targ, uint_t tries, int delay_ms)
1101 {
1102 	uint_t i;
1103 
1104 	for (i = 0; i < tries; i++) {
1105 		uint32_t val = xhci_get32(xhcip, rt, reg);
1106 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1107 			ddi_fm_service_impact(xhcip->xhci_dip,
1108 			    DDI_SERVICE_LOST);
1109 			return (EIO);
1110 		}
1111 
1112 		if ((val & mask) == targ)
1113 			return (0);
1114 
1115 		delay(drv_usectohz(delay_ms * 1000));
1116 	}
1117 	return (ETIMEDOUT);
1118 }
1119 
1120 static boolean_t
1121 xhci_regs_map(xhci_t *xhcip)
1122 {
1123 	off_t memsize;
1124 	int ret;
1125 	ddi_device_acc_attr_t da;
1126 
1127 	if (ddi_dev_regsize(xhcip->xhci_dip, XHCI_REG_NUMBER, &memsize) !=
1128 	    DDI_SUCCESS) {
1129 		xhci_error(xhcip, "failed to get register set size");
1130 		return (B_FALSE);
1131 	}
1132 
1133 	bzero(&da, sizeof (ddi_device_acc_attr_t));
1134 	da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1135 	da.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1136 	da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1137 	if (DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps)) {
1138 		da.devacc_attr_access = DDI_FLAGERR_ACC;
1139 	} else {
1140 		da.devacc_attr_access = DDI_DEFAULT_ACC;
1141 	}
1142 
1143 	ret = ddi_regs_map_setup(xhcip->xhci_dip, XHCI_REG_NUMBER,
1144 	    &xhcip->xhci_regs_base, 0, memsize, &da, &xhcip->xhci_regs_handle);
1145 
1146 	if (ret != DDI_SUCCESS) {
1147 		xhci_error(xhcip, "failed to map device registers: %d", ret);
1148 		return (B_FALSE);
1149 	}
1150 
1151 	return (B_TRUE);
1152 }
1153 
1154 static boolean_t
1155 xhci_regs_init(xhci_t *xhcip)
1156 {
1157 	/*
1158 	 * The capability registers always begin at offset zero.
1159 	 */
1160 	xhcip->xhci_regs_capoff = 0;
1161 	xhcip->xhci_regs_operoff = xhci_get8(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1162 	xhcip->xhci_regs_runoff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_RTSOFF);
1163 	xhcip->xhci_regs_runoff &= ~0x1f;
1164 	xhcip->xhci_regs_dooroff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_DBOFF);
1165 	xhcip->xhci_regs_dooroff &= ~0x3;
1166 
1167 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1168 		xhci_error(xhcip, "failed to initialize controller register "
1169 		    "offsets: encountered FM register error");
1170 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1171 		return (B_FALSE);
1172 	}
1173 
1174 	return (B_TRUE);
1175 }
1176 
1177 /*
1178  * Read various parameters from PCI configuration space and from the Capability
1179  * registers that we'll need to register the device. We cache all of the
1180  * Capability registers.
1181  */
1182 static boolean_t
1183 xhci_read_params(xhci_t *xhcip)
1184 {
1185 	uint8_t usb;
1186 	uint16_t vers;
1187 	uint32_t struc1, struc2, struc3, cap1, cap2, pgsz;
1188 	uint32_t psize, pbit, capreg;
1189 	xhci_capability_t *xcap;
1190 	unsigned long ps;
1191 
1192 	/*
1193 	 * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
1194 	 * a few emulated systems don't support reading at offset 0x2 for the
1195 	 * version. Instead we need to read the caplength register and get the
1196 	 * upper two bytes.
1197 	 */
1198 	capreg = xhci_get32(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1199 	vers = XHCI_VERSION_MASK(capreg);
1200 	usb = pci_config_get8(xhcip->xhci_cfg_handle, PCI_XHCI_USBREV);
1201 	struc1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS1);
1202 	struc2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS2);
1203 	struc3 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS3);
1204 	cap1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS1);
1205 	cap2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS2);
1206 	pgsz = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PAGESIZE);
1207 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1208 		xhci_error(xhcip, "failed to read controller parameters: "
1209 		    "encountered FM register error");
1210 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1211 		return (B_FALSE);
1212 	}
1213 
1214 	xcap = &xhcip->xhci_caps;
1215 	xcap->xcap_usb_vers = usb;
1216 	xcap->xcap_hci_vers = vers;
1217 	xcap->xcap_max_slots = XHCI_HCS1_DEVSLOT_MAX(struc1);
1218 	xcap->xcap_max_intrs = XHCI_HCS1_IRQ_MAX(struc1);
1219 	xcap->xcap_max_ports = XHCI_HCS1_N_PORTS(struc1);
1220 	if (xcap->xcap_max_ports > MAX_PORTS) {
1221 		xhci_error(xhcip, "Root hub has %d ports, but system only "
1222 		    "supports %d, limiting to %d\n", xcap->xcap_max_ports,
1223 		    MAX_PORTS, MAX_PORTS);
1224 		xcap->xcap_max_ports = MAX_PORTS;
1225 	}
1226 
1227 	xcap->xcap_ist_micro = XHCI_HCS2_IST_MICRO(struc2);
1228 	xcap->xcap_ist = XHCI_HCS2_IST(struc2);
1229 	xcap->xcap_max_esrt = XHCI_HCS2_ERST_MAX(struc2);
1230 	xcap->xcap_scratch_restore = XHCI_HCS2_SPR(struc2);
1231 	xcap->xcap_max_scratch = XHCI_HCS2_SPB_MAX(struc2);
1232 
1233 	xcap->xcap_u1_lat = XHCI_HCS3_U1_DEL(struc3);
1234 	xcap->xcap_u2_lat = XHCI_HCS3_U2_DEL(struc3);
1235 
1236 	xcap->xcap_flags = XHCI_HCC1_FLAGS_MASK(cap1);
1237 	xcap->xcap_max_psa = XHCI_HCC1_PSA_SZ_MAX(cap1);
1238 	xcap->xcap_xecp_off = XHCI_HCC1_XECP(cap1);
1239 	xcap->xcap_flags2 = XHCI_HCC2_FLAGS_MASK(cap2);
1240 
1241 	/*
	 * We don't have documentation for what changed prior to xHCI 0.96, so
	 * we simply refuse to support versions older than that. We will also
	 * ignore anything with a major version greater than 1.
1245 	 */
1246 	if (xcap->xcap_hci_vers < 0x96 || xcap->xcap_hci_vers >= 0x200) {
1247 		xhci_error(xhcip, "Encountered unsupported xHCI version 0.%2x",
1248 		    xcap->xcap_hci_vers);
1249 		return (B_FALSE);
1250 	}
1251 
1252 	/*
	 * Determine the smallest page size that the controller supports and
	 * make sure that it matches our page size. We basically check here for
	 * the presence of 4k and 8k pages. The page size is used extensively
	 * throughout the code and specification. While we could support other
	 * page sizes here, given that we don't support systems with them at
	 * this time, it doesn't make much sense.
1259 	 */
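	/*
	 * The PAGESIZE register is a bitmask: a set bit n indicates that the
	 * controller supports a page size of 2^(n+12) bytes. Bit 0 therefore
	 * corresponds to 4 KiB pages and bit 1 to 8 KiB pages.
	 */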
1260 	ps = PAGESIZE;
1261 	if (ps == 0x1000) {
1262 		pbit = XHCI_PAGESIZE_4K;
1263 		psize = 0x1000;
1264 	} else if (ps == 0x2000) {
1265 		pbit = XHCI_PAGESIZE_8K;
1266 		psize = 0x2000;
1267 	} else {
1268 		xhci_error(xhcip, "Encountered host page size that the driver "
1269 		    "doesn't know how to handle: %lx\n", ps);
1270 		return (B_FALSE);
1271 	}
1272 
1273 	if (!(pgsz & pbit)) {
1274 		xhci_error(xhcip, "Encountered controller that didn't support "
1275 		    "the host page size (%d), supports: %x", psize, pgsz);
1276 		return (B_FALSE);
1277 	}
1278 	xcap->xcap_pagesize = psize;
1279 
1280 	return (B_TRUE);
1281 }
1282 
1283 /*
 * Apply workarounds for known issues. These reports come from other operating
 * systems and have been collected over time.
1286  */
1287 static boolean_t
1288 xhci_identify(xhci_t *xhcip)
1289 {
1290 	xhci_quirks_populate(xhcip);
1291 
1292 	if (xhcip->xhci_quirks & XHCI_QUIRK_NO_MSI) {
1293 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED;
1294 	} else {
1295 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED |
1296 		    DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX;
1297 	}
1298 
1299 	if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY) {
1300 		xhcip->xhci_caps.xcap_flags &= ~XCAP_AC64;
1301 	}
1302 
1303 	return (B_TRUE);
1304 }
1305 
1306 static boolean_t
1307 xhci_alloc_intr_handle(xhci_t *xhcip, int type)
1308 {
1309 	int ret;
1310 
1311 	/*
	 * Normally a well-behaved driver would more carefully request a number
	 * of interrupts based on the number available, etc. But since we only
	 * actually want a single interrupt, we just go ahead and ask for one.
1316 	 */
1317 	ret = ddi_intr_alloc(xhcip->xhci_dip, &xhcip->xhci_intr_hdl, type, 0,
1318 	    XHCI_NINTR, &xhcip->xhci_intr_num, DDI_INTR_ALLOC_NORMAL);
1319 	if (ret != DDI_SUCCESS) {
1320 		xhci_log(xhcip, "!failed to allocate interrupts of type %d: %d",
1321 		    type, ret);
1322 		return (B_FALSE);
1323 	}
1324 	xhcip->xhci_intr_type = type;
1325 
1326 	return (B_TRUE);
1327 }
1328 
1329 static boolean_t
1330 xhci_alloc_intrs(xhci_t *xhcip)
1331 {
1332 	int intr_types, ret;
1333 
1334 	if (XHCI_NINTR > xhcip->xhci_caps.xcap_max_intrs) {
1335 		xhci_error(xhcip, "controller does not support the minimum "
1336 		    "number of interrupts required (%d), supports %d",
1337 		    XHCI_NINTR, xhcip->xhci_caps.xcap_max_intrs);
1338 		return (B_FALSE);
1339 	}
1340 
1341 	if ((ret = ddi_intr_get_supported_types(xhcip->xhci_dip,
1342 	    &intr_types)) != DDI_SUCCESS) {
1343 		xhci_error(xhcip, "failed to get supported interrupt types: "
1344 		    "%d", ret);
1345 		return (B_FALSE);
1346 	}
1347 
1348 	/*
1349 	 * Mask off interrupt types we've already ruled out due to quirks or
1350 	 * other reasons.
1351 	 */
1352 	intr_types &= xhcip->xhci_caps.xcap_intr_types;
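	/*
	 * Try the remaining types in order of preference: MSI-X first, then
	 * MSI, and finally fixed (legacy) interrupts.
	 */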
1353 	if (intr_types & DDI_INTR_TYPE_MSIX) {
1354 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSIX))
1355 			return (B_TRUE);
1356 	}
1357 
1358 	if (intr_types & DDI_INTR_TYPE_MSI) {
1359 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSI))
1360 			return (B_TRUE);
1361 	}
1362 
1363 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1364 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_FIXED))
1365 			return (B_TRUE);
1366 	}
1367 
1368 	xhci_error(xhcip, "failed to allocate an interrupt, supported types: "
1369 	    "0x%x", intr_types);
1370 	return (B_FALSE);
1371 }
1372 
1373 static boolean_t
1374 xhci_add_intr_handler(xhci_t *xhcip)
1375 {
1376 	int ret;
1377 
1378 	if ((ret = ddi_intr_get_pri(xhcip->xhci_intr_hdl,
1379 	    &xhcip->xhci_intr_pri)) != DDI_SUCCESS) {
1380 		xhci_error(xhcip, "failed to get interrupt priority: %d", ret);
1381 		return (B_FALSE);
1382 	}
1383 
1384 	if ((ret = ddi_intr_get_cap(xhcip->xhci_intr_hdl,
1385 	    &xhcip->xhci_intr_caps)) != DDI_SUCCESS) {
1386 		xhci_error(xhcip, "failed to get interrupt capabilities: %d",
1387 		    ret);
1388 		return (B_FALSE);
1389 	}
1390 
1391 	if ((ret = ddi_intr_add_handler(xhcip->xhci_intr_hdl, xhci_intr, xhcip,
1392 	    (uintptr_t)0)) != DDI_SUCCESS) {
1393 		xhci_error(xhcip, "failed to add interrupt handler: %d", ret);
1394 		return (B_FALSE);
1395 	}
1396 	return (B_TRUE);
1397 }
1398 
1399 /*
 * Find a capability with an identifier whose value is 'id'. The 'init'
 * argument gives us the offset of a previously found capability; we search
 * strictly after it (callers pass UINT32_MAX to start from the beginning).
 * See xHCI 1.1 / 7 for more information. This is more or less exactly like
 * PCI capabilities.
1403  */
1404 static boolean_t
1405 xhci_find_ext_cap(xhci_t *xhcip, uint32_t id, uint32_t init, uint32_t *outp)
1406 {
1407 	uint32_t off;
1408 	uint8_t next = 0;
1409 
1410 	/*
1411 	 * If we have no offset, we're done.
1412 	 */
1413 	if (xhcip->xhci_caps.xcap_xecp_off == 0)
1414 		return (B_FALSE);
1415 
1416 	off = xhcip->xhci_caps.xcap_xecp_off << 2;
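	/*
	 * Both the extended capability pointer above and each capability's
	 * 'next' field below are expressed in 32-bit word units, hence the
	 * shifts by two to convert them into byte offsets.
	 */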
1417 	do {
1418 		uint32_t cap_hdr;
1419 
1420 		off += next << 2;
1421 		cap_hdr = xhci_get32(xhcip, XHCI_R_CAP, off);
1422 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1423 			xhci_error(xhcip, "failed to read xhci extended "
1424 			    "capabilities at offset 0x%x: encountered FM "
1425 			    "register error", off);
1426 			ddi_fm_service_impact(xhcip->xhci_dip,
1427 			    DDI_SERVICE_LOST);
1428 			break;
1429 		}
1430 
1431 		if (cap_hdr == PCI_EINVAL32)
1432 			break;
1433 		if (XHCI_XECP_ID(cap_hdr) == id &&
1434 		    (init == UINT32_MAX || off > init)) {
1435 			*outp = off;
1436 			return (B_TRUE);
1437 		}
1438 		next = XHCI_XECP_NEXT(cap_hdr);
1439 		/*
		 * Watch out for overflow if we somehow end up with a register
		 * space of more than 2 GiB.
1442 		 */
1443 		if (next << 2 > (INT32_MAX - off))
1444 			return (B_FALSE);
1445 	} while (next != 0);
1446 
1447 	return (B_FALSE);
1448 }
1449 
1450 /*
 * Mostly for informational purposes, we'd like to walk the supported protocol
 * capabilities to augment the devinfo tree with the number of ports that
 * support USB 2 and USB 3. Note though that these ports may overlap. Many
 * ports can support both USB 2 and USB 3 and are wired up to the same physical
 * port, even though they show up as separate 'ports' in the xhci sense.
1456  */
1457 static boolean_t
1458 xhci_port_count(xhci_t *xhcip)
1459 {
1460 	uint_t nusb2 = 0, fusb2 = 0;
1461 	uint_t nusb30 = 0, fusb30 = 0;
1462 	uint_t nusb31 = 0, fusb31 = 0;
1463 	uint32_t off = UINT32_MAX;
1464 
1465 	while (xhci_find_ext_cap(xhcip, XHCI_ID_PROTOCOLS, off, &off) ==
1466 	    B_TRUE) {
1467 		uint32_t rvers, rport;
1468 		uint8_t maj, min, count, first;
1469 
1470 		/*
1471 		 * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
1472 		 * has version information while the third uint32_t has the port
1473 		 * count.
1474 		 */
1475 		rvers = xhci_get32(xhcip, XHCI_R_CAP, off);
1476 		rport = xhci_get32(xhcip, XHCI_R_CAP, off + 8);
1477 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1478 			xhci_error(xhcip, "failed to read xhci port counts: "
1479 			    "encountered fatal FM register error");
1480 			ddi_fm_service_impact(xhcip->xhci_dip,
1481 			    DDI_SERVICE_LOST);
1482 			return (B_FALSE);
1483 		}
1484 
1485 		maj = XHCI_XECP_PROT_MAJOR(rvers);
1486 		min = XHCI_XECP_PROT_MINOR(rvers);
1487 		count = XHCI_XECP_PROT_PCOUNT(rport);
1488 		first = XHCI_XECP_PROT_FPORT(rport);
1489 
1490 		/*
		 * In the wild, we've seen some systems that are using a minor
		 * version of 0x10 and some that are using 0x01 in this field.
		 * The xHCI 1.1 specification (Table 155: xHCI Supported
		 * Protocols) says that we should expect a minor of 0x01;
		 * however, the USB 3.1 specification defines the version to be
		 * 0x10 when encoded in BCD style. As such, handle both and
		 * hope we never get to revision 16 of USB 3.
1499 		 */
1500 		if (maj == 3 && (min == 0x10 || min == 0x01)) {
1501 			nusb31 = count;
1502 			fusb31 = first;
1503 		} else if (maj == 3 && min == 0) {
1504 			nusb30 = count;
1505 			fusb30 = first;
1506 		} else if (maj <= 2) {
1507 			nusb2 = count;
1508 			fusb2 = first;
1509 		} else {
1510 			xhci_error(xhcip, "encountered port capabilities with "
1511 			    "unknown USB version: %x.%x\n", maj, min);
1512 		}
1513 	}
1514 
1515 	/*
1516 	 * These properties are used by FMA and the USB topo module.
1517 	 */
1518 	if (nusb2 > 0) {
1519 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1520 		    "usb2.0-port-count", nusb2);
1521 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1522 		    "usb2.0-first-port", fusb2);
1523 	}
1524 	if (nusb30 > 0) {
1525 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1526 		    "usb3.0-port-count", nusb30);
1527 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1528 		    "usb3.0-first-port", fusb30);
1529 	}
1530 
1531 	if (nusb31 > 0) {
1532 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1533 		    "usb3.1-port-count", nusb31);
1534 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1535 		    "usb3.1-first-port", fusb31);
1536 	}
1537 
1538 	return (B_TRUE);
1539 }
1540 
1541 /*
1542  * Take over control from the BIOS or other firmware, if applicable.
1543  */
1544 static boolean_t
1545 xhci_controller_takeover(xhci_t *xhcip)
1546 {
1547 	int ret;
1548 	uint32_t val, off;
1549 
1550 	/*
1551 	 * If we can't find the legacy capability, then there's nothing to do.
1552 	 */
1553 	if (xhci_find_ext_cap(xhcip, XHCI_ID_USB_LEGACY, UINT32_MAX, &off) ==
1554 	    B_FALSE)
1555 		return (B_TRUE);
1556 	val = xhci_get32(xhcip, XHCI_R_CAP, off);
1557 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1558 		xhci_error(xhcip, "failed to read BIOS take over registers: "
1559 		    "encountered fatal FM register error");
1560 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1561 		return (B_FALSE);
1562 	}
1563 
1564 	if (val & XHCI_BIOS_OWNED) {
1565 		val |= XHCI_OS_OWNED;
1566 		xhci_put32(xhcip, XHCI_R_CAP, off, val);
1567 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1568 			xhci_error(xhcip, "failed to write BIOS take over "
1569 			    "registers: encountered fatal FM register error");
1570 			ddi_fm_service_impact(xhcip->xhci_dip,
1571 			    DDI_SERVICE_LOST);
1572 			return (B_FALSE);
1573 		}
1574 
1575 		/*
1576 		 * Wait up to 5 seconds for things to change. While this number
1577 		 * isn't specified in the xHCI spec, it seems to be the de facto
1578 		 * value that various systems are using today. We'll use a 10ms
1579 		 * interval to check.
1580 		 */
1581 		ret = xhci_reg_poll(xhcip, XHCI_R_CAP, off,
1582 		    XHCI_BIOS_OWNED | XHCI_OS_OWNED, XHCI_OS_OWNED, 500, 10);
1583 		if (ret == EIO)
1584 			return (B_FALSE);
1585 		if (ret == ETIMEDOUT) {
1586 			xhci_log(xhcip, "!timed out waiting for firmware to "
1587 			    "hand off, taking over");
1588 			val &= ~XHCI_BIOS_OWNED;
1589 			xhci_put32(xhcip, XHCI_R_CAP, off, val);
1590 			if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1591 				xhci_error(xhcip, "failed to write forced "
1592 				    "takeover: encountered fatal FM register "
1593 				    "error");
1594 				ddi_fm_service_impact(xhcip->xhci_dip,
1595 				    DDI_SERVICE_LOST);
1596 				return (B_FALSE);
1597 			}
1598 		}
1599 	}
1600 
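	/*
	 * With ownership settled, quiesce the legacy SMI machinery: clear the
	 * firmware's SMI enables and acknowledge any pending SMI status bits
	 * so that controller events are reported to us rather than to the
	 * firmware.
	 */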
1601 	val = xhci_get32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS);
1602 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1603 		xhci_error(xhcip, "failed to read legacy control registers: "
1604 		    "encountered fatal FM register error");
1605 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1606 		return (B_FALSE);
1607 	}
1608 	val &= XHCI_XECP_SMI_MASK;
1609 	val |= XHCI_XECP_CLEAR_SMI;
1610 	xhci_put32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS, val);
1611 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1612 		xhci_error(xhcip, "failed to write legacy control registers: "
1613 		    "encountered fatal FM register error");
1614 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1615 		return (B_FALSE);
1616 	}
1617 
1618 	return (B_TRUE);
1619 }
1620 
1621 static int
1622 xhci_controller_stop(xhci_t *xhcip)
1623 {
1624 	uint32_t cmdreg;
1625 
1626 	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1627 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1628 		xhci_error(xhcip, "failed to read USB Command register: "
1629 		    "encountered fatal FM register error");
1630 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1631 		return (EIO);
1632 	}
1633 
1634 	cmdreg &= ~(XHCI_CMD_RS | XHCI_CMD_INTE);
1635 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1636 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1637 		xhci_error(xhcip, "failed to write USB Command register: "
1638 		    "encountered fatal FM register error");
1639 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1640 		return (EIO);
1641 	}
1642 
1643 	/*
	 * Wait up to 500ms (50 tries at a 10ms interval) for this to occur.
	 * The specification says that this should stop within 16ms, but we
	 * give ourselves a bit more time just in case.
1647 	 */
1648 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_HCH,
1649 	    XHCI_STS_HCH, 50, 10));
1650 }
1651 
1652 static int
1653 xhci_controller_reset(xhci_t *xhcip)
1654 {
1655 	int ret;
1656 	uint32_t cmdreg;
1657 
1658 	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1659 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1660 		xhci_error(xhcip, "failed to read USB Command register for "
1661 		    "reset: encountered fatal FM register error");
1662 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1663 		return (EIO);
1664 	}
1665 
1666 	cmdreg |= XHCI_CMD_HCRST;
1667 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1668 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1669 		xhci_error(xhcip, "failed to write USB Command register for "
1670 		    "reset: encountered fatal FM register error");
1671 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1672 		return (EIO);
1673 	}
1674 
1675 	/*
1676 	 * Some controllers apparently don't want to be touched for at least 1ms
1677 	 * after we initiate the reset. Therefore give all controllers this
1678 	 * moment to breathe.
1679 	 */
1680 	delay(drv_usectohz(xhci_reset_delay));
1681 
1682 	/*
	 * To tell that the reset has completed, we first verify that the
	 * USBCMD register no longer has the reset bit asserted. However, once
	 * that's done we have to go verify that CNR (Controller Not Ready) in
	 * the USBSTS register is no longer asserted.
1687 	 */
1688 	if ((ret = xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBCMD,
1689 	    XHCI_CMD_HCRST, 0, 500, 10)) != 0)
1690 		return (ret);
1691 
1692 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1693 	    XHCI_STS_CNR, 0, 500, 10));
1694 }
1695 
1696 /*
1697  * Take care of all the required initialization before we can actually enable
1698  * the controller. This means that we need to:
1699  *
1700  *    o Program the maximum number of slots
1701  *    o Program the DCBAAP and allocate the scratchpad
1702  *    o Program the Command Ring
1703  *    o Initialize the Event Ring
1704  *    o Enable interrupts (set imod)
1705  */
1706 static int
1707 xhci_controller_configure(xhci_t *xhcip)
1708 {
1709 	int ret;
1710 	uint32_t config;
1711 
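	/*
	 * Program the number of device slots that the controller should
	 * enable (the Max Device Slots Enabled field of the CONFIG register)
	 * to the maximum that it advertises.
	 */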
1712 	config = xhci_get32(xhcip, XHCI_R_OPER, XHCI_CONFIG);
1713 	config &= ~XHCI_CONFIG_SLOTS_MASK;
1714 	config |= xhcip->xhci_caps.xcap_max_slots;
1715 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_CONFIG, config);
1716 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1717 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1718 		return (EIO);
1719 	}
1720 
1721 	if ((ret = xhci_context_init(xhcip)) != 0) {
1722 		const char *reason;
1723 		if (ret == EIO) {
1724 			reason = "fatal FM I/O error occurred";
1725 		} else if (ret == ENOMEM) {
1726 			reason = "unable to allocate DMA memory";
1727 		} else {
1728 			reason = "unexpected error occurred";
1729 		}
1730 
1731 		xhci_error(xhcip, "failed to initialize xhci context "
1732 		    "registers: %s (%d)", reason, ret);
1733 		return (ret);
1734 	}
1735 
1736 	if ((ret = xhci_command_ring_init(xhcip)) != 0) {
1737 		xhci_error(xhcip, "failed to initialize commands: %d", ret);
1738 		return (ret);
1739 	}
1740 
1741 	if ((ret = xhci_event_init(xhcip)) != 0) {
1742 		xhci_error(xhcip, "failed to initialize events: %d", ret);
1743 		return (ret);
1744 	}
1745 
1746 	if ((ret = xhci_intr_conf(xhcip)) != 0) {
1747 		xhci_error(xhcip, "failed to configure interrupts: %d", ret);
1748 		return (ret);
1749 	}
1750 
1751 	return (0);
1752 }
1753 
1754 static int
1755 xhci_controller_start(xhci_t *xhcip)
1756 {
1757 	uint32_t reg;
1758 
1759 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1760 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1761 		xhci_error(xhcip, "failed to read USB Command register for "
1762 		    "start: encountered fatal FM register error");
1763 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1764 		return (EIO);
1765 	}
1766 
1767 	reg |= XHCI_CMD_RS;
1768 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, reg);
1769 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1770 		xhci_error(xhcip, "failed to write USB Command register for "
1771 		    "start: encountered fatal FM register error");
1772 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1773 		return (EIO);
1774 	}
1775 
1776 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1777 	    XHCI_STS_HCH, 0, 500, 10));
1778 }
1779 
1780 /* ARGSUSED */
1781 static void
1782 xhci_reset_task(void *arg)
1783 {
1784 	/*
1785 	 * Longer term, we'd like to properly perform a controller reset.
1786 	 * However, that requires a bit more assistance from USBA to work
1787 	 * properly and tear down devices. In the meantime, we panic.
1788 	 */
1789 	panic("XHCI runtime reset required");
1790 }
1791 
1792 /*
1793  * This function is called when we've detected a fatal FM condition that has
1794  * resulted in a loss of service and we need to force a reset of the controller
1795  * as a whole. Only one such reset may be ongoing at a time.
1796  */
1797 void
1798 xhci_fm_runtime_reset(xhci_t *xhcip)
1799 {
1800 	boolean_t locked = B_FALSE;
1801 
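	/*
	 * We may be called with or without xhci_lock already held, so only
	 * enter (and later exit) the lock if the caller doesn't already own
	 * it.
	 */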
1802 	if (mutex_owned(&xhcip->xhci_lock)) {
1803 		locked = B_TRUE;
1804 	} else {
1805 		mutex_enter(&xhcip->xhci_lock);
1806 	}
1807 
1808 	/*
	 * If we're already in the error state then a reset is already ongoing
1810 	 * and there is nothing for us to do here.
1811 	 */
1812 	if (xhcip->xhci_state & XHCI_S_ERROR) {
1813 		goto out;
1814 	}
1815 
1816 	xhcip->xhci_state |= XHCI_S_ERROR;
1817 	ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1818 	taskq_dispatch_ent(xhci_taskq, xhci_reset_task, xhcip, 0,
1819 	    &xhcip->xhci_tqe);
1820 out:
1821 	if (!locked) {
1822 		mutex_exit(&xhcip->xhci_lock);
1823 	}
1824 }
1825 
1826 static int
1827 xhci_ioctl_portsc(xhci_t *xhcip, intptr_t arg)
1828 {
1829 	int i;
1830 	xhci_ioctl_portsc_t xhi;
1831 
1832 	bzero(&xhi, sizeof (xhci_ioctl_portsc_t));
1833 	xhi.xhi_nports = xhcip->xhci_caps.xcap_max_ports;
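	/*
	 * PORTSC registers are indexed by 1-based port number, so the copy
	 * starts at port 1.
	 */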
1834 	for (i = 1; i <= xhcip->xhci_caps.xcap_max_ports; i++) {
1835 		xhi.xhi_portsc[i] = xhci_get32(xhcip, XHCI_R_OPER,
1836 		    XHCI_PORTSC(i));
1837 	}
1838 
1839 	if (ddi_copyout(&xhi, (void *)(uintptr_t)arg, sizeof (xhi), 0) != 0)
1840 		return (EFAULT);
1841 
1842 	return (0);
1843 }
1844 
1845 static int
1846 xhci_ioctl_clear(xhci_t *xhcip, intptr_t arg)
1847 {
1848 	uint32_t reg;
1849 	xhci_ioctl_clear_t xic;
1850 
1851 	if (ddi_copyin((const void *)(uintptr_t)arg, &xic, sizeof (xic),
1852 	    0) != 0)
1853 		return (EFAULT);
1854 
1855 	if (xic.xic_port == 0 || xic.xic_port >
1856 	    xhcip->xhci_caps.xcap_max_ports)
1857 		return (EINVAL);
1858 
1859 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port));
1860 	reg &= ~XHCI_PS_CLEAR;
1861 	reg |= XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC |
1862 	    XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC;
1863 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port), reg);
1864 
1865 	return (0);
1866 }
1867 
1868 static int
1869 xhci_ioctl_setpls(xhci_t *xhcip, intptr_t arg)
1870 {
1871 	uint32_t reg;
1872 	xhci_ioctl_setpls_t xis;
1873 
1874 	if (ddi_copyin((const void *)(uintptr_t)arg, &xis, sizeof (xis),
1875 	    0) != 0)
1876 		return (EFAULT);
1877 
1878 	if (xis.xis_port == 0 || xis.xis_port >
1879 	    xhcip->xhci_caps.xcap_max_ports)
1880 		return (EINVAL);
1881 
1882 	if (xis.xis_pls & ~0xf)
1883 		return (EINVAL);
1884 
1885 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port));
1886 	reg &= ~XHCI_PS_CLEAR;
1887 	reg |= XHCI_PS_PLS_SET(xis.xis_pls);
1888 	reg |= XHCI_PS_LWS;
1889 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port), reg);
1890 
1891 	return (0);
1892 }
1893 
1894 static int
1895 xhci_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1896 {
1897 	dev_info_t *dip = xhci_get_dip(*devp);
1898 
1899 	return (usba_hubdi_open(dip, devp, flags, otyp, credp));
1900 }
1901 
1902 static int
1903 xhci_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1904     int *rvalp)
1905 {
1906 	dev_info_t *dip = xhci_get_dip(dev);
1907 
1908 	if (cmd == XHCI_IOCTL_PORTSC ||
1909 	    cmd == XHCI_IOCTL_CLEAR ||
1910 	    cmd == XHCI_IOCTL_SETPLS) {
1911 		xhci_t *xhcip = ddi_get_soft_state(xhci_soft_state,
1912 		    getminor(dev) & ~HUBD_IS_ROOT_HUB);
1913 
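		/*
		 * These private ioctls are restricted to sufficiently
		 * privileged callers in the global zone, must come from
		 * userland (hence the FKIOCTL check), and require the device
		 * to have been opened for writing.
		 */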
1914 		if (secpolicy_hwmanip(credp) != 0 ||
1915 		    crgetzoneid(credp) != GLOBAL_ZONEID)
1916 			return (EPERM);
1917 
1918 		if (mode & FKIOCTL)
1919 			return (ENOTSUP);
1920 
1921 		if (!(mode & FWRITE))
1922 			return (EBADF);
1923 
1924 		if (cmd == XHCI_IOCTL_PORTSC)
1925 			return (xhci_ioctl_portsc(xhcip, arg));
1926 		else if (cmd == XHCI_IOCTL_CLEAR)
1927 			return (xhci_ioctl_clear(xhcip, arg));
1928 		else
1929 			return (xhci_ioctl_setpls(xhcip, arg));
1930 	}
1931 
1932 	return (usba_hubdi_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
1933 }
1934 
1935 static int
1936 xhci_close(dev_t dev, int flag, int otyp, cred_t *credp)
1937 {
1938 	dev_info_t *dip = xhci_get_dip(dev);
1939 
1940 	return (usba_hubdi_close(dip, dev, flag, otyp, credp));
1941 }
1942 
1943 /*
1944  * We try to clean up everything that we can. The only thing that we let stop us
1945  * at this time is a failure to remove the root hub, which is realistically the
1946  * equivalent of our EBUSY case.
1947  */
1948 static int
1949 xhci_cleanup(xhci_t *xhcip)
1950 {
1951 	int ret, inst;
1952 
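	/*
	 * xhci_seq records which attach steps completed, so we only unwind
	 * the pieces that were actually set up.
	 */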
1953 	if (xhcip->xhci_seq & XHCI_ATTACH_ROOT_HUB) {
1954 		if ((ret = xhci_root_hub_fini(xhcip)) != 0)
1955 			return (ret);
1956 	}
1957 
1958 	if (xhcip->xhci_seq & XHCI_ATTACH_USBA) {
1959 		xhci_hcd_fini(xhcip);
1960 	}
1961 
1962 	if (xhcip->xhci_seq & XHCI_ATTACH_STARTED) {
1963 		mutex_enter(&xhcip->xhci_lock);
1964 		while (xhcip->xhci_state & XHCI_S_ERROR)
1965 			cv_wait(&xhcip->xhci_statecv, &xhcip->xhci_lock);
1966 		mutex_exit(&xhcip->xhci_lock);
1967 
1968 		(void) xhci_controller_stop(xhcip);
1969 	}
1970 
1971 	/*
1972 	 * Always release the context, command, and event data. They handle the
	 * fact that they may be in an arbitrary state or unallocated.
1974 	 */
1975 	xhci_event_fini(xhcip);
1976 	xhci_command_ring_fini(xhcip);
1977 	xhci_context_fini(xhcip);
1978 
1979 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ENABLE) {
1980 		(void) xhci_ddi_intr_disable(xhcip);
1981 	}
1982 
1983 	if (xhcip->xhci_seq & XHCI_ATTACH_SYNCH) {
1984 		cv_destroy(&xhcip->xhci_statecv);
1985 		mutex_destroy(&xhcip->xhci_lock);
1986 	}
1987 
1988 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ADD) {
1989 		if ((ret = ddi_intr_remove_handler(xhcip->xhci_intr_hdl)) !=
1990 		    DDI_SUCCESS) {
1991 			xhci_error(xhcip, "failed to remove interrupt "
1992 			    "handler: %d", ret);
1993 		}
1994 	}
1995 
1996 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ALLOC) {
1997 		if ((ret = ddi_intr_free(xhcip->xhci_intr_hdl)) !=
1998 		    DDI_SUCCESS) {
1999 			xhci_error(xhcip, "failed to free interrupts: %d", ret);
2000 		}
2001 	}
2002 
2003 	if (xhcip->xhci_seq & XHCI_ATTACH_REGS_MAP) {
2004 		ddi_regs_map_free(&xhcip->xhci_regs_handle);
2005 		xhcip->xhci_regs_handle = NULL;
2006 	}
2007 
2008 	if (xhcip->xhci_seq & XHCI_ATTACH_PCI_CONFIG) {
2009 		pci_config_teardown(&xhcip->xhci_cfg_handle);
2010 		xhcip->xhci_cfg_handle = NULL;
2011 	}
2012 
2013 	if (xhcip->xhci_seq & XHCI_ATTACH_FM) {
2014 		xhci_fm_fini(xhcip);
2015 		xhcip->xhci_fm_caps = 0;
2016 	}
2017 
2018 	inst = ddi_get_instance(xhcip->xhci_dip);
2019 	xhcip->xhci_dip = NULL;
2020 	ddi_soft_state_free(xhci_soft_state, inst);
2021 
2022 	return (DDI_SUCCESS);
2023 }
2024 
2025 static int
2026 xhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2027 {
2028 	int ret, inst, route;
2029 	xhci_t *xhcip;
2030 
2031 	if (cmd != DDI_ATTACH)
2032 		return (DDI_FAILURE);
2033 
2034 	inst = ddi_get_instance(dip);
2035 	if (ddi_soft_state_zalloc(xhci_soft_state, inst) != 0)
2036 		return (DDI_FAILURE);
2037 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2038 	xhcip->xhci_dip = dip;
2039 
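	/*
	 * Start the register group offsets out as invalid sentinels; they are
	 * filled in once the registers have been mapped and read in
	 * xhci_regs_init().
	 */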
2040 	xhcip->xhci_regs_capoff = PCI_EINVAL32;
2041 	xhcip->xhci_regs_operoff = PCI_EINVAL32;
2042 	xhcip->xhci_regs_runoff = PCI_EINVAL32;
2043 	xhcip->xhci_regs_dooroff = PCI_EINVAL32;
2044 
2045 	xhci_fm_init(xhcip);
2046 	xhcip->xhci_seq |= XHCI_ATTACH_FM;
2047 
2048 	if (pci_config_setup(xhcip->xhci_dip, &xhcip->xhci_cfg_handle) !=
2049 	    DDI_SUCCESS) {
2050 		goto err;
2051 	}
2052 	xhcip->xhci_seq |= XHCI_ATTACH_PCI_CONFIG;
2053 	xhcip->xhci_vendor_id = pci_config_get16(xhcip->xhci_cfg_handle,
2054 	    PCI_CONF_VENID);
2055 	xhcip->xhci_device_id = pci_config_get16(xhcip->xhci_cfg_handle,
2056 	    PCI_CONF_DEVID);
2057 
2058 	if (xhci_regs_map(xhcip) == B_FALSE) {
2059 		goto err;
2060 	}
2061 
2062 	xhcip->xhci_seq |= XHCI_ATTACH_REGS_MAP;
2063 
2064 	if (xhci_regs_init(xhcip) == B_FALSE)
2065 		goto err;
2066 
2067 	if (xhci_read_params(xhcip) == B_FALSE)
2068 		goto err;
2069 
2070 	if (xhci_identify(xhcip) == B_FALSE)
2071 		goto err;
2072 
2073 	if (xhci_alloc_intrs(xhcip) == B_FALSE)
2074 		goto err;
2075 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ALLOC;
2076 
2077 	if (xhci_add_intr_handler(xhcip) == B_FALSE)
2078 		goto err;
2079 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ADD;
2080 
2081 	mutex_init(&xhcip->xhci_lock, NULL, MUTEX_DRIVER,
2082 	    (void *)(uintptr_t)xhcip->xhci_intr_pri);
2083 	cv_init(&xhcip->xhci_statecv, NULL, CV_DRIVER, NULL);
2084 	xhcip->xhci_seq |= XHCI_ATTACH_SYNCH;
2085 
2086 	if (xhci_port_count(xhcip) == B_FALSE)
2087 		goto err;
2088 
2089 	if (xhci_controller_takeover(xhcip) == B_FALSE)
2090 		goto err;
2091 
2092 	/*
	 * We don't enable interrupts until after we take over the controller
	 * from the BIOS. We've observed cases where enabling them earlier can
	 * cause spurious interrupts.
2096 	 */
2097 	if (xhci_ddi_intr_enable(xhcip) == B_FALSE)
2098 		goto err;
2099 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ENABLE;
2100 
2101 	if ((ret = xhci_controller_stop(xhcip)) != 0) {
2102 		xhci_error(xhcip, "failed to stop controller: %s",
2103 		    ret == EIO ? "encountered FM register error" :
2104 		    "timed out while waiting for controller");
2105 		goto err;
2106 	}
2107 
2108 	if ((ret = xhci_controller_reset(xhcip)) != 0) {
2109 		xhci_error(xhcip, "failed to reset controller: %s",
2110 		    ret == EIO ? "encountered FM register error" :
2111 		    "timed out while waiting for controller");
2112 		goto err;
2113 	}
2114 
2115 	if ((ret = xhci_controller_configure(xhcip)) != 0) {
2116 		xhci_error(xhcip, "failed to configure controller: %d", ret);
2117 		goto err;
2118 	}
2119 
2120 	/*
	 * Some systems support having ports routed to both an ehci and an xhci
	 * controller. If we support rerouting and the user hasn't requested
	 * otherwise via a driver.conf tuning, we reroute the ports now.
2124 	 */
2125 	route = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
2126 	    DDI_PROP_DONTPASS, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT);
2127 	if (route != XHCI_PROP_REROUTE_DISABLE &&
2128 	    (xhcip->xhci_quirks & XHCI_QUIRK_INTC_EHCI))
2129 		(void) xhci_reroute_intel(xhcip);
2130 
2131 	if ((ret = xhci_controller_start(xhcip)) != 0) {
		xhci_log(xhcip, "failed to start controller: %s",
2133 		    ret == EIO ? "encountered FM register error" :
2134 		    "timed out while waiting for controller");
2135 		goto err;
2136 	}
2137 	xhcip->xhci_seq |= XHCI_ATTACH_STARTED;
2138 
2139 	/*
2140 	 * Finally, register ourselves with the USB framework itself.
2141 	 */
2142 	if ((ret = xhci_hcd_init(xhcip)) != 0) {
2143 		xhci_error(xhcip, "failed to register hcd with usba");
2144 		goto err;
2145 	}
2146 	xhcip->xhci_seq |= XHCI_ATTACH_USBA;
2147 
2148 	if ((ret = xhci_root_hub_init(xhcip)) != 0) {
2149 		xhci_error(xhcip, "failed to load the root hub driver");
2150 		goto err;
2151 	}
2152 	xhcip->xhci_seq |= XHCI_ATTACH_ROOT_HUB;
2153 
2154 	return (DDI_SUCCESS);
2155 
2156 err:
2157 	(void) xhci_cleanup(xhcip);
2158 	return (DDI_FAILURE);
2159 }
2160 
2161 static int
2162 xhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2163 {
2164 	xhci_t *xhcip;
2165 
2166 	if (cmd != DDI_DETACH)
2167 		return (DDI_FAILURE);
2168 
2169 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2170 	if (xhcip == NULL) {
2171 		dev_err(dip, CE_WARN, "detach called without soft state!");
2172 		return (DDI_FAILURE);
2173 	}
2174 
2175 	return (xhci_cleanup(xhcip));
2176 }
2177 
2178 /* ARGSUSED */
2179 static int
2180 xhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
2181 {
2182 	dev_t dev;
2183 	int inst;
2184 
2185 	switch (cmd) {
2186 	case DDI_INFO_DEVT2DEVINFO:
2187 		dev = (dev_t)arg;
2188 		*outp = xhci_get_dip(dev);
2189 		if (*outp == NULL)
2190 			return (DDI_FAILURE);
2191 		break;
2192 	case DDI_INFO_DEVT2INSTANCE:
2193 		dev = (dev_t)arg;
2194 		inst = getminor(dev) & ~HUBD_IS_ROOT_HUB;
2195 		*outp = (void *)(uintptr_t)inst;
2196 		break;
2197 	default:
2198 		return (DDI_FAILURE);
2199 	}
2200 
2201 	return (DDI_SUCCESS);
2202 }
2203 
2204 static struct cb_ops xhci_cb_ops = {
2205 	xhci_open,		/* cb_open */
2206 	xhci_close,		/* cb_close */
2207 	nodev,			/* cb_strategy */
2208 	nodev,			/* cb_print */
2209 	nodev,			/* cb_dump */
2210 	nodev,			/* cb_read */
2211 	nodev,			/* cb_write */
2212 	xhci_ioctl,		/* cb_ioctl */
2213 	nodev,			/* cb_devmap */
2214 	nodev,			/* cb_mmap */
2215 	nodev,			/* cb_segmap */
2216 	nochpoll,		/* cb_chpoll */
2217 	ddi_prop_op,		/* cb_prop_op */
2218 	NULL,			/* cb_stream */
2219 	D_MP | D_HOTPLUG,	/* cb_flag */
2220 	CB_REV,			/* cb_rev */
2221 	nodev,			/* cb_aread */
2222 	nodev			/* cb_awrite */
2223 };
2224 
2225 static struct dev_ops xhci_dev_ops = {
2226 	DEVO_REV,			/* devo_rev */
2227 	0,				/* devo_refcnt */
2228 	xhci_getinfo,			/* devo_getinfo */
2229 	nulldev,			/* devo_identify */
2230 	nulldev,			/* devo_probe */
2231 	xhci_attach,			/* devo_attach */
2232 	xhci_detach,			/* devo_detach */
2233 	nodev,				/* devo_reset */
2234 	&xhci_cb_ops,			/* devo_cb_ops */
2235 	&usba_hubdi_busops,		/* devo_bus_ops */
2236 	usba_hubdi_root_hub_power,	/* devo_power */
2237 	ddi_quiesce_not_supported	/* devo_quiesce */
2238 };
2239 
2240 static struct modldrv xhci_modldrv = {
2241 	&mod_driverops,
2242 	"USB xHCI Driver",
2243 	&xhci_dev_ops
2244 };
2245 
2246 static struct modlinkage xhci_modlinkage = {
2247 	MODREV_1,
2248 	&xhci_modldrv,
2249 	NULL
2250 };
2251 
2252 int
2253 _init(void)
2254 {
2255 	int ret;
2256 
2257 	if ((ret = ddi_soft_state_init(&xhci_soft_state, sizeof (xhci_t),
2258 	    0)) != 0) {
2259 		return (ret);
2260 	}
2261 
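	/*
	 * A single-threaded taskq suffices here; it currently only needs to
	 * service the runtime reset task dispatched from
	 * xhci_fm_runtime_reset().
	 */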
2262 	xhci_taskq = taskq_create("xhci_taskq", 1, minclsyspri, 0, 0, 0);
2263 	if (xhci_taskq == NULL) {
2264 		ddi_soft_state_fini(&xhci_soft_state);
2265 		return (ENOMEM);
2266 	}
2267 
2268 	if ((ret = mod_install(&xhci_modlinkage)) != 0) {
2269 		taskq_destroy(xhci_taskq);
2270 		xhci_taskq = NULL;
2271 	}
2272 
2273 	return (ret);
2274 }
2275 
2276 int
2277 _info(struct modinfo *modinfop)
2278 {
2279 	return (mod_info(&xhci_modlinkage, modinfop));
2280 }
2281 
2282 int
2283 _fini(void)
2284 {
2285 	int ret;
2286 
2287 	if ((ret = mod_remove(&xhci_modlinkage)) != 0)
2288 		return (ret);
2289 
2290 	if (xhci_taskq != NULL) {
2291 		taskq_destroy(xhci_taskq);
2292 		xhci_taskq = NULL;
2293 	}
2294 
2295 	ddi_soft_state_fini(&xhci_soft_state);
2296 
2297 	return (0);
2298 }
2299