1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2019, Joyent, Inc.
14  */
15 
16 /*
17  * Extensible Host Controller Interface (xHCI) USB Driver
18  *
19  * The xhci driver is an HCI driver for USB that bridges the gap between client
20  * device drivers and the hardware, implementing the actual way that we talk to
21  * devices. The xhci specification provides access to USB 3.x capable devices,
22  * as well as all prior generations. Like other host controllers, it both
23  * provides the way to talk to devices and is also treated like a hub (often
    * called the root hub).
24  *
25  * This driver is part of the USBA (USB Architecture). It implements the HCDI
26  * (host controller device interface) end of USBA. These entry points are used
27  * by the USBA on behalf of client device drivers to access their devices. The
28  * driver also provides notifications to deal with hot plug events, which are
29  * quite common in USB.
30  *
31  * ----------------
32  * USB Introduction
33  * ----------------
34  *
35  * To properly understand the xhci driver and the design of the USBA HCDI
36  * interfaces it implements, it helps to have a bit of background into how USB
37  * devices are structured and understand how they work at a high-level.
38  *
39  * USB devices, like PCI devices, are broken down into different classes of
40  * device. For example, with USB you have hubs, human-input devices (keyboards,
41  * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
42  * Many client drivers bind to an entire class of device, for example, the hubd
43  * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
44  * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
45  *
46  * USB SPEEDS AND VERSIONS
47  *
48  * USB devices are often referred to in two different ways. One way they're
49  * described is with the USB version that they conform to. In the wild, you're
50  * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
51  * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
52  * devices.
53  *
54  * The latter description describes the maximum theoretical speed of a given
55  * device. For example, a super-speed device theoretically caps out around 5
56  * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
57  *
58  * In general, each speed usually corresponds to a specific USB protocol
59  * generation. For example, all USB 3.0 devices are super-speed devices. All
60  * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
61  * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
62  * only a USB 1.x thing; they did not jump the fire line to USB 2.x.
63  *
64  * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
65  * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
66  * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
67  * device.
68  *
69  * USB ENDPOINTS
70  *
71  * A given USB device is made up of endpoints. A request, or transfer, is made
72  * to a specific USB endpoint. These endpoints can provide different services
73  * and have different expectations around the size of the data that'll be used
74  * in a given request and the periodicity of requests. Endpoints themselves are
75  * either used to make one-shot requests, for example, making requests to a mass
76  * storage device for a given sector, or for making periodic requests where you
77  * end up polling on the endpoint, for example, polling on a USB keyboard for
78  * keystrokes.
79  *
80  * Each endpoint encodes two different pieces of information: a direction and a
81  * type. There are two different directions: IN and OUT. These refer to the
82  * general direction that data moves relative to the operating system. For
83  * example, an IN transfer transfers data in to the operating system, from the
84  * device. An OUT transfer transfers data from the operating system, out to the
85  * device.
86  *
87  * There are four different kinds of endpoints:
88  *
89  *	BULK		These transfers are large transfers of data to or from
90  *			a device. The most common use for bulk transfers is for
91  *			mass storage devices, though they are often also used by
92  *			network devices and more. Bulk endpoints do not have an
93  *			explicit time component to them. They are always used
94  *			for one-shot transfers.
95  *
96  *	CONTROL		These transfers are used to manipulate devices
97  *			themselves and are used for USB protocol level
98  *			operations (whether device-specific, class-specific, or
99  *			generic across all of USB). Unlike other transfers,
100  *			control transfers are always bi-directional and are built
101  *			from multiple stages (setup, optional data, and status).
102  *
103  *	INTERRUPT	Interrupt transfers are used for small transfers that
104  *			happen infrequently, but need reasonable latency. A good
105  *			example of interrupt transfers is to receive input from
106  *			a USB keyboard. Interrupt-IN transfers are generally
107  *			polled, meaning that a client (device driver) opens up
108  *			an interrupt-IN pipe to poll on it, and receives
109  *			periodic updates whenever there is information
110  *			available. However, Interrupt transfers can be used
111  *			as one-shot transfers both going IN and OUT.
112  *
113  *	ISOCHRONOUS	These transfers are things that happen once per
114  *			time-interval at a very regular rate. A good example of
115  *			these transfers are for audio and video. A device may
116  *			describe an interval as 10ms at which point it will read
117  *			or write the next batch of data every 10ms and transform
118  *			it for the user. There are no one-shot Isochronous-IN
119  *			transfers. There are one-shot Isochronous-OUT transfers,
120  *			but these are used by device drivers to always provide
121  *			the system with sufficient data.
122  *
123  * To find out information about the endpoints, USB devices have a series of
124  * descriptors that cover different aspects of the device. For example, there
125  * are endpoint descriptors which cover the properties of endpoints such as the
126  * maximum packet size or polling interval.
127  *
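     * To make this concrete, the direction and type of an endpoint can be
     * recovered from its endpoint descriptor (usb_ep_descr_t) with the generic
     * constants that USBA provides. The following is only an illustrative
     * sketch of that decoding, not code from this driver:
     *
     *	usb_ep_descr_t *ep = ...;
     *	boolean_t dir_in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
     *	    USB_EP_DIR_IN;
     *	uchar_t type = ep->bmAttributes & USB_EP_ATTR_MASK;
     *
     * Here, type will be one of USB_EP_ATTR_CONTROL, USB_EP_ATTR_ISOCH,
     * USB_EP_ATTR_BULK, or USB_EP_ATTR_INTR, while properties such as the
     * maximum packet size and polling interval come from the descriptor's
     * wMaxPacketSize and bInterval fields.
     *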
128  * Descriptors exist at all levels of USB. For example, there are general
129  * descriptors for every device. The USB device descriptor is described in
130  * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
131  * that they program the device correctly; however, they are more often used by
132  * client device drivers. There are also descriptors that exist at a class
133  * level. For example, the hub class has a class-specific descriptor which
134  * describes properties of the hub. That information is requested and used
135  * by the hub driver.
136  *
137  * All of the different descriptors are gathered by the system and placed into a
138  * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
139  * drivers gain access to this cloud and then use it to open endpoints, which
140  * are called pipes in USBA (and some revisions of the USB specification).
141  *
142  * Each pipe gives access to a specific endpoint on the device which can be used
143  * to perform transfers of a specific type and direction. For example, a mass
144  * storage device often has three different endpoints, the default control
145  * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
146  * endpoint. The device driver ends up with three open pipes: one to the default
147  * control endpoint to configure the device, and then the other two are used to
148  * perform I/O.
149  *
150  * These pipe operations translate more or less directly into calls to a host
151  * controller driver. A request to open a pipe takes an endpoint descriptor that
152  * describes the properties of the pipe, and the host controller driver (this
153  * driver) goes through and does any work necessary to allow the client device
154  * driver to access it. Once the pipe is open, it either makes one-shot
155  * transfers specific to the transfer type or it starts performing a periodic
156  * poll of an endpoint.
157  *
158  * All of these different actions translate into requests to the host
159  * controller. The host controller driver itself is in charge of making sure
160  * that all of the required resources for polling are allocated with a request
161  * and then proceeds to deliver the periodic callbacks to the client driver.
162  *
163  * HUBS AND HOST CONTROLLERS
164  *
165  * Every device is always plugged into a hub, even if the device is itself a
166  * hub. This continues until we reach what we call the root-hub. The root-hub is
167  * special in that it is not an actual USB hub, but is integrated into the host
168  * controller and is manipulated in its own way. For example, the host
169  * controller is used to turn on and off a given port's power. This may happen
170  * over any interface, though the most common way is through PCI.
171  *
172  * In addition to the normal character device that exists for a host controller
173  * driver, as part of attaching, the host controller binds to an instance of the
174  * hubd driver. While the root-hub is a bit of a fiction, everyone models the
175  * root-hub the same as any other hub that's plugged in. The hub kernel
176  * module doesn't know that the hub isn't a physical device that's been plugged
177  * in. The host controller driver simulates that view by taking hub requests
178  * that are made and translating them into corresponding requests that are
179  * understood by the host controller, for example, reading and writing to a
180  * memory mapped register.
181  *
182  * The hub driver polls for changes in device state using an Interrupt-IN
183  * request, which is the same as is done for the root-hub. This allows the host
184  * controller driver to not have to know about the implementation of device hot
185  * plug, merely react to requests from a hub, the same as if it were an external
186  * device. When the hub driver detects a change, it will go through the
187  * corresponding state machine and attach or detach the corresponding client
188  * device driver, depending if the device was inserted or removed.
189  *
190  * We detect the changes for the Interrupt-IN primarily based on the port state
191  * change events that are delivered to the event ring. Whenever any event is
192  * fired, we use this to update the hub driver about _all_ ports with
193  * outstanding events. This more closely matches how a hub is supposed to behave
194  * and makes it less likely that the hub driver ends up leaving a change flag
195  * uncleared on a port.
196  *
197  * PACKET SIZES AND BURSTING
198  *
199  * A given USB endpoint has an explicit packet size and a number of packets that
200  * can be sent per time interval. These concepts are abstracted away from client
201  * device drivers usually, though they sometimes inform the upper bounds of what
202  * a device can perform.
203  *
204  * The host controller uses this information to transform arbitrary transfer
205  * requests into USB protocol packets. One of the nice things about the host
206  * controllers is that they abstract away all of the signaling and semantics of
207  * the actual USB protocols, allowing for life to be slightly easier in the
208  * operating system.
209  *
210  * That said, if the host controller is not programmed correctly, these can end
211  * up causing transaction errors and other problems in response to the data that
212  * the host controller is trying to send or receive.
213  *
214  * ------------
215  * Organization
216  * ------------
217  *
218  * The driver is made up of the following files. Many of these have their own
219  * theory statements to describe what they do. Here, we touch on the purpose of
220  * each of these files.
221  *
222  * xhci_command.c:	This file contains the logic to issue commands to the
223  *			controller as well as the actual functions that the
224  *			other parts of the driver use to issue those commands.
225  *
226  * xhci_context.c:	This file manages the controller-wide and per-device
227  *			context data structures that are shared with the
228  *			controller. See the xHCI Overview and Structure Layout
229  *			section for more information.
230  *
231  * xhci_dma.c:		This manages the allocation of DMA memory and DMA
232  *			attributes for the controller, whether the memory is for a
233  *			transfer or something else. This file also deals with
234  *			all the logic of getting data in and out of DMA buffers.
235  *
236  * xhci_endpoint.c:	This manages all of the logic of handling endpoints or
237  *			pipes. It deals with endpoint configuration, I/O
238  *			scheduling, timeouts, and callbacks to USBA.
239  *
240  * xhci_event.c:	This manages callbacks from the hardware to the driver.
241  *			This covers command completion notifications and I/O
242  *			notifications.
243  *
244  * xhci_hub.c:		This manages the virtual root-hub. It basically
245  *			implements and translates all of the USB level requests
246  *			into xhci specific operations. It also contains the
247  *			functions to register this hub with USBA.
248  *
249  * xhci_intr.c:		This manages the underlying interrupt allocation,
250  *			interrupt moderation, and interrupt routines.
251  *
252  * xhci_quirks.c:	This manages information about buggy hardware that's
253  *			been collected primarily from the experience of other
254  *			operating systems.
255  *
256  * xhci_ring.c:		This manages the abstraction of a ring in xhci, which is
257  *			the primary means of communication between the driver
258  *			and the hardware, whether for the controller or a device.
259  *
260  * xhci_usba.c:		This implements all of the HCDI functions required by
261  *			USBA. This is the main entry point that drivers and the
262  *			kernel frameworks will reach to start any operation.
263  *			Many functions here will end up in the command and
264  *			endpoint code.
265  *
266  * xhci.c:		This provides the main kernel DDI interfaces and
267  *			performs device initialization.
268  *
269  * xhci.h:		This is the primary header file which defines
270  *			illumos-specific data structures and constants to manage
271  *			the system.
272  *
273  * xhcireg.h:		This header file defines all of the register offsets,
274  *			masks, and related macros. It also contains all of the
275  *			constants that are used in various structures as defined
276  *			by the specification, such as command offsets, etc.
277  *
278  * xhci_ioctl.h:	This contains a few private ioctls that are used by a
279  *			private debugging command.
280  *
281  * cmd/xhci/xhci_portsc:	This is a private utility that can be useful for
282  *				debugging xhci state. It is the only consumer of
283  *				xhci_ioctl.h and the private ioctls.
284  *
285  * ----------------------------------
286  * xHCI Overview and Structure Layout
287  * ----------------------------------
288  *
289  * The design and structure of this driver follows from the way that the xHCI
290  * specification tells us that we have to work with hardware. First we'll give a
291  * rough summary of how that works, though the xHCI 1.1 specification should be
292  * referenced when going through this.
293  *
294  * There are three primary parts of the hardware -- registers, contexts, and
295  * rings. The registers are memory mapped registers that come in four sets,
296  * though all are found within the first BAR. These are used to program and
297  * control the hardware and aspects of the devices. Beyond more traditional
298  * device programming there are two primary sets of registers that are
299  * important:
300  *
301  *   o Port Status and Control Registers (XHCI_PORTSC)
302  *   o Doorbell Array (XHCI_DOORBELL)
303  *
304  * The port status and control registers are used to get and manipulate the
305  * status of a given device. For example, turning on and off the power to it.
306  * The Doorbell Array is used to kick off I/O operations and start the
307  * processing of an I/O ring.
308  *
309  * The contexts are data structures that represent various pieces of information
310  * in the controller. These contexts are generally filled out by the driver and
311  * then acknowledged and consumed by the hardware. There are controller-wide
312  * contexts (mostly managed in xhci_context.c) that are used to point to the
313  * contexts that exist for each device in the system. The primary context is
314  * called the Device Context Base Address Array (DCBAA).
315  *
316  * Each device in the system is allocated a 'slot', which is used to index into
317  * the DCBAA. Slots are assigned based on issuing commands to the controller.
318  * There are a fixed number of slots that determine the maximum number of
319  * devices that can end up being supported in the system. Note this includes all
320  * the devices plugged into the USB device tree, not just devices plugged into
321  * ports on the chassis.
322  *
323  * For each device, there is a context structure that describes properties of
324  * the device. For example, what speed is the device, is it a hub, etc. The
325  * context has slots for the device and for each endpoint on the device. As
326  * endpoints are enabled, their context information, which describes things like
327  * the maximum packet size, is filled in and enabled. The mapping between these
328  * contexts looks like:
329  *
330  *
331  *      DCBAA
332  *    +--------+                    Device Context
333  *    | Slot 0 |------------------>+--------------+
334  *    +--------+                   | Slot Context |
335  *    |  ...   |                   +--------------+       +----------+
336  *    +--------+   +------+        |  Endpoint 0  |------>| I/O Ring |
337  *    | Slot n |-->| NULL |        | Context (Bi) |       +----------+
338  *    +--------+   +------+        +--------------+
339  *                                 |  Endpoint 1  |
340  *                                 | Context (Out)|
341  *                                 +--------------+
342  *                                 |  Endpoint 1  |
343  *                                 | Context (In) |
344  *                                 +--------------+
345  *                                 |      ...     |
346  *                                 +--------------+
347  *                                 | Endpoint 15  |
348  *                                 | Context (In) |
349  *                                 +--------------+
350  *
351  * These contexts are always owned by the controller, though we can read them
352  * after various operations complete. Commands that toggle device state use a
353  * specific input context, which is a variant of the device context. The only
354  * difference is that it has an input context structure ahead of it to say which
355  * sections of the device context should be evaluated.
356  *
357  * Each active endpoint points us to an I/O ring, which leads us to the third
358  * main data structure that's used by the device: rings. Rings are made up of
359  * transfer request blocks (TRBs), which are joined together to form a given
360  * transfer descriptor (TD), which represents a single I/O request.
361  *
362  * These rings are used to issue I/O to individual endpoints, to issue commands
363  * to the controller, and to receive notification of changes and completions.
364  * Issued commands go on the special ring called the command ring while the
365  * change and completion notifications go on the event ring.  More details are
366  * available in xhci_ring.c. Each of these structures is represented by an
367  * xhci_ring_t.
368  *
369  * Each ring can be made up of one or more disjoint regions of DMA; however, we
370  * only use a single one. This also impacts some additional registers and
371  * structures that exist. The event ring has an indirection table called the
372  * Event Ring Segment Table (ERST). Each entry in the table (a segment)
373  * describes a chunk of the event ring.
374  *
375  * One other thing worth calling out is the scratchpad. The scratchpad is a way
376  * for the controller to be given arbitrary memory by the OS that it can use.
377  * There are two parts to the scratchpad. The first part is an array whose
378  * entries contain the physical addresses of the scratchpad pages. The second
379  * part that we allocate is the actual pages themselves.
380  *
381  * -----------------------------
382  * Endpoint State and Management
383  * -----------------------------
384  *
385  * Endpoint management is one of the key parts of the xhci driver: every
386  * endpoint is a pipe that a device driver uses, so they are our primary
387  * currency. Endpoints are enabled and disabled when the client device drivers
388  * open and close a pipe. When an endpoint is enabled, we have to fill in an
389  * endpoint's context structure with information about the endpoint. These
390  * basically tell the controller important properties which it uses to ensure
391  * that there is adequate bandwidth for the device.
392  *
393  * Each endpoint has its own ring as described in the previous section. We place
394  * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
395  * Responses are placed on the event ring; in other words, the rings associated
396  * with an endpoint are purely for producing I/O.
397  *
398  * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
399  * These states generally correspond to the endpoint's ability to process I/O
400  * and to our handling of timeouts. The driver follows a similar state machine
401  * as described there. There are some deviations. For example, what they
402  * describe as 'running' we break into both the Idle and Running states below.
403  * We also have a notion of timed out and quiescing. The following image
404  * summarizes the states and transitions:
405  *
406  *     +------+                                +-----------+
407  *     | Idle |---------*--------------------->|  Running  |<-+
408  *     +------+         . I/O queued on        +-----------+  |
409  *        ^               ring and timeout        |  |  |     |
410  *        |               scheduled.              |  |  |     |
411  *        |                                       |  |  |     |
412  *        +-----*---------------------------------+  |  |     |
413  *        |     . No I/Os remain                     |  |     |
414  *        |                                          |  |     |
415  *        |                +------*------------------+  |     |
416  *        |                |      . Timeout             |     |
417  *        |                |        fires for           |     |
418  *        |                |        I/O                 |     |
419  *        |                v                            v     |
420  *        |          +-----------+                +--------+  |
421  *        |          | Timed Out |                | Halted |  |
422  *        |          +-----------+                +--------+  |
423  *        |             |                           |         |
424  *        |             |   +-----------+           |         |
425  *        |             +-->| Quiescing |<----------+         |
426  *        |                 +-----------+                     |
427  *        |   No TRBs.           |                . TRBs      |
428  *        |   remain .           |                . Remain    |
429  *        +----------*----<------+-------->-------*-----------+
430  *
431  * Normally, a given endpoint will oscillate between having TRBs scheduled and
432  * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
433  * making sure that we're processing the ring, presuming that the endpoint isn't
434  * in one of the error states.
435  *
436  * To detect device hangs, we have an active timeout(9F) per active endpoint
437  * that ticks at a one second rate while we still have TRBs outstanding on an
438  * endpoint. Once all outstanding TRBs have been processed, the timeout will
439  * stop itself and there will be no active checking until the endpoint has I/O
440  * scheduled on it again.
441  *
442  * There are two primary ways that things can go wrong on the endpoint. We can
443  * either have a timeout or an event that transitions the endpoint to the Halted
444  * state. In the halted state, we need to issue explicit commands to reset the
445  * endpoint before removing the I/O.
446  *
447  * The way we handle both a timeout and a halted condition is similar, but the
448  * way they are triggered is different. When we detect a halted condition, we
449  * don't immediately clean it up; instead, we wait for the client device driver
450  * (or USBA on its behalf) to issue a pipe reset. When we detect a timeout, we
451  * immediately take action (assuming no other action is ongoing).
452  *
453  * In both cases, we quiesce the device, which takes the endpoint from whatever
454  * state it may be in and performs the appropriate actions based on the state
455  * machine in xHCI 1.1 / 4.8.3. The end of quiescing leaves the endpoint
456  * stopped, which allows us to update the ring's pointer and
457  * remove any TRBs that are causing problems.
458  *
459  * As part of all this, we ensure that only one path at a time can be quiescing
460  * the device. Any requests to schedule I/O during this time will
461  * generally fail.
462  *
463  * The following image describes the state machine for the timeout logic. It
464  * ties into the image above.
465  *
466  *         +----------+                            +---------+
467  *         | Disabled |-----*--------------------->| Enabled |<--+
468  *         +----------+     . TRBs scheduled       +---------+   *. 1 sec timer
469  *             ^              and no active          |  |  |     |  fires and
470  *             |              timer.                 |  |  |     |  another
471  *             |                                     |  |  +--+--+  quiesce, in
472  *             |                                     |  |     |     a bad state,
473  *             +------*------------------------------+  |     ^     or decrement
474  *             |      . 1 sec timer                     |     |     I/O timeout
475  *             |        fires and                       |     |
476  *             |        no TRBs or                      |     +--------------+
477  *             |        endpoint shutdown               |                    |
478  *             |                                        *. . timer counter   |
479  *             ^                                        |    reaches zero    |
480  *             |                                        v                    |
481  *             |                                +--------------+             |
482  *             +-------------*---------------<--| Quiesce ring |->---*-------+
483  *                           . No more          | and fail I/O |     . restart
484  *                             I/Os             +--------------+       timer as
485  *                                                                     more I/Os
486  *
487  * As we described above, when there are active TRBs and I/Os, a 1 second
488  * timeout(9F) will be active. Each second, we decrement a counter on the
489  * current, active I/O until either a new I/O takes the head, or the counter
490  * reaches zero. If the counter reaches zero, then we go through, quiesce the
491  * ring, and then clean things up.
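     *
     * In rough terms, each one second tick for an endpoint behaves like the
     * sketch below. The field and function names here are illustrative only and
     * are not the driver's actual identifiers:
     *
     *	xhci_transfer_t *head = list_head(&ep->transfer_list);
     *	if (head == NULL)
     *		return;
     *	if (--head->timeout_seconds == 0)
     *		quiesce the ring and fail the I/O;
     *	else
     *		rearm the one second timeout(9F);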
492  *
493  * ------------------
494  * Periodic Endpoints
495  * ------------------
496  *
497  * It's worth calling out periodic endpoints explicitly, as they operate
498  * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
499  * Isochronous-IN. The USBA often uses the term polling for these. That's
500  * because the client only needs to make a single API call; however, they'll
501  * receive multiple callbacks until either an error occurs or polling is
502  * requested to be terminated.
503  *
504  * When we have one of these periodic requests, we end up always rescheduling
505  * I/O requests, as well as having a specific number of pre-existing I/O
506  * requests to cover the periodic needs, in case of latency spikes. Normally,
507  * when replying to a request, we use the request handle that we were given.
508  * However, when we have a periodic request, we're required to duplicate the
509  * handle before handing data back to the client.
510  *
511  * However, the duplication is a bit tricky. For everything that was duplicated,
512  * the framework expects us to submit data. Because of that, we don't duplicate
513  * them until they are needed. This minimizes the likelihood that we have
514  * outstanding requests to deal with when we encounter a fatal polling failure.
515  *
516  * Most of the polling setup logic happens in xhci_usba.c in
517  * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
518  * xhci_endpoint.c.
519  *
520  * ----------------
521  * Structure Layout
522  * ----------------
523  *
524  * The following images relate the core data structures. The primary structure
525  * in the system is the xhci_t. This is the per-controller data structure that
526  * exists for each instance of the driver. From there, each device in the system
527  * is represented by an xhci_device_t and each endpoint is represented by an
528  * xhci_endpoint_t. For each client that opens a given endpoint, there is an
529  * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
530  * system.
531  *
532  *     +------------------------+
533  *     | Per-Controller         |
534  *     | Structure              |
535  *     | xhci_t                 |
536  *     |                        |
537  *     | uint_t              ---+--> Capability regs offset
538  *     | uint_t              ---+--> Operational regs offset
539  *     | uint_t              ---+--> Runtime regs offset
540  *     | uint_t              ---+--> Doorbell regs offset
541  *     | xhci_state_flags_t  ---+--> Device state flags
542  *     | xhci_quirks_t       ---+--> Device quirk flags
543  *     | xhci_capability_t   ---+--> Controller capability structure
544  *     | xhci_dcbaa_t        ---+----------------------------------+
545  *     | xhci_scratchpad_t   ---+---------+                        |
546  *     | xhci_command_ring_t ---+------+  |                        v
547  *     | xhci_event_ring_t   ---+----+ |  |              +---------------------+
548  *     | xhci_usba_t         ---+--+ | |  |              | Device Context      |
549  *     +------------------------+  | | |  |              | Base Address        |
550  *                                 | | |  |              | Array Structure     |
551  *                                 | | |  |              | xhci_dcbaa_t        |
552  * +-------------------------------+ | |  |              |                     |
553  * | +-------------------------------+ |  |  DCBAA KVA <-+--        uint64_t * |
554  * | |    +----------------------------+  | DMA Buffer <-+-- xhci_dma_buffer_t |
555  * | |    v                               |              +---------------------+
556  * | | +--------------------------+       +-----------------------+
557  * | | | Event Ring               |                               |
558  * | | | Management               |                               |
559  * | | | xhci_event_ring_t        |                               v
560  * | | |                          |   Event Ring        +----------------------+
561  * | | | xhci_event_segment_t * --|-> Segment VA        |   Scratchpad (Extra  |
562  * | | | xhci_dma_buffer_t      --|-> Segment DMA Buf.  |   Controller Memory) |
563  * | | | xhci_ring_t            --|--+                  |    xhci_scratchpad_t |
564  * | | +--------------------------+  |      Scratchpad  |                      |
565  * | |                               | Base Array KVA <-+-          uint64_t * |
566  * | +------------+                  | Array DMA Buf. <-+-   xhci_dma_buffer_t |
567  * |              v                  | Scratchpad DMA <-+- xhci_dma_buffer_t * |
568  * |   +---------------------------+ | Buffer per page  +----------------------+
569  * |   | Command Ring              | |
570  * |   | xhci_command_ring_t       | +------------------------------+
571  * |   |                           |                                |
572  * |   | xhci_ring_t             --+-> Command Ring --->------------+
573  * |   | list_t                  --+-> Command List                 v
574  * |   | timeout_id_t            --+-> Timeout State     +---------------------+
575  * |   | xhci_command_ring_state_t +-> State Flags       | I/O Ring            |
576  * |   +---------------------------+                     | xhci_ring_t         |
577  * |                                                     |                     |
578  * |                                     Ring DMA Buf. <-+-- xhci_dma_buffer_t |
579  * |                                       Ring Length <-+--            uint_t |
580  * |                                    Ring Entry KVA <-+--      xhci_trb_t * |
581  * |    +---------------------------+        Ring Head <-+--            uint_t |
582  * +--->| USBA State                |        Ring Tail <-+--            uint_t |
583  *      | xhci_usba_t               |       Ring Cycle <-+--            uint_t |
584  *      |                           |                    +---------------------+
585  *      | usba_hcdi_ops_t *        -+-> USBA Ops Vector                       ^
586  *      | usb_dev_dscr_t           -+-> USB Virtual Device Descriptor         |
587  *      | usb_ss_hub_descr_t       -+-> USB Virtual Hub Descriptor            |
588  *      | usba_pipe_handle_data_t * +-> Interrupt polling client              |
589  *      | usb_intr_req_t           -+-> Interrupt polling request             |
590  *      | uint32_t                --+-> Interrupt polling device mask         |
591  *      | list_t                  --+-> Pipe List (Active Users)              |
592  *      | list_t                  --+-------------------+                     |
593  *      +---------------------------+                   |                     ^
594  *                                                      |                     |
595  *                                                      v                     |
596  *     +-------------------------------+             +---------------+        |
597  *     | USB Device                    |------------>| USB Device    |--> ... |
598  *     | xhci_device_t                 |             | xhci_device_t |        |
599  *     |                               |             +---------------+        |
600  *     | usb_port_t                  --+-> USB Port plugged into              |
601  *     | uint8_t                     --+-> Slot Number                        |
602  *     | boolean_t                   --+-> Address Assigned                   |
603  *     | usba_device_t *             --+-> USBA Device State                  |
604  *     | xhci_dma_buffer_t           --+-> Input Context DMA Buffer           |
605  *     | xhci_input_context_t *      --+-> Input Context KVA                  |
606  *     | xhci_slot_context_t *       --+-> Input Slot Context KVA             |
607  *     | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA         |
608  *     | xhci_dma_buffer_t           --+-> Output Context DMA Buffer          |
609  *     | xhci_slot_context_t *       --+-> Output Slot Context KVA            ^
610  *     | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA        |
611  *     | xhci_endpoint_t *[]         --+-> Endpoint Tracking ---+             |
612  *     +-------------------------------+                        |             |
613  *                                                              |             |
614  *                                                              v             |
615  *     +------------------------------+            +-----------------+        |
616  *     | Endpoint Data                |----------->| Endpoint Data   |--> ... |
617  *     | xhci_endpoint_t              |            | xhci_endpoint_t |        |
618  *     |                              |            +-----------------+        |
619  *     | int                        --+-> Endpoint Number                     |
620  *     | int                        --+-> Endpoint Type                       |
621  *     | xhci_endpoint_state_t      --+-> Endpoint State                      |
622  *     | timeout_id_t               --+-> Endpoint Timeout State              |
623  *     | usba_pipe_handle_data_t *  --+-> USBA Client Handle                  |
624  *     | xhci_ring_t                --+-> Endpoint I/O Ring  -------->--------+
625  *     | list_t                     --+-> Transfer List --------+
626  *     +------------------------------+                         |
627  *                                                              v
628  *     +-------------------------+                  +--------------------+
629  *     | Transfer Structure      |----------------->| Transfer Structure |-> ...
630  *     | xhci_transfer_t         |                  | xhci_transfer_t    |
631  *     |                         |                  +--------------------+
632  *     | xhci_dma_buffer_t     --+-> I/O DMA Buffer
633  *     | uint_t                --+-> Number of TRBs
634  *     | uint_t                --+-> Short transfer data
635  *     | uint_t                --+-> Timeout seconds remaining
636  *     | usb_cr_t              --+-> USB Transfer return value
637  *     | boolean_t             --+-> Data direction
638  *     | xhci_trb_t *          --+-> Host-order transfer requests for I/O
639  *     | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
640  *     | usb_opaque_t          --+-> USBA Request Handle
641  *     +-------------------------+
642  *
643  * -------------
644  * Lock Ordering
645  * -------------
646  *
647  * There are three different tiers of locks that exist in the driver. First,
648  * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
649  * data for that instance of the controller. If there are multiple instances of
650  * the xHCI controller in the system, each one is independent and protected
651  * separately. They do not share any data.
652  *
653  * From there, there are two other, specific locks in the system:
654  *
655  *   o xhci_command_ring_t`xcr_lock
656  *   o xhci_device_t`xd_imtx
657  *
658  * There is only one xcr_lock per controller, like the xhci_lock. It protects
659  * the state of the command ring. However, there is one xd_imtx per device.
660  * Recall that each device is scoped to a given controller. This protects the
661  * input slot context for a given device.
662  *
663  * There are a few important rules to keep in mind here that are true
664  * universally throughout the driver:
665  *
666  * 1) Always grab the xhci_t`xhci_lock, before grabbing any of the other locks.
667  * 2) A given xhci_device_t`xd_imtx, must be taken before grabbing the
668  *    xhci_command_ring_t`xcr_lock.
669  * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
670  *    at a given time. In other words, we should never be manipulating the input
671  *    context of two different devices at once.
672  * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
673  *    endpoint timer. Conversely, the endpoint specific logic should never enter
674  *    this lock.
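     *
     * As a sketch of what these rules mean in practice, a path that needs to
     * update a device's input context and then issue a command would acquire and
     * release the locks in the following order; the local variable names are
     * illustrative, and how the xhci_device_t and command ring pointers are
     * obtained is omitted here:
     *
     *	mutex_enter(&xhcip->xhci_lock);
     *	mutex_enter(&xd->xd_imtx);
     *	mutex_enter(&xcrp->xcr_lock);
     *	...
     *	mutex_exit(&xcrp->xcr_lock);
     *	mutex_exit(&xd->xd_imtx);
     *	mutex_exit(&xhcip->xhci_lock);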
675  *
676  * --------------------
677  * Relationship to EHCI
678  * --------------------
679  *
680  * On some Intel chipsets, a given physical port on the system may be routed to
681  * one of the EHCI or xHCI controllers. This association can be dynamically
682  * changed by writing to platform specific registers as handled by the quirk
683  * logic in xhci_quirks.c.
684  *
685  * As these ports may support USB 3.x speeds, we always route all such ports to
686  * the xHCI controller, when supported. In addition, to minimize disruptions
687  * from devices being enumerated and attached to the EHCI driver and then
688  * disappearing, we generally attempt to load the xHCI controller before the
689  * EHCI controller. This logic is not done in the driver; however, it is done in
690  * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
691  * function consconfig_load_drivers().
692  *
693  * -----------
694  * Future Work
695  * -----------
696  *
697  * The primary future work in this driver spans two different, but related
698  * areas. The first area is around controller resets and how they tie into FM.
699  * Presently, we do not have a good way to handle controllers coming and going
700  * in the broader USB stack or properly reconfigure the device after a reset.
701  * Secondly, we don't handle the suspend and resume of devices and drivers.
702  */
703 
704 #include <sys/param.h>
705 #include <sys/modctl.h>
706 #include <sys/conf.h>
707 #include <sys/devops.h>
708 #include <sys/ddi.h>
709 #include <sys/sunddi.h>
710 #include <sys/cmn_err.h>
711 #include <sys/ddifm.h>
712 #include <sys/pci.h>
713 #include <sys/class.h>
714 #include <sys/policy.h>
715 
716 #include <sys/usb/hcd/xhci/xhci.h>
717 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
718 
719 /*
720  * We want to use the first BAR to access its registers. The regs[] array is
721  * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
722  * will always be the first BAR.
723  */
724 #define	XHCI_REG_NUMBER	1
725 
726 /*
727  * This task queue exists as a global taskq that is used for resetting the
728  * device in the face of FM or runtime errors. Each instance of the device
729  * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
730  * know that we should always be able to dispatch such an event.
731  */
732 static taskq_t *xhci_taskq;
733 
734 /*
735  * Global soft state for per-instance data. Note that we must use the soft state
736  * routines and cannot use the ddi_set_driver_private() routines. The USB
737  * framework presumes that it can use the dip's private data.
738  */
739 void *xhci_soft_state;
740 
741 /*
742  * This is the time in microseconds that we wait after a controller reset
743  * before we consider reading any register. There are some controllers that
744  * want at least 1 ms; therefore, we default to 10 ms.
745  */
746 clock_t xhci_reset_delay = 10000;
747 
748 void
749 xhci_error(xhci_t *xhcip, const char *fmt, ...)
750 {
751 	va_list ap;
752 
753 	va_start(ap, fmt);
754 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
755 		vdev_err(xhcip->xhci_dip, CE_WARN, fmt, ap);
756 	} else {
757 		vcmn_err(CE_WARN, fmt, ap);
758 	}
759 	va_end(ap);
760 }
761 
762 void
763 xhci_log(xhci_t *xhcip, const char *fmt, ...)
764 {
765 	va_list ap;
766 
767 	va_start(ap, fmt);
768 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
769 		vdev_err(xhcip->xhci_dip, CE_NOTE, fmt, ap);
770 	} else {
771 		vcmn_err(CE_NOTE, fmt, ap);
772 	}
773 	va_end(ap);
774 }
775 
776 /*
777  * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
778  * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
779  * things here. A simple bitwise-and will take care of this. And hey, it could
780  * always be more complex: USBA could clone!
781  */
782 static dev_info_t *
783 xhci_get_dip(dev_t dev)
784 {
785 	xhci_t *xhcip;
786 	int instance = getminor(dev) & ~HUBD_IS_ROOT_HUB;
787 
788 	xhcip = ddi_get_soft_state(xhci_soft_state, instance);
789 	if (xhcip != NULL)
790 		return (xhcip->xhci_dip);
791 	return (NULL);
792 }
793 
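    /*
     * The xhci_get8/16/32/64 functions read a register of the given width from
     * one of the controller's four register spaces (capability, operational,
     * runtime, or doorbell). The caller passes the register space and an offset
     * relative to it; we translate that into an offset from the mapped register
     * base and perform the access through the DDI access handle.
     */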
794 uint8_t
795 xhci_get8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
796 {
797 	uintptr_t addr, roff;
798 
799 	switch (rtt) {
800 	case XHCI_R_CAP:
801 		roff = xhcip->xhci_regs_capoff;
802 		break;
803 	case XHCI_R_OPER:
804 		roff = xhcip->xhci_regs_operoff;
805 		break;
806 	case XHCI_R_RUN:
807 		roff = xhcip->xhci_regs_runoff;
808 		break;
809 	case XHCI_R_DOOR:
810 		roff = xhcip->xhci_regs_dooroff;
811 		break;
812 	default:
813 		panic("called %s with bad reg type: %d", __func__, rtt);
814 	}
815 	ASSERT(roff != PCI_EINVAL32);
816 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
817 
818 	return (ddi_get8(xhcip->xhci_regs_handle, (void *)addr));
819 }
820 
821 uint16_t
822 xhci_get16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
823 {
824 	uintptr_t addr, roff;
825 
826 	switch (rtt) {
827 	case XHCI_R_CAP:
828 		roff = xhcip->xhci_regs_capoff;
829 		break;
830 	case XHCI_R_OPER:
831 		roff = xhcip->xhci_regs_operoff;
832 		break;
833 	case XHCI_R_RUN:
834 		roff = xhcip->xhci_regs_runoff;
835 		break;
836 	case XHCI_R_DOOR:
837 		roff = xhcip->xhci_regs_dooroff;
838 		break;
839 	default:
840 		panic("called %s with bad reg type: %d", __func__, rtt);
841 	}
842 	ASSERT(roff != PCI_EINVAL32);
843 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
844 
845 	return (ddi_get16(xhcip->xhci_regs_handle, (void *)addr));
846 }
847 
848 uint32_t
849 xhci_get32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
850 {
851 	uintptr_t addr, roff;
852 
853 	switch (rtt) {
854 	case XHCI_R_CAP:
855 		roff = xhcip->xhci_regs_capoff;
856 		break;
857 	case XHCI_R_OPER:
858 		roff = xhcip->xhci_regs_operoff;
859 		break;
860 	case XHCI_R_RUN:
861 		roff = xhcip->xhci_regs_runoff;
862 		break;
863 	case XHCI_R_DOOR:
864 		roff = xhcip->xhci_regs_dooroff;
865 		break;
866 	default:
867 		panic("called %s with bad reg type: %d", __func__, rtt);
868 	}
869 	ASSERT(roff != PCI_EINVAL32);
870 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
871 
872 	return (ddi_get32(xhcip->xhci_regs_handle, (void *)addr));
873 }
874 
875 uint64_t
876 xhci_get64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
877 {
878 	uintptr_t addr, roff;
879 
880 	switch (rtt) {
881 	case XHCI_R_CAP:
882 		roff = xhcip->xhci_regs_capoff;
883 		break;
884 	case XHCI_R_OPER:
885 		roff = xhcip->xhci_regs_operoff;
886 		break;
887 	case XHCI_R_RUN:
888 		roff = xhcip->xhci_regs_runoff;
889 		break;
890 	case XHCI_R_DOOR:
891 		roff = xhcip->xhci_regs_dooroff;
892 		break;
893 	default:
894 		panic("called %s with bad reg type: %d", __func__, rtt);
895 	}
896 	ASSERT(roff != PCI_EINVAL32);
897 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
898 
899 	return (ddi_get64(xhcip->xhci_regs_handle, (void *)addr));
900 }
901 
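    /*
     * The xhci_put8/16/32/64 functions are the write counterparts to the read
     * functions above: they resolve the requested register space to its offset
     * and then write the value through the DDI access handle.
     */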
902 void
903 xhci_put8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint8_t val)
904 {
905 	uintptr_t addr, roff;
906 
907 	switch (rtt) {
908 	case XHCI_R_CAP:
909 		roff = xhcip->xhci_regs_capoff;
910 		break;
911 	case XHCI_R_OPER:
912 		roff = xhcip->xhci_regs_operoff;
913 		break;
914 	case XHCI_R_RUN:
915 		roff = xhcip->xhci_regs_runoff;
916 		break;
917 	case XHCI_R_DOOR:
918 		roff = xhcip->xhci_regs_dooroff;
919 		break;
920 	default:
921 		panic("called %s with bad reg type: %d", __func__, rtt);
922 	}
923 	ASSERT(roff != PCI_EINVAL32);
924 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
925 
926 	ddi_put8(xhcip->xhci_regs_handle, (void *)addr, val);
927 }
928 
929 void
930 xhci_put16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint16_t val)
931 {
932 	uintptr_t addr, roff;
933 
934 	switch (rtt) {
935 	case XHCI_R_CAP:
936 		roff = xhcip->xhci_regs_capoff;
937 		break;
938 	case XHCI_R_OPER:
939 		roff = xhcip->xhci_regs_operoff;
940 		break;
941 	case XHCI_R_RUN:
942 		roff = xhcip->xhci_regs_runoff;
943 		break;
944 	case XHCI_R_DOOR:
945 		roff = xhcip->xhci_regs_dooroff;
946 		break;
947 	default:
948 		panic("called %s with bad reg type: %d", __func__, rtt);
949 	}
950 	ASSERT(roff != PCI_EINVAL32);
951 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
952 
953 	ddi_put16(xhcip->xhci_regs_handle, (void *)addr, val);
954 }
955 
956 void
957 xhci_put32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint32_t val)
958 {
959 	uintptr_t addr, roff;
960 
961 	switch (rtt) {
962 	case XHCI_R_CAP:
963 		roff = xhcip->xhci_regs_capoff;
964 		break;
965 	case XHCI_R_OPER:
966 		roff = xhcip->xhci_regs_operoff;
967 		break;
968 	case XHCI_R_RUN:
969 		roff = xhcip->xhci_regs_runoff;
970 		break;
971 	case XHCI_R_DOOR:
972 		roff = xhcip->xhci_regs_dooroff;
973 		break;
974 	default:
975 		panic("called %s with bad reg type: %d", __func__, rtt);
976 	}
977 	ASSERT(roff != PCI_EINVAL32);
978 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
979 
980 	ddi_put32(xhcip->xhci_regs_handle, (void *)addr, val);
981 }
982 
983 void
984 xhci_put64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint64_t val)
985 {
986 	uintptr_t addr, roff;
987 
988 	switch (rtt) {
989 	case XHCI_R_CAP:
990 		roff = xhcip->xhci_regs_capoff;
991 		break;
992 	case XHCI_R_OPER:
993 		roff = xhcip->xhci_regs_operoff;
994 		break;
995 	case XHCI_R_RUN:
996 		roff = xhcip->xhci_regs_runoff;
997 		break;
998 	case XHCI_R_DOOR:
999 		roff = xhcip->xhci_regs_dooroff;
1000 		break;
1001 	default:
1002 		panic("called %s with bad reg type: %d", __func__, rtt);
1003 	}
1004 	ASSERT(roff != PCI_EINVAL32);
1005 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1006 
1007 	ddi_put64(xhcip->xhci_regs_handle, (void *)addr, val);
1008 }
1009 
1010 int
1011 xhci_check_regs_acc(xhci_t *xhcip)
1012 {
1013 	ddi_fm_error_t de;
1014 
1015 	/*
1016 	 * Treat the case where we can't check as fine so that the code can be
1017 	 * kept simple.
1018 	 */
1019 	if (!DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps))
1020 		return (DDI_FM_OK);
1021 
1022 	ddi_fm_acc_err_get(xhcip->xhci_regs_handle, &de, DDI_FME_VERSION);
1023 	ddi_fm_acc_err_clear(xhcip->xhci_regs_handle, DDI_FME_VERSION);
1024 	return (de.fme_status);
1025 }
1026 
1027 /*
1028  * As a leaf PCIe driver, we just post the ereport and continue on.
1029  */
1030 /* ARGSUSED */
1031 static int
1032 xhci_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
1033 {
1034 	pci_ereport_post(dip, err, NULL);
1035 	return (err->fme_status);
1036 }
1037 
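     /*
      * Tear down whatever FM state xhci_fm_init() set up: unregister the error
      * handler and ereport support if they were enabled, and then detach from
      * the I/O fault management framework.
      */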
1038 static void
1039 xhci_fm_fini(xhci_t *xhcip)
1040 {
1041 	if (xhcip->xhci_fm_caps == 0)
1042 		return;
1043 
1044 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1045 		ddi_fm_handler_unregister(xhcip->xhci_dip);
1046 
1047 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1048 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1049 		pci_ereport_teardown(xhcip->xhci_dip);
1050 
1051 	ddi_fm_fini(xhcip->xhci_dip);
1052 }
1053 
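     /*
      * Set up our fault management capabilities based on the "fm_capable"
      * property, defaulting to full capabilities. If any capabilities are
      * enabled, register with the I/O fault management framework and, when
      * error callbacks are supported, install xhci_fm_error_cb().
      */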
1054 static void
1055 xhci_fm_init(xhci_t *xhcip)
1056 {
1057 	ddi_iblock_cookie_t iblk;
1058 	int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1059 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
1060 
1061 	xhcip->xhci_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
1062 	    DDI_PROP_DONTPASS, "fm_capable", def);
1063 
1064 	if (xhcip->xhci_fm_caps < 0) {
1065 		xhcip->xhci_fm_caps = 0;
1066 	} else if (xhcip->xhci_fm_caps & ~def) {
1067 		xhcip->xhci_fm_caps &= def;
1068 	}
1069 
1070 	if (xhcip->xhci_fm_caps == 0)
1071 		return;
1072 
1073 	ddi_fm_init(xhcip->xhci_dip, &xhcip->xhci_fm_caps, &iblk);
1074 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1075 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1076 		pci_ereport_setup(xhcip->xhci_dip);
1077 	}
1078 
1079 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1080 		ddi_fm_handler_register(xhcip->xhci_dip,
1081 		    xhci_fm_error_cb, xhcip);
1082 	}
1083 }
1084 
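     /*
      * Poll a 32-bit register in the given register space until the bits in
      * 'mask' match 'targ', checking up to 'tries' times and delaying 'delay_ms'
      * milliseconds between reads. Returns 0 on success, EIO if an FM register
      * access error is detected, and ETIMEDOUT if the condition is never met.
      */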
1085 static int
1086 xhci_reg_poll(xhci_t *xhcip, xhci_reg_type_t rt, int reg, uint32_t mask,
1087     uint32_t targ, uint_t tries, int delay_ms)
1088 {
1089 	uint_t i;
1090 
1091 	for (i = 0; i < tries; i++) {
1092 		uint32_t val = xhci_get32(xhcip, rt, reg);
1093 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1094 			ddi_fm_service_impact(xhcip->xhci_dip,
1095 			    DDI_SERVICE_LOST);
1096 			return (EIO);
1097 		}
1098 
1099 		if ((val & mask) == targ)
1100 			return (0);
1101 
1102 		delay(drv_usectohz(delay_ms * 1000));
1103 	}
1104 	return (ETIMEDOUT);
1105 }
1106 
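     /*
      * Map the controller's register space (the first BAR) with little-endian,
      * strictly ordered access attributes, using DDI_FLAGERR_ACC when FM access
      * checking is enabled so that register access errors can be detected.
      */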
1107 static boolean_t
1108 xhci_regs_map(xhci_t *xhcip)
1109 {
1110 	off_t memsize;
1111 	int ret;
1112 	ddi_device_acc_attr_t da;
1113 
1114 	if (ddi_dev_regsize(xhcip->xhci_dip, XHCI_REG_NUMBER, &memsize) !=
1115 	    DDI_SUCCESS) {
1116 		xhci_error(xhcip, "failed to get register set size");
1117 		return (B_FALSE);
1118 	}
1119 
1120 	bzero(&da, sizeof (ddi_device_acc_attr_t));
1121 	da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1122 	da.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1123 	da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1124 	if (DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps)) {
1125 		da.devacc_attr_access = DDI_FLAGERR_ACC;
1126 	} else {
1127 		da.devacc_attr_access = DDI_DEFAULT_ACC;
1128 	}
1129 
1130 	ret = ddi_regs_map_setup(xhcip->xhci_dip, XHCI_REG_NUMBER,
1131 	    &xhcip->xhci_regs_base, 0, memsize, &da, &xhcip->xhci_regs_handle);
1132 
1133 	if (ret != DDI_SUCCESS) {
1134 		xhci_error(xhcip, "failed to map device registers: %d", ret);
1135 		return (B_FALSE);
1136 	}
1137 
1138 	return (B_TRUE);
1139 }
1140 
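     /*
      * Determine the offsets of the operational, runtime, and doorbell register
      * spaces relative to the mapped base. The capability registers always start
      * at offset zero; the others are discovered from the CAPLENGTH, RTSOFF, and
      * DBOFF capability registers, masking off their reserved low bits.
      */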
1141 static boolean_t
1142 xhci_regs_init(xhci_t *xhcip)
1143 {
1144 	/*
1145 	 * The capabilities always begin at offset zero.
1146 	 */
1147 	xhcip->xhci_regs_capoff = 0;
1148 	xhcip->xhci_regs_operoff = xhci_get8(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1149 	xhcip->xhci_regs_runoff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_RTSOFF);
1150 	xhcip->xhci_regs_runoff &= ~0x1f;
1151 	xhcip->xhci_regs_dooroff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_DBOFF);
1152 	xhcip->xhci_regs_dooroff &= ~0x3;
1153 
1154 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1155 		xhci_error(xhcip, "failed to initialize controller register "
1156 		    "offsets: encountered FM register error");
1157 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1158 		return (B_FALSE);
1159 	}
1160 
1161 	return (B_TRUE);
1162 }
1163 
1164 /*
1165  * Read various parameters from PCI configuration space and from the Capability
1166  * registers that we'll need to register the device. We cache all of the
1167  * Capability registers.
1168  */
1169 static boolean_t
1170 xhci_read_params(xhci_t *xhcip)
1171 {
1172 	uint8_t usb;
1173 	uint16_t vers;
1174 	uint32_t struc1, struc2, struc3, cap1, cap2, pgsz;
1175 	uint32_t psize, pbit, capreg;
1176 	xhci_capability_t *xcap;
1177 	unsigned long ps;
1178 
1179 	/*
1180 	 * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
1181 	 * a few emulated systems don't support reading at offset 0x2 for the
1182 	 * version. Instead we need to read the caplength register and get the
1183 	 * upper two bytes.
1184 	 */
1185 	capreg = xhci_get32(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1186 	vers = XHCI_VERSION_MASK(capreg);
1187 	usb = pci_config_get8(xhcip->xhci_cfg_handle, PCI_XHCI_USBREV);
1188 	struc1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS1);
1189 	struc2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS2);
1190 	struc3 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS3);
1191 	cap1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS1);
1192 	cap2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS2);
1193 	pgsz = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PAGESIZE);
1194 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1195 		xhci_error(xhcip, "failed to read controller parameters: "
1196 		    "encountered FM register error");
1197 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1198 		return (B_FALSE);
1199 	}
1200 
1201 	xcap = &xhcip->xhci_caps;
1202 	xcap->xcap_usb_vers = usb;
1203 	xcap->xcap_hci_vers = vers;
1204 	xcap->xcap_max_slots = XHCI_HCS1_DEVSLOT_MAX(struc1);
1205 	xcap->xcap_max_intrs = XHCI_HCS1_IRQ_MAX(struc1);
1206 	xcap->xcap_max_ports = XHCI_HCS1_N_PORTS(struc1);
1207 	if (xcap->xcap_max_ports > MAX_PORTS) {
1208 		xhci_error(xhcip, "Root hub has %d ports, but system only "
1209 		    "supports %d, limiting to %d\n", xcap->xcap_max_ports,
1210 		    MAX_PORTS, MAX_PORTS);
1211 		xcap->xcap_max_ports = MAX_PORTS;
1212 	}
1213 
1214 	xcap->xcap_ist_micro = XHCI_HCS2_IST_MICRO(struc2);
1215 	xcap->xcap_ist = XHCI_HCS2_IST(struc2);
1216 	xcap->xcap_max_esrt = XHCI_HCS2_ERST_MAX(struc2);
1217 	xcap->xcap_scratch_restore = XHCI_HCS2_SPR(struc2);
1218 	xcap->xcap_max_scratch = XHCI_HCS2_SPB_MAX(struc2);
1219 
1220 	xcap->xcap_u1_lat = XHCI_HCS3_U1_DEL(struc3);
1221 	xcap->xcap_u2_lat = XHCI_HCS3_U2_DEL(struc3);
1222 
1223 	xcap->xcap_flags = XHCI_HCC1_FLAGS_MASK(cap1);
1224 	xcap->xcap_max_psa = XHCI_HCC1_PSA_SZ_MAX(cap1);
1225 	xcap->xcap_xecp_off = XHCI_HCC1_XECP(cap1);
1226 	xcap->xcap_flags2 = XHCI_HCC2_FLAGS_MASK(cap2);
1227 
1228 	/*
1229 	 * We don't have documentation for what changed from before xHCI 0.96,
1230 	 * so we just refuse to support versions before 0.96. We also will
1231 	 * ignore anything with a major version greater than 1.
1232 	 */
1233 	if (xcap->xcap_hci_vers < 0x96 || xcap->xcap_hci_vers >= 0x200) {
1234 		xhci_error(xhcip, "Encountered unsupported xHCI version 0x%x",
1235 		    xcap->xcap_hci_vers);
1236 		return (B_FALSE);
1237 	}
1238 
1239 	/*
1240 	 * Make sure the controller supports the host's page size; we only
1241 	 * check for the presence of 4k and 8k pages here. The page size is
1242 	 * used extensively throughout the code and the specification. While
1243 	 * we could support other page sizes here, it doesn't make much sense
1244 	 * given that we don't currently support systems with other page
1245 	 * sizes.
1246 	 */
1247 	ps = PAGESIZE;
1248 	if (ps == 0x1000) {
1249 		pbit = XHCI_PAGESIZE_4K;
1250 		psize = 0x1000;
1251 	} else if (ps == 0x2000) {
1252 		pbit = XHCI_PAGESIZE_8K;
1253 		psize = 0x2000;
1254 	} else {
1255 		xhci_error(xhcip, "Encountered host page size that the driver "
1256 		    "doesn't know how to handle: %lx\n", ps);
1257 		return (B_FALSE);
1258 	}
1259 
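	/*
	 * The operational PAGESIZE register is a bit mask: bit n set means
	 * the controller supports a page size of 2^(n+12) bytes, so bit 0
	 * indicates 4 KiB support and bit 1 indicates 8 KiB support.
	 */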
1260 	if (!(pgsz & pbit)) {
1261 		xhci_error(xhcip, "Encountered controller that didn't support "
1262 		    "the host page size (%d), supports: %x", psize, pgsz);
1263 		return (B_FALSE);
1264 	}
1265 	xcap->xcap_pagesize = psize;
1266 
1267 	return (B_TRUE);
1268 }
1269 
1270 /*
1271  * Apply workarounds for known issues. These reports come from other
1272  * operating systems and have been collected over time.
1273  */
1274 static boolean_t
1275 xhci_identify(xhci_t *xhcip)
1276 {
1277 	xhci_quirks_populate(xhcip);
1278 
1279 	if (xhcip->xhci_quirks & XHCI_QUIRK_NO_MSI) {
1280 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED;
1281 	} else {
1282 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED |
1283 		    DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX;
1284 	}
1285 
1286 	if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY) {
1287 		xhcip->xhci_caps.xcap_flags &= ~XCAP_AC64;
1288 	}
1289 
1290 	return (B_TRUE);
1291 }
1292 
1293 static boolean_t
1294 xhci_alloc_intr_handle(xhci_t *xhcip, int type)
1295 {
1296 	int ret;
1297 
1298 	/*
1299 	 * Normally a well-behaved driver would more carefully request a
1300 	 * number of interrupts based on how many are available, etc. However,
1301 	 * since we only actually want a single interrupt, we simply go ahead
1302 	 * and ask for one.
1303 	 */
1304 	ret = ddi_intr_alloc(xhcip->xhci_dip, &xhcip->xhci_intr_hdl, type, 0,
1305 	    XHCI_NINTR, &xhcip->xhci_intr_num, DDI_INTR_ALLOC_NORMAL);
1306 	if (ret != DDI_SUCCESS) {
1307 		xhci_log(xhcip, "!failed to allocate interrupts of type %d: %d",
1308 		    type, ret);
1309 		return (B_FALSE);
1310 	}
1311 	xhcip->xhci_intr_type = type;
1312 
1313 	return (B_TRUE);
1314 }
1315 
1316 static boolean_t
1317 xhci_alloc_intrs(xhci_t *xhcip)
1318 {
1319 	int intr_types, ret;
1320 
1321 	if (XHCI_NINTR > xhcip->xhci_caps.xcap_max_intrs) {
1322 		xhci_error(xhcip, "controller does not support the minimum "
1323 		    "number of interrupts required (%d), supports %d",
1324 		    XHCI_NINTR, xhcip->xhci_caps.xcap_max_intrs);
1325 		return (B_FALSE);
1326 	}
1327 
1328 	if ((ret = ddi_intr_get_supported_types(xhcip->xhci_dip,
1329 	    &intr_types)) != DDI_SUCCESS) {
1330 		xhci_error(xhcip, "failed to get supported interrupt types: "
1331 		    "%d", ret);
1332 		return (B_FALSE);
1333 	}
1334 
1335 	/*
1336 	 * Mask off interrupt types we've already ruled out due to quirks or
1337 	 * other reasons.
1338 	 */
1339 	intr_types &= xhcip->xhci_caps.xcap_intr_types;
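	/*
	 * Prefer MSI-X, then fall back to MSI, and finally to a fixed
	 * interrupt.
	 */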
1340 	if (intr_types & DDI_INTR_TYPE_MSIX) {
1341 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSIX))
1342 			return (B_TRUE);
1343 	}
1344 
1345 	if (intr_types & DDI_INTR_TYPE_MSI) {
1346 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSI))
1347 			return (B_TRUE);
1348 	}
1349 
1350 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1351 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_FIXED))
1352 			return (B_TRUE);
1353 	}
1354 
1355 	xhci_error(xhcip, "failed to allocate an interrupt, supported types: "
1356 	    "0x%x", intr_types);
1357 	return (B_FALSE);
1358 }
1359 
1360 static boolean_t
1361 xhci_add_intr_handler(xhci_t *xhcip)
1362 {
1363 	int ret;
1364 
1365 	if ((ret = ddi_intr_get_pri(xhcip->xhci_intr_hdl,
1366 	    &xhcip->xhci_intr_pri)) != DDI_SUCCESS) {
1367 		xhci_error(xhcip, "failed to get interrupt priority: %d", ret);
1368 		return (B_FALSE);
1369 	}
1370 
1371 	if ((ret = ddi_intr_get_cap(xhcip->xhci_intr_hdl,
1372 	    &xhcip->xhci_intr_caps)) != DDI_SUCCESS) {
1373 		xhci_error(xhcip, "failed to get interrupt capabilities: %d",
1374 		    ret);
1375 		return (B_FALSE);
1376 	}
1377 
1378 	if ((ret = ddi_intr_add_handler(xhcip->xhci_intr_hdl, xhci_intr, xhcip,
1379 	    (uintptr_t)0)) != DDI_SUCCESS) {
1380 		xhci_error(xhcip, "failed to add interrupt handler: %d", ret);
1381 		return (B_FALSE);
1382 	}
1383 	return (B_TRUE);
1384 }
1385 
1386 /*
1387  * Find a capability with an identifier whose value is 'id'. The 'init' argument
1388  * gives the offset of a previously found capability to resume the search after;
1389  * UINT32_MAX searches from the start. See xHCI 1.1 / 7; this is much like PCI capabilities.
1390  */
1391 static boolean_t
1392 xhci_find_ext_cap(xhci_t *xhcip, uint32_t id, uint32_t init, uint32_t *outp)
1393 {
1394 	uint32_t off;
1395 	uint8_t next = 0;
1396 
1397 	/*
1398 	 * If we have no offset, we're done.
1399 	 */
1400 	if (xhcip->xhci_caps.xcap_xecp_off == 0)
1401 		return (B_FALSE);
1402 
1403 	off = xhcip->xhci_caps.xcap_xecp_off << 2;
1404 	do {
1405 		uint32_t cap_hdr;
1406 
1407 		off += next << 2;
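		/*
		 * Each extended capability header encodes the capability ID
		 * in its low byte and the offset to the next capability, in
		 * units of 32-bit words, in its second byte.
		 */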
1408 		cap_hdr = xhci_get32(xhcip, XHCI_R_CAP, off);
1409 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1410 			xhci_error(xhcip, "failed to read xhci extended "
1411 			    "capabilities at offset 0x%x: encountered FM "
1412 			    "register error", off);
1413 			ddi_fm_service_impact(xhcip->xhci_dip,
1414 			    DDI_SERVICE_LOST);
1415 			break;
1416 		}
1417 
1418 		if (cap_hdr == PCI_EINVAL32)
1419 			break;
1420 		if (XHCI_XECP_ID(cap_hdr) == id &&
1421 		    (init == UINT32_MAX || off > init)) {
1422 			*outp = off;
1423 			return (B_TRUE);
1424 		}
1425 		next = XHCI_XECP_NEXT(cap_hdr);
1426 		/*
1427 		 * Watch out for overflow if we somehow end up with more than
1428 		 * 2 GiB of capability space.
1429 		 */
1430 		if (next << 2 > (INT32_MAX - off))
1431 			return (B_FALSE);
1432 	} while (next != 0);
1433 
1434 	return (B_FALSE);
1435 }
1436 
1437 /*
1438  * For mostly informational purposes, we'd like to augment the devinfo tree
1439  * with the number of ports that support USB 2 and USB 3. Note though that
1440  * these ports may overlap. Many ports support both USB 2 and USB 3 and are
1441  * wired up to the same physical connector, even though they show up as
1442  * separate 'ports' in the xhci sense.
1443  */
1444 static boolean_t
1445 xhci_port_count(xhci_t *xhcip)
1446 {
1447 	uint_t nusb2 = 0, fusb2 = 0;
1448 	uint_t nusb30 = 0, fusb30 = 0;
1449 	uint_t nusb31 = 0, fusb31 = 0;
1450 	uint32_t off = UINT32_MAX;
1451 
1452 	while (xhci_find_ext_cap(xhcip, XHCI_ID_PROTOCOLS, off, &off) ==
1453 	    B_TRUE) {
1454 		uint32_t rvers, rport;
1455 		uint8_t maj, min, count, first;
1456 
1457 		/*
1458 		 * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
1459 		 * has version information while the third uint32_t has the port
1460 		 * count.
1461 		 */
1462 		rvers = xhci_get32(xhcip, XHCI_R_CAP, off);
1463 		rport = xhci_get32(xhcip, XHCI_R_CAP, off + 8);
1464 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1465 			xhci_error(xhcip, "failed to read xhci port counts: "
1466 			    "encountered fatal FM register error");
1467 			ddi_fm_service_impact(xhcip->xhci_dip,
1468 			    DDI_SERVICE_LOST);
1469 			return (B_FALSE);
1470 		}
1471 
1472 		maj = XHCI_XECP_PROT_MAJOR(rvers);
1473 		min = XHCI_XECP_PROT_MINOR(rvers);
1474 		count = XHCI_XECP_PROT_PCOUNT(rport);
1475 		first = XHCI_XECP_PROT_FPORT(rport);
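		/*
		 * 'first' and 'count' describe a contiguous, 1-based range of
		 * root hub port numbers that this protocol revision applies
		 * to.
		 */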
1476 
1477 		/*
1478 		 * In the wild, we've seen some systems that use a minor
1479 		 * version of 0x10 in this field and some that use 0x01.
1480 		 * The xHCI 1.1 specification (Table 155: xHCI Supported
1481 		 * Protocols) says that we should expect a minor revision
1482 		 * of 0x01; however, the USB 3.1 specification defines the
1483 		 * version as 0x10 when it is encoded in BCD style. As
1484 		 * such, we handle both values and hope we never get to
1485 		 * revision 16 of USB 3.
1486 		 */
1487 		if (maj == 3 && (min == 0x10 || min == 0x01)) {
1488 			nusb31 = count;
1489 			fusb31 = first;
1490 		} else if (maj == 3 && min == 0) {
1491 			nusb30 = count;
1492 			fusb30 = first;
1493 		} else if (maj <= 2) {
1494 			nusb2 = count;
1495 			fusb2 = first;
1496 		} else {
1497 			xhci_error(xhcip, "encountered port capabilities with "
1498 			    "unknown USB version: %x.%x\n", maj, min);
1499 		}
1500 	}
1501 
1502 	/*
1503 	 * These properties are used by FMA and the USB topo module.
1504 	 */
1505 	if (nusb2 > 0) {
1506 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1507 		    "usb2.0-port-count", nusb2);
1508 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1509 		    "usb2.0-first-port", fusb2);
1510 	}
1511 	if (nusb30 > 0) {
1512 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1513 		    "usb3.0-port-count", nusb30);
1514 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1515 		    "usb3.0-first-port", fusb30);
1516 	}
1517 
1518 	if (nusb31 > 0) {
1519 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1520 		    "usb3.1-port-count", nusb31);
1521 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1522 		    "usb3.1-first-port", fusb31);
1523 	}
1524 
1525 	return (B_TRUE);
1526 }
1527 
1528 /*
1529  * Take over control from the BIOS or other firmware, if applicable.
1530  */
1531 static boolean_t
1532 xhci_controller_takeover(xhci_t *xhcip)
1533 {
1534 	int ret;
1535 	uint32_t val, off;
1536 
1537 	/*
1538 	 * If we can't find the legacy capability, then there's nothing to do.
1539 	 */
1540 	if (xhci_find_ext_cap(xhcip, XHCI_ID_USB_LEGACY, UINT32_MAX, &off) ==
1541 	    B_FALSE)
1542 		return (B_TRUE);
1543 	val = xhci_get32(xhcip, XHCI_R_CAP, off);
1544 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1545 		xhci_error(xhcip, "failed to read BIOS take over registers: "
1546 		    "encountered fatal FM register error");
1547 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1548 		return (B_FALSE);
1549 	}
1550 
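	/*
	 * The USB legacy support capability contains a pair of ownership
	 * semaphores. We claim the controller by setting the OS-owned bit
	 * and then waiting for the firmware to drop its BIOS-owned bit.
	 */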
1551 	if (val & XHCI_BIOS_OWNED) {
1552 		val |= XHCI_OS_OWNED;
1553 		xhci_put32(xhcip, XHCI_R_CAP, off, val);
1554 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1555 			xhci_error(xhcip, "failed to write BIOS take over "
1556 			    "registers: encountered fatal FM register error");
1557 			ddi_fm_service_impact(xhcip->xhci_dip,
1558 			    DDI_SERVICE_LOST);
1559 			return (B_FALSE);
1560 		}
1561 
1562 		/*
1563 		 * Wait up to 5 seconds for things to change. While this number
1564 		 * isn't specified in the xHCI spec, it seems to be the de facto
1565 		 * value that various systems are using today. We'll use a 10ms
1566 		 * interval to check.
1567 		 */
1568 		ret = xhci_reg_poll(xhcip, XHCI_R_CAP, off,
1569 		    XHCI_BIOS_OWNED | XHCI_OS_OWNED, XHCI_OS_OWNED, 500, 10);
1570 		if (ret == EIO)
1571 			return (B_FALSE);
1572 		if (ret == ETIMEDOUT) {
1573 			xhci_log(xhcip, "!timed out waiting for firmware to "
1574 			    "hand off, taking over");
1575 			val &= ~XHCI_BIOS_OWNED;
1576 			xhci_put32(xhcip, XHCI_R_CAP, off, val);
1577 			if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1578 				xhci_error(xhcip, "failed to write forced "
1579 				    "takeover: encountered fatal FM register "
1580 				    "error");
1581 				ddi_fm_service_impact(xhcip->xhci_dip,
1582 				    DDI_SERVICE_LOST);
1583 				return (B_FALSE);
1584 			}
1585 		}
1586 	}
1587 
1588 	val = xhci_get32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS);
1589 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1590 		xhci_error(xhcip, "failed to read legacy control registers: "
1591 		    "encountered fatal FM register error");
1592 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1593 		return (B_FALSE);
1594 	}
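	/*
	 * Regardless of who owned the controller, mask off the SMI enables in
	 * the legacy control/status register and clear any pending SMI status
	 * bits so the firmware no longer gets involved.
	 */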
1595 	val &= XHCI_XECP_SMI_MASK;
1596 	val |= XHCI_XECP_CLEAR_SMI;
1597 	xhci_put32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS, val);
1598 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1599 		xhci_error(xhcip, "failed to write legacy control registers: "
1600 		    "encountered fatal FM register error");
1601 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1602 		return (B_FALSE);
1603 	}
1604 
1605 	return (B_TRUE);
1606 }
1607 
1608 static int
1609 xhci_controller_stop(xhci_t *xhcip)
1610 {
1611 	uint32_t cmdreg;
1612 
1613 	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1614 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1615 		xhci_error(xhcip, "failed to read USB Command register: "
1616 		    "encountered fatal FM register error");
1617 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1618 		return (EIO);
1619 	}
1620 
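	/*
	 * Clear the Run/Stop bit to halt the controller and clear the
	 * interrupt enable bit so that it stops generating interrupts.
	 */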
1621 	cmdreg &= ~(XHCI_CMD_RS | XHCI_CMD_INTE);
1622 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1623 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1624 		xhci_error(xhcip, "failed to write USB Command register: "
1625 		    "encountered fatal FM register error");
1626 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1627 		return (EIO);
1628 	}
1629 
1630 	/*
1631 	 * Wait up to 50ms for this to occur. The specification says that this
1632 	 * should stop within 16ms, but we give ourselves a bit more time just
1633 	 * in case.
1634 	 */
1635 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_HCH,
1636 	    XHCI_STS_HCH, 50, 10));
1637 }
1638 
1639 static int
1640 xhci_controller_reset(xhci_t *xhcip)
1641 {
1642 	int ret;
1643 	uint32_t cmdreg;
1644 
1645 	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1646 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1647 		xhci_error(xhcip, "failed to read USB Command register for "
1648 		    "reset: encountered fatal FM register error");
1649 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1650 		return (EIO);
1651 	}
1652 
1653 	cmdreg |= XHCI_CMD_HCRST;
1654 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1655 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1656 		xhci_error(xhcip, "failed to write USB Command register for "
1657 		    "reset: encountered fatal FM register error");
1658 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1659 		return (EIO);
1660 	}
1661 
1662 	/*
1663 	 * Some controllers apparently don't want to be touched for at least 1ms
1664 	 * after we initiate the reset. Therefore give all controllers this
1665 	 * moment to breathe.
1666 	 */
1667 	delay(drv_usectohz(xhci_reset_delay));
1668 
1669 	/*
1670 	 * To tell that the reset has completed, we first verify that the
1671 	 * USBCMD register no longer has the reset bit asserted. However, once
1672 	 * that's done, we also have to verify that CNR (Controller Not Ready)
1673 	 * is no longer asserted in the USBSTS register.
1674 	 */
1675 	if ((ret = xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBCMD,
1676 	    XHCI_CMD_HCRST, 0, 500, 10)) != 0)
1677 		return (ret);
1678 
1679 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1680 	    XHCI_STS_CNR, 0, 500, 10));
1681 }
1682 
1683 /*
1684  * Take care of all the required initialization before we can actually enable
1685  * the controller. This means that we need to:
1686  *
1687  *    o Program the maximum number of slots
1688  *    o Program the DCBAAP and allocate the scratchpad
1689  *    o Program the Command Ring
1690  *    o Initialize the Event Ring
1691  *    o Enable interrupts (set imod)
1692  */
1693 static int
1694 xhci_controller_configure(xhci_t *xhcip)
1695 {
1696 	int ret;
1697 	uint32_t config;
1698 
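	/*
	 * Program the number of device slots that we're willing to use into
	 * the max device slots enabled field of the CONFIG register.
	 */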
1699 	config = xhci_get32(xhcip, XHCI_R_OPER, XHCI_CONFIG);
1700 	config &= ~XHCI_CONFIG_SLOTS_MASK;
1701 	config |= xhcip->xhci_caps.xcap_max_slots;
1702 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_CONFIG, config);
1703 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1704 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1705 		return (EIO);
1706 	}
1707 
1708 	if ((ret = xhci_context_init(xhcip)) != 0) {
1709 		const char *reason;
1710 		if (ret == EIO) {
1711 			reason = "fatal FM I/O error occurred";
1712 		} else if (ret == ENOMEM) {
1713 			reason = "unable to allocate DMA memory";
1714 		} else {
1715 			reason = "unexpected error occurred";
1716 		}
1717 
1718 		xhci_error(xhcip, "failed to initialize xhci context "
1719 		    "registers: %s (%d)", reason, ret);
1720 		return (ret);
1721 	}
1722 
1723 	if ((ret = xhci_command_ring_init(xhcip)) != 0) {
1724 		xhci_error(xhcip, "failed to initialize commands: %d", ret);
1725 		return (ret);
1726 	}
1727 
1728 	if ((ret = xhci_event_init(xhcip)) != 0) {
1729 		xhci_error(xhcip, "failed to initialize events: %d", ret);
1730 		return (ret);
1731 	}
1732 
1733 	if ((ret = xhci_intr_conf(xhcip)) != 0) {
1734 		xhci_error(xhcip, "failed to configure interrupts: %d", ret);
1735 		return (ret);
1736 	}
1737 
1738 	return (0);
1739 }
1740 
1741 static int
1742 xhci_controller_start(xhci_t *xhcip)
1743 {
1744 	uint32_t reg;
1745 
1746 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1747 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1748 		xhci_error(xhcip, "failed to read USB Command register for "
1749 		    "start: encountered fatal FM register error");
1750 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1751 		return (EIO);
1752 	}
1753 
1754 	reg |= XHCI_CMD_RS;
1755 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, reg);
1756 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1757 		xhci_error(xhcip, "failed to write USB Command register for "
1758 		    "start: encountered fatal FM register error");
1759 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1760 		return (EIO);
1761 	}
1762 
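	/*
	 * Wait for the controller to leave the halted state, which it
	 * indicates by clearing the HCHalted bit in USBSTS.
	 */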
1763 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1764 	    XHCI_STS_HCH, 0, 500, 10));
1765 }
1766 
1767 /* ARGSUSED */
1768 static void
1769 xhci_reset_task(void *arg)
1770 {
1771 	/*
1772 	 * Longer term, we'd like to properly perform a controller reset.
1773 	 * However, that requires a bit more assistance from USBA to work
1774 	 * properly and tear down devices. In the meantime, we panic.
1775 	 */
1776 	panic("XHCI runtime reset required");
1777 }
1778 
1779 /*
1780  * This function is called when we've detected a fatal FM condition that has
1781  * resulted in a loss of service and we need to force a reset of the controller
1782  * as a whole. Only one such reset may be ongoing at a time.
1783  */
1784 void
1785 xhci_fm_runtime_reset(xhci_t *xhcip)
1786 {
1787 	boolean_t locked = B_FALSE;
1788 
1789 	if (mutex_owned(&xhcip->xhci_lock)) {
1790 		locked = B_TRUE;
1791 	} else {
1792 		mutex_enter(&xhcip->xhci_lock);
1793 	}
1794 
1795 	/*
1796 	 * If we're already in the error state then a reset is already ongoing
1797 	 * and there is nothing for us to do here.
1798 	 */
1799 	if (xhcip->xhci_state & XHCI_S_ERROR) {
1800 		goto out;
1801 	}
1802 
1803 	xhcip->xhci_state |= XHCI_S_ERROR;
1804 	ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1805 	taskq_dispatch_ent(xhci_taskq, xhci_reset_task, xhcip, 0,
1806 	    &xhcip->xhci_tqe);
1807 out:
1808 	if (!locked) {
1809 		mutex_exit(&xhcip->xhci_lock);
1810 	}
1811 }
1812 
1813 static int
1814 xhci_ioctl_portsc(xhci_t *xhcip, intptr_t arg)
1815 {
1816 	int i;
1817 	xhci_ioctl_portsc_t xhi;
1818 
1819 	bzero(&xhi, sizeof (xhci_ioctl_portsc_t));
1820 	xhi.xhi_nports = xhcip->xhci_caps.xcap_max_ports;
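	/*
	 * Root hub ports, and thus the PORTSC registers, are numbered
	 * starting at one.
	 */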
1821 	for (i = 1; i <= xhcip->xhci_caps.xcap_max_ports; i++) {
1822 		xhi.xhi_portsc[i] = xhci_get32(xhcip, XHCI_R_OPER,
1823 		    XHCI_PORTSC(i));
1824 	}
1825 
1826 	if (ddi_copyout(&xhi, (void *)(uintptr_t)arg, sizeof (xhi), 0) != 0)
1827 		return (EFAULT);
1828 
1829 	return (0);
1830 }
1831 
1832 static int
1833 xhci_ioctl_clear(xhci_t *xhcip, intptr_t arg)
1834 {
1835 	uint32_t reg;
1836 	xhci_ioctl_clear_t xic;
1837 
1838 	if (ddi_copyin((const void *)(uintptr_t)arg, &xic, sizeof (xic),
1839 	    0) != 0)
1840 		return (EFAULT);
1841 
1842 	if (xic.xic_port == 0 || xic.xic_port >
1843 	    xhcip->xhci_caps.xcap_max_ports)
1844 		return (EINVAL);
1845 
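	/*
	 * The change bits in PORTSC are write-one-to-clear. Mask off bits
	 * that shouldn't be written back as-is and then set each change bit
	 * to acknowledge it.
	 */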
1846 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port));
1847 	reg &= ~XHCI_PS_CLEAR;
1848 	reg |= XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC |
1849 	    XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC;
1850 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port), reg);
1851 
1852 	return (0);
1853 }
1854 
1855 static int
1856 xhci_ioctl_setpls(xhci_t *xhcip, intptr_t arg)
1857 {
1858 	uint32_t reg;
1859 	xhci_ioctl_setpls_t xis;
1860 
1861 	if (ddi_copyin((const void *)(uintptr_t)arg, &xis, sizeof (xis),
1862 	    0) != 0)
1863 		return (EFAULT);
1864 
1865 	if (xis.xis_port == 0 || xis.xis_port >
1866 	    xhcip->xhci_caps.xcap_max_ports)
1867 		return (EINVAL);
1868 
1869 	if (xis.xis_pls & ~0xf)
1870 		return (EINVAL);
1871 
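	/*
	 * Writes to the port link state (PLS) field only take effect when the
	 * link state write strobe (LWS) bit is set in the same write.
	 */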
1872 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port));
1873 	reg &= ~XHCI_PS_CLEAR;
1874 	reg |= XHCI_PS_PLS_SET(xis.xis_pls);
1875 	reg |= XHCI_PS_LWS;
1876 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port), reg);
1877 
1878 	return (0);
1879 }
1880 
1881 static int
1882 xhci_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1883 {
1884 	dev_info_t *dip = xhci_get_dip(*devp);
1885 
1886 	return (usba_hubdi_open(dip, devp, flags, otyp, credp));
1887 }
1888 
1889 static int
1890 xhci_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1891     int *rvalp)
1892 {
1893 	dev_info_t *dip = xhci_get_dip(dev);
1894 
1895 	if (cmd == XHCI_IOCTL_PORTSC ||
1896 	    cmd == XHCI_IOCTL_CLEAR ||
1897 	    cmd == XHCI_IOCTL_SETPLS) {
1898 		xhci_t *xhcip = ddi_get_soft_state(xhci_soft_state,
1899 		    getminor(dev) & ~HUBD_IS_ROOT_HUB);
1900 
1901 		if (secpolicy_xhci(credp) != 0 ||
1902 		    crgetzoneid(credp) != GLOBAL_ZONEID)
1903 			return (EPERM);
1904 
1905 		if (mode & FKIOCTL)
1906 			return (ENOTSUP);
1907 
1908 		if (!(mode & FWRITE))
1909 			return (EBADF);
1910 
1911 		if (cmd == XHCI_IOCTL_PORTSC)
1912 			return (xhci_ioctl_portsc(xhcip, arg));
1913 		else if (cmd == XHCI_IOCTL_CLEAR)
1914 			return (xhci_ioctl_clear(xhcip, arg));
1915 		else
1916 			return (xhci_ioctl_setpls(xhcip, arg));
1917 	}
1918 
1919 	return (usba_hubdi_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
1920 }
1921 
1922 static int
1923 xhci_close(dev_t dev, int flag, int otyp, cred_t *credp)
1924 {
1925 	dev_info_t *dip = xhci_get_dip(dev);
1926 
1927 	return (usba_hubdi_close(dip, dev, flag, otyp, credp));
1928 }
1929 
1930 /*
1931  * We try to clean up everything that we can. The only thing that we let stop us
1932  * at this time is a failure to remove the root hub, which is realistically the
1933  * equivalent of our EBUSY case.
1934  */
1935 static int
1936 xhci_cleanup(xhci_t *xhcip)
1937 {
1938 	int ret, inst;
1939 
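	/*
	 * xhci_seq records which stages of attach completed, so we only tear
	 * down the parts that were actually set up.
	 */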
1940 	if (xhcip->xhci_seq & XHCI_ATTACH_ROOT_HUB) {
1941 		if ((ret = xhci_root_hub_fini(xhcip)) != 0)
1942 			return (ret);
1943 	}
1944 
1945 	if (xhcip->xhci_seq & XHCI_ATTACH_USBA) {
1946 		xhci_hcd_fini(xhcip);
1947 	}
1948 
1949 	if (xhcip->xhci_seq & XHCI_ATTACH_STARTED) {
1950 		mutex_enter(&xhcip->xhci_lock);
1951 		while (xhcip->xhci_state & XHCI_S_ERROR)
1952 			cv_wait(&xhcip->xhci_statecv, &xhcip->xhci_lock);
1953 		mutex_exit(&xhcip->xhci_lock);
1954 
1955 		(void) xhci_controller_stop(xhcip);
1956 	}
1957 
1958 	/*
1959 	 * Always release the context, command, and event data. They handle the
1960 	 * fact that they may be in an arbitrary state or unallocated.
1961 	 */
1962 	xhci_event_fini(xhcip);
1963 	xhci_command_ring_fini(xhcip);
1964 	xhci_context_fini(xhcip);
1965 
1966 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ENABLE) {
1967 		(void) xhci_ddi_intr_disable(xhcip);
1968 	}
1969 
1970 	if (xhcip->xhci_seq & XHCI_ATTACH_SYNCH) {
1971 		cv_destroy(&xhcip->xhci_statecv);
1972 		mutex_destroy(&xhcip->xhci_lock);
1973 	}
1974 
1975 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ADD) {
1976 		if ((ret = ddi_intr_remove_handler(xhcip->xhci_intr_hdl)) !=
1977 		    DDI_SUCCESS) {
1978 			xhci_error(xhcip, "failed to remove interrupt "
1979 			    "handler: %d", ret);
1980 		}
1981 	}
1982 
1983 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ALLOC) {
1984 		if ((ret = ddi_intr_free(xhcip->xhci_intr_hdl)) !=
1985 		    DDI_SUCCESS) {
1986 			xhci_error(xhcip, "failed to free interrupts: %d", ret);
1987 		}
1988 	}
1989 
1990 	if (xhcip->xhci_seq & XHCI_ATTACH_REGS_MAP) {
1991 		ddi_regs_map_free(&xhcip->xhci_regs_handle);
1992 		xhcip->xhci_regs_handle = NULL;
1993 	}
1994 
1995 	if (xhcip->xhci_seq & XHCI_ATTACH_PCI_CONFIG) {
1996 		pci_config_teardown(&xhcip->xhci_cfg_handle);
1997 		xhcip->xhci_cfg_handle = NULL;
1998 	}
1999 
2000 	if (xhcip->xhci_seq & XHCI_ATTACH_FM) {
2001 		xhci_fm_fini(xhcip);
2002 		xhcip->xhci_fm_caps = 0;
2003 	}
2004 
2005 	inst = ddi_get_instance(xhcip->xhci_dip);
2006 	xhcip->xhci_dip = NULL;
2007 	ddi_soft_state_free(xhci_soft_state, inst);
2008 
2009 	return (DDI_SUCCESS);
2010 }
2011 
2012 static int
2013 xhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2014 {
2015 	int ret, inst, route;
2016 	xhci_t *xhcip;
2017 
2018 	if (cmd != DDI_ATTACH)
2019 		return (DDI_FAILURE);
2020 
2021 	inst = ddi_get_instance(dip);
2022 	if (ddi_soft_state_zalloc(xhci_soft_state, inst) != 0)
2023 		return (DDI_FAILURE);
2024 	xhcip = ddi_get_soft_state(xhci_soft_state, inst);
2025 	xhcip->xhci_dip = dip;
2026 
2027 	xhcip->xhci_regs_capoff = PCI_EINVAL32;
2028 	xhcip->xhci_regs_operoff = PCI_EINVAL32;
2029 	xhcip->xhci_regs_runoff = PCI_EINVAL32;
2030 	xhcip->xhci_regs_dooroff = PCI_EINVAL32;
2031 
2032 	xhci_fm_init(xhcip);
2033 	xhcip->xhci_seq |= XHCI_ATTACH_FM;
2034 
2035 	if (pci_config_setup(xhcip->xhci_dip, &xhcip->xhci_cfg_handle) !=
2036 	    DDI_SUCCESS) {
2037 		goto err;
2038 	}
2039 	xhcip->xhci_seq |= XHCI_ATTACH_PCI_CONFIG;
2040 	xhcip->xhci_vendor_id = pci_config_get16(xhcip->xhci_cfg_handle,
2041 	    PCI_CONF_VENID);
2042 	xhcip->xhci_device_id = pci_config_get16(xhcip->xhci_cfg_handle,
2043 	    PCI_CONF_DEVID);
2044 
2045 	if (xhci_regs_map(xhcip) == B_FALSE) {
2046 		goto err;
2047 	}
2048 
2049 	xhcip->xhci_seq |= XHCI_ATTACH_REGS_MAP;
2050 
2051 	if (xhci_regs_init(xhcip) == B_FALSE)
2052 		goto err;
2053 
2054 	if (xhci_read_params(xhcip) == B_FALSE)
2055 		goto err;
2056 
2057 	if (xhci_identify(xhcip) == B_FALSE)
2058 		goto err;
2059 
2060 	if (xhci_alloc_intrs(xhcip) == B_FALSE)
2061 		goto err;
2062 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ALLOC;
2063 
2064 	if (xhci_add_intr_handler(xhcip) == B_FALSE)
2065 		goto err;
2066 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ADD;
2067 
2068 	mutex_init(&xhcip->xhci_lock, NULL, MUTEX_DRIVER,
2069 	    (void *)(uintptr_t)xhcip->xhci_intr_pri);
2070 	cv_init(&xhcip->xhci_statecv, NULL, CV_DRIVER, NULL);
2071 	xhcip->xhci_seq |= XHCI_ATTACH_SYNCH;
2072 
2073 	if (xhci_port_count(xhcip) == B_FALSE)
2074 		goto err;
2075 
2076 	if (xhci_controller_takeover(xhcip) == B_FALSE)
2077 		goto err;
2078 
2079 	/*
2080 	 * We don't enable interrupts until after we take over the controller
2081 	 * from the BIOS. We've observed cases where enabling them any earlier
2082 	 * can cause spurious interrupts.
2083 	 */
2084 	if (xhci_ddi_intr_enable(xhcip) == B_FALSE)
2085 		goto err;
2086 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ENABLE;
2087 
2088 	if ((ret = xhci_controller_stop(xhcip)) != 0) {
2089 		xhci_error(xhcip, "failed to stop controller: %s",
2090 		    ret == EIO ? "encountered FM register error" :
2091 		    "timed out while waiting for controller");
2092 		goto err;
2093 	}
2094 
2095 	if ((ret = xhci_controller_reset(xhcip)) != 0) {
2096 		xhci_error(xhcip, "failed to reset controller: %s",
2097 		    ret == EIO ? "encountered FM register error" :
2098 		    "timed out while waiting for controller");
2099 		goto err;
2100 	}
2101 
2102 	if ((ret = xhci_controller_configure(xhcip)) != 0) {
2103 		xhci_error(xhcip, "failed to configure controller: %d", ret);
2104 		goto err;
2105 	}
2106 
2107 	/*
2108 	 * Some systems support having ports routed to both an ehci and xhci
2109 	 * controller. If the hardware supports it and the user hasn't
2110 	 * disabled it via a driver.conf tunable, we reroute those ports now.
2111 	 */
2112 	route = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
2113 	    DDI_PROP_DONTPASS, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT);
2114 	if (route != XHCI_PROP_REROUTE_DISABLE &&
2115 	    (xhcip->xhci_quirks & XHCI_QUIRK_INTC_EHCI))
2116 		(void) xhci_reroute_intel(xhcip);
2117 
2118 	if ((ret = xhci_controller_start(xhcip)) != 0) {
2119 		xhci_log(xhcip, "failed to start controller: %s",
2120 		    ret == EIO ? "encountered FM register error" :
2121 		    "timed out while waiting for controller");
2122 		goto err;
2123 	}
2124 	xhcip->xhci_seq |= XHCI_ATTACH_STARTED;
2125 
2126 	/*
2127 	 * Finally, register ourselves with the USB framework itself.
2128 	 */
2129 	if ((ret = xhci_hcd_init(xhcip)) != 0) {
2130 		xhci_error(xhcip, "failed to register hcd with usba");
2131 		goto err;
2132 	}
2133 	xhcip->xhci_seq |= XHCI_ATTACH_USBA;
2134 
2135 	if ((ret = xhci_root_hub_init(xhcip)) != 0) {
2136 		xhci_error(xhcip, "failed to load the root hub driver");
2137 		goto err;
2138 	}
2139 	xhcip->xhci_seq |= XHCI_ATTACH_ROOT_HUB;
2140 
2141 	return (DDI_SUCCESS);
2142 
2143 err:
2144 	(void) xhci_cleanup(xhcip);
2145 	return (DDI_FAILURE);
2146 }
2147 
2148 static int
2149 xhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2150 {
2151 	xhci_t *xhcip;
2152 
2153 	if (cmd != DDI_DETACH)
2154 		return (DDI_FAILURE);
2155 
2156 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2157 	if (xhcip == NULL) {
2158 		dev_err(dip, CE_WARN, "detach called without soft state!");
2159 		return (DDI_FAILURE);
2160 	}
2161 
2162 	return (xhci_cleanup(xhcip));
2163 }
2164 
2165 /* ARGSUSED */
2166 static int
2167 xhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
2168 {
2169 	dev_t dev;
2170 	int inst;
2171 
2172 	switch (cmd) {
2173 	case DDI_INFO_DEVT2DEVINFO:
2174 		dev = (dev_t)arg;
2175 		*outp = xhci_get_dip(dev);
2176 		if (*outp == NULL)
2177 			return (DDI_FAILURE);
2178 		break;
2179 	case DDI_INFO_DEVT2INSTANCE:
2180 		dev = (dev_t)arg;
2181 		inst = getminor(dev) & ~HUBD_IS_ROOT_HUB;
2182 		*outp = (void *)(uintptr_t)inst;
2183 		break;
2184 	default:
2185 		return (DDI_FAILURE);
2186 	}
2187 
2188 	return (DDI_SUCCESS);
2189 }
2190 
2191 static struct cb_ops xhci_cb_ops = {
2192 	xhci_open,		/* cb_open */
2193 	xhci_close,		/* cb_close */
2194 	nodev,			/* cb_strategy */
2195 	nodev,			/* cb_print */
2196 	nodev,			/* cb_dump */
2197 	nodev,			/* cb_read */
2198 	nodev,			/* cb_write */
2199 	xhci_ioctl,		/* cb_ioctl */
2200 	nodev,			/* cb_devmap */
2201 	nodev,			/* cb_mmap */
2202 	nodev,			/* cb_segmap */
2203 	nochpoll,		/* cb_chpoll */
2204 	ddi_prop_op,		/* cb_prop_op */
2205 	NULL,			/* cb_stream */
2206 	D_MP | D_HOTPLUG,	/* cb_flag */
2207 	CB_REV,			/* cb_rev */
2208 	nodev,			/* cb_aread */
2209 	nodev			/* cb_awrite */
2210 };
2211 
2212 static struct dev_ops xhci_dev_ops = {
2213 	DEVO_REV,			/* devo_rev */
2214 	0,				/* devo_refcnt */
2215 	xhci_getinfo,			/* devo_getinfo */
2216 	nulldev,			/* devo_identify */
2217 	nulldev,			/* devo_probe */
2218 	xhci_attach,			/* devo_attach */
2219 	xhci_detach,			/* devo_detach */
2220 	nodev,				/* devo_reset */
2221 	&xhci_cb_ops,			/* devo_cb_ops */
2222 	&usba_hubdi_busops,		/* devo_bus_ops */
2223 	usba_hubdi_root_hub_power,	/* devo_power */
2224 	ddi_quiesce_not_supported	/* devo_quiesce */
2225 };
2226 
2227 static struct modldrv xhci_modldrv = {
2228 	&mod_driverops,
2229 	"USB xHCI Driver",
2230 	&xhci_dev_ops
2231 };
2232 
2233 static struct modlinkage xhci_modlinkage = {
2234 	MODREV_1,
2235 	&xhci_modldrv,
2236 	NULL
2237 };
2238 
2239 int
2240 _init(void)
2241 {
2242 	int ret;
2243 
2244 	if ((ret = ddi_soft_state_init(&xhci_soft_state, sizeof (xhci_t),
2245 	    0)) != 0) {
2246 		return (ret);
2247 	}
2248 
2249 	xhci_taskq = taskq_create("xhci_taskq", 1, minclsyspri, 0, 0, 0);
2250 	if (xhci_taskq == NULL) {
2251 		ddi_soft_state_fini(&xhci_soft_state);
2252 		return (ENOMEM);
2253 	}
2254 
2255 	if ((ret = mod_install(&xhci_modlinkage)) != 0) {
2256 		taskq_destroy(xhci_taskq);
2257 		xhci_taskq = NULL;
2258 	}
2259 
2260 	return (ret);
2261 }
2262 
2263 int
2264 _info(struct modinfo *modinfop)
2265 {
2266 	return (mod_info(&xhci_modlinkage, modinfop));
2267 }
2268 
2269 int
2270 _fini(void)
2271 {
2272 	int ret;
2273 
2274 	if ((ret = mod_remove(&xhci_modlinkage)) != 0)
2275 		return (ret);
2276 
2277 	if (xhci_taskq != NULL) {
2278 		taskq_destroy(xhci_taskq);
2279 		xhci_taskq = NULL;
2280 	}
2281 
2282 	ddi_soft_state_fini(&xhci_soft_state);
2283 
2284 	return (0);
2285 }
2286