1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2019, Joyent, Inc.
14  * Copyright 2022 Oxide Computer Company
15  */
16 
17 /*
18  * Extensible Host Controller Interface (xHCI) USB Driver
19  *
 * The xhci driver is an HCI driver for USB that bridges the gap between client
 * device drivers and the hardware, implementing how we talk to devices. The
22  * xhci specification provides access to USB 3.x capable devices, as well as all
23  * prior generations. Like other host controllers, it both provides the way to
24  * talk to devices and also is treated like a hub (often called the root hub).
25  *
26  * This driver is part of the USBA (USB Architecture). It implements the HCDI
27  * (host controller device interface) end of USBA. These entry points are used
28  * by the USBA on behalf of client device drivers to access their devices. The
29  * driver also provides notifications to deal with hot plug events, which are
30  * quite common in USB.
31  *
32  * ----------------
33  * USB Introduction
34  * ----------------
35  *
36  * To properly understand the xhci driver and the design of the USBA HCDI
37  * interfaces it implements, it helps to have a bit of background into how USB
38  * devices are structured and understand how they work at a high-level.
39  *
40  * USB devices, like PCI devices, are broken down into different classes of
41  * device. For example, with USB you have hubs, human-input devices (keyboards,
42  * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
43  * Many client drivers bind to an entire class of device, for example, the hubd
44  * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
45  * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
46  *
47  * USB SPEEDS AND VERSIONS
48  *
49  * USB devices are often referred to in two different ways. One way they're
50  * described is with the USB version that they conform to. In the wild, you're
51  * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
52  * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
53  * devices.
54  *
55  * The latter description describes the maximum theoretical speed of a given
56  * device. For example, a super-speed device theoretically caps out around 5
57  * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
58  *
59  * In general, each speed usually corresponds to a specific USB protocol
60  * generation. For example, all USB 3.0 devices are super-speed devices. All
61  * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
62  * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
63  * only a USB 1.x thing, they did not jump the fire line to USB 2.x.
64  *
65  * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
66  * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
67  * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
68  * device.
69  *
70  * USB ENDPOINTS
71  *
72  * A given USB device is made up of endpoints. A request, or transfer, is made
73  * to a specific USB endpoint. These endpoints can provide different services
74  * and have different expectations around the size of the data that'll be used
75  * in a given request and the periodicity of requests. Endpoints themselves are
76  * either used to make one-shot requests, for example, making requests to a mass
77  * storage device for a given sector, or for making periodic requests where you
78  * end up polling on the endpoint, for example, polling on a USB keyboard for
79  * keystrokes.
80  *
81  * Each endpoint encodes two different pieces of information: a direction and a
82  * type. There are two different directions: IN and OUT. These refer to the
83  * general direction that data moves relative to the operating system. For
84  * example, an IN transfer transfers data in to the operating system, from the
85  * device. An OUT transfer transfers data from the operating system, out to the
86  * device.
87  *
88  * There are four different kinds of endpoints:
89  *
90  *	BULK		These transfers are large transfers of data to or from
91  *			a device. The most common use for bulk transfers is for
92  *			mass storage devices. Though they are often also used by
93  *			network devices and more. Bulk endpoints do not have an
94  *			explicit time component to them. They are always used
95  *			for one-shot transfers.
96  *
97  *	CONTROL		These transfers are used to manipulate devices
98  *			themselves and are used for USB protocol level
99  *			operations (whether device-specific, class-specific, or
 *			generic across all of USB). Unlike other transfers,
 *			control transfers are always bi-directional and are
 *			made up of several stages (setup, data, and status).
103  *
104  *	INTERRUPT	Interrupt transfers are used for small transfers that
105  *			happen infrequently, but need reasonable latency. A good
106  *			example of interrupt transfers is to receive input from
107  *			a USB keyboard. Interrupt-IN transfers are generally
108  *			polled. Meaning that a client (device driver) opens up
109  *			an interrupt-IN pipe to poll on it, and receives
110  *			periodic updates whenever there is information
111  *			available. However, Interrupt transfers can be used
112  *			as one-shot transfers both going IN and OUT.
113  *
114  *	ISOCHRONOUS	These transfers are things that happen once per
115  *			time-interval at a very regular rate. A good example of
116  *			these transfers are for audio and video. A device may
117  *			describe an interval as 10ms at which point it will read
118  *			or write the next batch of data every 10ms and transform
119  *			it for the user. There are no one-shot Isochronous-IN
120  *			transfers. There are one-shot Isochronous-OUT transfers,
121  *			but these are used by device drivers to always provide
122  *			the system with sufficient data.
123  *
124  * To find out information about the endpoints, USB devices have a series of
125  * descriptors that cover different aspects of the device. For example, there
126  * are endpoint descriptors which cover the properties of endpoints such as the
127  * maximum packet size or polling interval.
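 *
 * As a brief, hedged illustration (using the generic usb_ep_descr_t
 * definitions from <sys/usb/usbai.h> rather than anything specific to this
 * driver), given a descriptor pointer 'ep' from the configuration cloud, the
 * direction and type discussed above can be decoded as:
 *
 *	boolean_t dir_in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
 *	    USB_EP_DIR_IN;
 *	uint8_t type = ep->bmAttributes & USB_EP_ATTR_MASK;
 *
 * where type will be one of USB_EP_ATTR_CONTROL, USB_EP_ATTR_ISOCH,
 * USB_EP_ATTR_BULK, or USB_EP_ATTR_INTR, and the wMaxPacketSize and bInterval
 * fields carry the maximum packet size and polling interval mentioned above.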
128  *
129  * Descriptors exist at all levels of USB. For example, there are general
130  * descriptors for every device. The USB device descriptor is described in
131  * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
132  * that they program the device correctly; however, they are more often used by
133  * client device drivers. There are also descriptors that exist at a class
134  * level. For example, the hub class has a class-specific descriptor which
 * describes properties of the hub. That information is requested and used by
 * the hub driver.
137  *
138  * All of the different descriptors are gathered by the system and placed into a
139  * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
140  * drivers gain access to this cloud and then use them to open endpoints, which
141  * are called pipes in USBA (and some revisions of the USB specification).
142  *
143  * Each pipe gives access to a specific endpoint on the device which can be used
144  * to perform transfers of a specific type and direction. For example, a mass
145  * storage device often has three different endpoints, the default control
146  * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
147  * endpoint. The device driver ends up with three open pipes. One to the default
148  * control endpoint to configure the device, and then the other two are used to
149  * perform I/O.
150  *
151  * These routines translate more or less directly into calls to a host
152  * controller driver. A request to open a pipe takes an endpoint descriptor that
153  * describes the properties of the pipe, and the host controller driver (this
154  * driver) goes through and does any work necessary to allow the client device
155  * driver to access it. Once the pipe is open, it either makes one-shot
156  * transfers specific to the transfer type or it starts performing a periodic
157  * poll of an endpoint.
158  *
159  * All of these different actions translate into requests to the host
160  * controller. The host controller driver itself is in charge of making sure
161  * that all of the required resources for polling are allocated with a request
 * and then proceeds to deliver the client driver's periodic callbacks.
163  *
164  * HUBS AND HOST CONTROLLERS
165  *
166  * Every device is always plugged into a hub, even if the device is itself a
167  * hub. This continues until we reach what we call the root-hub. The root-hub is
168  * special in that it is not an actual USB hub, but is integrated into the host
169  * controller and is manipulated in its own way. For example, the host
170  * controller is used to turn on and off a given port's power. This may happen
171  * over any interface, though the most common way is through PCI.
172  *
173  * In addition to the normal character device that exists for a host controller
174  * driver, as part of attaching, the host controller binds to an instance of the
175  * hubd driver. While the root-hub is a bit of a fiction, everyone models the
176  * root-hub as the same as any other hub that's plugged in. The hub kernel
177  * module doesn't know that the hub isn't a physical device that's been plugged
178  * in. The host controller driver simulates that view by taking hub requests
179  * that are made and translating them into corresponding requests that are
180  * understood by the host controller, for example, reading and writing to a
181  * memory mapped register.
182  *
183  * The hub driver polls for changes in device state using an Interrupt-IN
184  * request, which is the same as is done for the root-hub. This allows the host
185  * controller driver to not have to know about the implementation of device hot
186  * plug, merely react to requests from a hub, the same as if it were an external
187  * device. When the hub driver detects a change, it will go through the
188  * corresponding state machine and attach or detach the corresponding client
189  * device driver, depending if the device was inserted or removed.
190  *
191  * We detect the changes for the Interrupt-IN primarily based on the port state
192  * change events that are delivered to the event ring. Whenever any event is
193  * fired, we use this to update the hub driver about _all_ ports with
194  * outstanding events. This more closely matches how a hub is supposed to behave
 * and makes it less likely that the hub driver ends up failing to clear a flag
 * on a port.
197  *
198  * PACKET SIZES AND BURSTING
199  *
200  * A given USB endpoint has an explicit packet size and a number of packets that
201  * can be sent per time interval. These concepts are abstracted away from client
 * device drivers usually, though they sometimes inform the upper bounds of what
203  * a device can perform.
204  *
205  * The host controller uses this information to transform arbitrary transfer
206  * requests into USB protocol packets. One of the nice things about the host
207  * controllers is that they abstract away all of the signaling and semantics of
208  * the actual USB protocols, allowing for life to be slightly easier in the
209  * operating system.
210  *
211  * That said, if the host controller is not programmed correctly, these can end
212  * up causing transaction errors and other problems in response to the data that
213  * the host controller is trying to send or receive.
214  *
215  * ------------
216  * Organization
217  * ------------
218  *
219  * The driver is made up of the following files. Many of these have their own
 * theory statements to describe what they do. Here, we touch on the purpose of
 * each of these files.
222  *
223  * xhci_command.c:	This file contains the logic to issue commands to the
224  *			controller as well as the actual functions that the
 *			other parts of the driver use to issue those commands.
226  *
 * xhci_context.c:	This file manages the context data structures that are
 *			given to the controller, both for the controller itself
 *			and for each device. See the xHCI Overview and Structure
 *			Layout section for more information.
231  *
232  * xhci_dma.c:		This manages the allocation of DMA memory and DMA
 *			attributes for the controller, whether memory is for a
234  *			transfer or something else. This file also deals with
235  *			all the logic of getting data in and out of DMA buffers.
236  *
237  * xhci_endpoint.c:	This manages all of the logic of handling endpoints or
238  *			pipes. It deals with endpoint configuration, I/O
239  *			scheduling, timeouts, and callbacks to USBA.
240  *
241  * xhci_event.c:	This manages callbacks from the hardware to the driver.
242  *			This covers command completion notifications and I/O
243  *			notifications.
244  *
245  * xhci_hub.c:		This manages the virtual root-hub. It basically
246  *			implements and translates all of the USB level requests
 *			into xhci specific operations. It also contains the
248  *			functions to register this hub with USBA.
249  *
250  * xhci_intr.c:		This manages the underlying interrupt allocation,
251  *			interrupt moderation, and interrupt routines.
252  *
253  * xhci_quirks.c:	This manages information about buggy hardware that's
254  *			been collected and experienced primarily from other
255  *			systems.
256  *
257  * xhci_ring.c:		This manages the abstraction of a ring in xhci, which is
 *			the primary means of communication between the driver
 *			and the hardware, whether for the controller or a device.
260  *
261  * xhci_usba.c:		This implements all of the HCDI functions required by
262  *			USBA. This is the main entry point that drivers and the
263  *			kernel frameworks will reach to start any operation.
264  *			Many functions here will end up in the command and
265  *			endpoint code.
266  *
267  * xhci.c:		This provides the main kernel DDI interfaces and
268  *			performs device initialization.
269  *
270  * xhci_polled.c:	This provides the polled I/O functions that the
271  *			kernel debugger can use.
272  *
273  * xhci.h:		This is the primary header file which defines
274  *			illumos-specific data structures and constants to manage
275  *			the system.
276  *
277  * xhcireg.h:		This header file defines all of the register offsets,
278  *			masks, and related macros. It also contains all of the
279  *			constants that are used in various structures as defined
280  *			by the specification, such as command offsets, etc.
281  *
282  * xhci_ioctl.h:	This contains a few private ioctls that are used by a
283  *			private debugging command. These are private.
284  *
285  * cmd/xhci/xhci_portsc:	This is a private utility that can be useful for
286  *				debugging xhci state. It is the only consumer of
287  *				xhci_ioctl.h and the private ioctls.
288  *
289  * ----------------------------------
290  * xHCI Overview and Structure Layout
291  * ----------------------------------
292  *
293  * The design and structure of this driver follows from the way that the xHCI
294  * specification tells us that we have to work with hardware. First we'll give a
295  * rough summary of how that works, though the xHCI 1.1 specification should be
296  * referenced when going through this.
297  *
298  * There are three primary parts of the hardware -- registers, contexts, and
299  * rings. The registers are memory mapped registers that come in four sets,
300  * though all are found within the first BAR. These are used to program and
301  * control the hardware and aspects of the devices. Beyond more traditional
302  * device programming there are two primary sets of registers that are
303  * important:
304  *
305  *   o Port Status and Control Registers (XHCI_PORTSC)
306  *   o Doorbell Array (XHCI_DOORBELL)
307  *
308  * The port status and control registers are used to get and manipulate the
309  * status of a given device. For example, turning on and off the power to it.
310  * The Doorbell Array is used to kick off I/O operations and start the
311  * processing of an I/O ring.
312  *
313  * The contexts are data structures that represent various pieces of information
314  * in the controller. These contexts are generally filled out by the driver and
315  * then acknowledged and consumed by the hardware. There are controller-wide
316  * contexts (mostly managed in xhci_context.c) that are used to point to the
317  * contexts that exist for each device in the system. The primary context is
318  * called the Device Context Base Address Array (DCBAA).
319  *
320  * Each device in the system is allocated a 'slot', which is used to index into
321  * the DCBAA. Slots are assigned based on issuing commands to the controller.
322  * There are a fixed number of slots that determine the maximum number of
323  * devices that can end up being supported in the system. Note this includes all
324  * the devices plugged into the USB device tree, not just devices plugged into
325  * ports on the chassis.
326  *
327  * For each device, there is a context structure that describes properties of
328  * the device. For example, what speed is the device, is it a hub, etc. The
329  * context has slots for the device and for each endpoint on the device. As
 * endpoints are enabled, their context information, which describes things like
 * the maximum packet size, is filled in and enabled. The mapping between these
 * contexts looks like:
333  *
334  *
335  *      DCBAA
336  *    +--------+                    Device Context
337  *    | Slot 0 |------------------>+--------------+
338  *    +--------+                   | Slot Context |
339  *    |  ...   |                   +--------------+       +----------+
340  *    +--------+   +------+        |  Endpoint 0  |------>| I/O Ring |
341  *    | Slot n |-->| NULL |        | Context (Bi) |       +----------+
342  *    +--------+   +------+        +--------------+
343  *                                 |  Endpoint 1  |
344  *                                 | Context (Out)|
345  *                                 +--------------+
346  *                                 |  Endpoint 1  |
347  *                                 | Context (In) |
348  *                                 +--------------+
349  *                                 |      ...     |
350  *                                 +--------------+
351  *                                 | Endpoint 15  |
352  *                                 | Context (In) |
353  *                                 +--------------+
354  *
355  * These contexts are always owned by the controller, though we can read them
356  * after various operations complete. Commands that toggle device state use a
357  * specific input context, which is a variant of the device context. The only
358  * difference is that it has an input context structure ahead of it to say which
359  * sections of the device context should be evaluated.
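 *
 * As a rough sketch (based on xHCI 1.1 / 6.2.5 and the default 32-byte context
 * size, not the driver's exact structure definitions), an input context is
 * laid out as:
 *
 *	typedef struct input_context_sketch {
 *		uint32_t	icc_drop;
 *		uint32_t	icc_add;
 *		uint32_t	icc_rsvd[6];
 *		uint32_t	slot_ctx[8];
 *		uint32_t	ep_ctx[31][8];
 *	} input_context_sketch_t;
 *
 * Here, a bit set in icc_drop (D1-D31) asks the controller to disable that
 * context, while a bit set in icc_add (A0-A31) asks it to evaluate the
 * corresponding slot or endpoint context that follows.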
360  *
361  * Each active endpoint points us to an I/O ring, which leads us to the third
362  * main data structure that's used by the device: rings. Rings are made up of
363  * transfer request blocks (TRBs), which are joined together to form a given
 * transfer descriptor (TD), which represents a single I/O request.
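 *
 * Every TRB is 16 bytes: a 64-bit parameter (frequently a DMA address), a
 * 32-bit status word, and a 32-bit control word holding the TRB type and the
 * cycle bit that arbitrates ownership between the driver and the hardware. A
 * hedged sketch of that layout (the driver's own definition lives in xhci.h
 * and its field names may differ) is:
 *
 *	typedef struct trb_sketch {
 *		uint64_t	trb_addr;
 *		uint32_t	trb_status;
 *		uint32_t	trb_flags;
 *	} trb_sketch_t;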
365  *
366  * These rings are used to issue I/O to individual endpoints, to issue commands
367  * to the controller, and to receive notification of changes and completions.
368  * Issued commands go on the special ring called the command ring while the
369  * change and completion notifications go on the event ring.  More details are
370  * available in xhci_ring.c. Each of these structures is represented by an
371  * xhci_ring_t.
372  *
373  * Each ring can be made up of one or more disjoint regions of DMA; however, we
374  * only use a single one. This also impacts some additional registers and
375  * structures that exist. The event ring has an indirection table called the
376  * Event Ring Segment Table (ERST). Each entry in the table (a segment)
377  * describes a chunk of the event ring.
378  *
379  * One other thing worth calling out is the scratchpad. The scratchpad is a way
380  * for the controller to be given arbitrary memory by the OS that it can use.
381  * There are two parts to the scratchpad. The first part is an array whose
382  * entries contain pointers to the actual addresses for the pages. The second
 * part is the set of pages themselves, which we also allocate.
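 *
 * A hedged sketch of how the scratchpad gets wired up (following xHCI 1.1 /
 * 4.20; the variable names here are purely illustrative, and the macro is the
 * HCSPARAMS2 helper used later in this file):
 *
 *	nbufs = XHCI_HCS2_SPB_MAX(hcsparams2);
 *	array = <DMA memory for nbufs uint64_t entries>;
 *	for (i = 0; i < nbufs; i++)
 *		array[i] = <physical address of a PAGESIZE-aligned page>;
 *	dcbaa[0] = <physical address of array>;
 *
 * In other words, entry zero of the DCBAA does not refer to a device slot; it
 * points to the scratchpad pointer array whenever the controller asks for
 * scratchpad buffers.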
384  *
385  * -----------------------------
386  * Endpoint State and Management
387  * -----------------------------
388  *
389  * Endpoint management is one of the key parts to the xhci driver as every
390  * endpoint is a pipe that a device driver uses, so they are our primary
391  * currency. An endpoint is enabled when the client device driver opens the
392  * associated pipe for the first time. When an endpoint is enabled, we have to
393  * fill in an endpoint's context structure with information about the endpoint.
394  * These basically tell the controller important properties which it uses to
395  * ensure that there is adequate bandwidth for the device.
396  *
 * If the client device driver closes the pipe again we explicitly stop the
 * endpoint, moving it to the Stopped state, and take ownership of any transfers
399  * previously submitted to the ring but which have not yet completed. A client
400  * may open and close a pipe several times -- ugen(4D) in particular is known
401  * for this -- and we will stop and start the ring accordingly.
402  *
403  * It is tempting to fully unconfigure an endpoint when a pipe is closed, but
404  * some host controllers appear to exhibit undefined behaviour each time the
405  * endpoint is re-enabled this way; e.g., silently dropped transfers. As such,
406  * we wait until the whole device is being torn down to disable all previously
407  * enabled endpoints at once, as part of disabling the device slot.
408  *
409  * Each endpoint has its own ring as described in the previous section. We place
410  * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
 * Responses are placed on the event ring; in other words, the rings associated
412  * with an endpoint are purely for producing I/O.
413  *
414  * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
415  * These states generally correspond with the state of the endpoint to process
416  * I/O and handle timeouts. The driver basically follows a similar state machine
417  * as described there. There are some deviations. For example, what they
418  * describe as 'running' we break into both the Idle and Running states below.
419  * We also have a notion of timed out and quiescing. The following image
420  * summarizes the states and transitions:
421  *
422  *     +------+                                +-----------+
423  *     | Idle |---------*--------------------->|  Running  |<-+
424  *     +------+         . I/O queued on        +-----------+  |
425  *        ^               ring and timeout        |  |  |     |
426  *        |               scheduled.              |  |  |     |
427  *        |                                       |  |  |     |
428  *        +-----*---------------------------------+  |  |     |
429  *        |     . No I/Os remain                     |  |     |
430  *        |                                          |  |     |
431  *        |                +------*------------------+  |     |
432  *        |                |      . Timeout             |     |
433  *        |                |        fires for           |     |
434  *        |                |        I/O                 |     |
435  *        |                v                            v     |
436  *        |          +-----------+                +--------+  |
437  *        |          | Timed Out |                | Halted |  |
438  *        |          +-----------+                +--------+  |
439  *        |             |                           |         |
440  *        |             |   +-----------+           |         |
441  *        |             +-->| Quiescing |<----------+         |
442  *        |                 +-----------+                     |
443  *        |   No TRBs.           |                . TRBs      |
444  *        |   remain .           |                . Remain    |
445  *        +----------*----<------+-------->-------*-----------+
446  *
447  * Normally, a given endpoint will oscillate between having TRBs scheduled and
448  * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
449  * making sure that we're processing the ring, presuming that the endpoint isn't
450  * in one of the error states.
451  *
452  * To detect device hangs, we have an active timeout(9F) per active endpoint
453  * that ticks at a one second rate while we still have TRBs outstanding on an
454  * endpoint. Once all outstanding TRBs have been processed, the timeout will
455  * stop itself and there will be no active checking until the endpoint has I/O
456  * scheduled on it again.
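 *
 * A minimal sketch of that pattern using timeout(9F) follows; the xep_xhcip,
 * xep_transfers, and xep_timer members and the helper names are hypothetical
 * stand-ins, not the driver's actual identifiers:
 *
 *	static void
 *	xep_tick(void *arg)
 *	{
 *		xhci_endpoint_t *xep = arg;
 *		xhci_t *xhcip = xep->xep_xhcip;
 *
 *		mutex_enter(&xhcip->xhci_lock);
 *		if (list_is_empty(&xep->xep_transfers)) {
 *			xep->xep_timer = 0;
 *			mutex_exit(&xhcip->xhci_lock);
 *			return;
 *		}
 *		xep_head_timeout_decrement(xep);
 *		xep->xep_timer = timeout(xep_tick, xep,
 *		    drv_usectohz(MICROSEC));
 *		mutex_exit(&xhcip->xhci_lock);
 *	}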
457  *
458  * There are two primary ways that things can go wrong on the endpoint. We can
459  * either have a timeout or an event that transitions the endpoint to the Halted
460  * state. In the halted state, we need to issue explicit commands to reset the
461  * endpoint before removing the I/O.
462  *
463  * The way we handle both a timeout and a halted condition is similar, but the
464  * way they are triggered is different. When we detect a halted condition, we
465  * don't immediately clean it up, and wait for the client device driver (or USBA
466  * on its behalf) to issue a pipe reset. When we detect a timeout, we
467  * immediately take action (assuming no other action is ongoing).
468  *
469  * In both cases, we quiesce the device, which takes care of dealing with taking
470  * the endpoint from whatever state it may be in and taking the appropriate
471  * actions based on the state machine in xHCI 1.1 / 4.8.3. The end of quiescing
472  * leaves the device stopped, which allows us to update the ring's pointer and
473  * remove any TRBs that are causing problems.
474  *
475  * As part of all this, we ensure that we can only be quiescing the device from
476  * a given path at a time. Any requests to schedule I/O during this time will
477  * generally fail.
478  *
479  * The following image describes the state machine for the timeout logic. It
480  * ties into the image above.
481  *
482  *         +----------+                            +---------+
483  *         | Disabled |-----*--------------------->| Enabled |<--+
484  *         +----------+     . TRBs scheduled       +---------+   *. 1 sec timer
485  *             ^              and no active          |  |  |     |  fires and
486  *             |              timer.                 |  |  |     |  another
487  *             |                                     |  |  +--+--+  quiesce, in
488  *             |                                     |  |     |     a bad state,
489  *             +------*------------------------------+  |     ^     or decrement
490  *             |      . 1 sec timer                     |     |     I/O timeout
491  *             |        fires and                       |     |
492  *             |        no TRBs or                      |     +--------------+
493  *             |        endpoint shutdown               |                    |
494  *             |                                        *. . timer counter   |
495  *             ^                                        |    reaches zero    |
496  *             |                                        v                    |
497  *             |                                +--------------+             |
498  *             +-------------*---------------<--| Quiesce ring |->---*-------+
499  *                           . No more          | and fail I/O |     . restart
500  *                             I/Os             +--------------+       timer as
501  *                                                                     more I/Os
502  *
503  * As we described above, when there are active TRBs and I/Os, a 1 second
504  * timeout(9F) will be active. Each second, we decrement a counter on the
505  * current, active I/O until either a new I/O takes the head, or the counter
506  * reaches zero. If the counter reaches zero, then we go through, quiesce the
507  * ring, and then clean things up.
508  *
509  * ------------------
510  * Periodic Endpoints
511  * ------------------
512  *
513  * It's worth calling out periodic endpoints explicitly, as they operate
514  * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
515  * Isochronous-IN. The USBA often uses the term polling for these. That's
516  * because the client only needs to make a single API call; however, they'll
517  * receive multiple callbacks until either an error occurs or polling is
518  * requested to be terminated.
519  *
520  * When we have one of these periodic requests, we end up always rescheduling
 * I/O requests, as well as having a specific number of pre-existing I/O
522  * requests to cover the periodic needs, in case of latency spikes. Normally,
523  * when replying to a request, we use the request handle that we were given.
524  * However, when we have a periodic request, we're required to duplicate the
525  * handle before giving them data.
526  *
527  * However, the duplication is a bit tricky. For everything that was duplicated,
 * the framework expects us to submit data. Because of that, we don't duplicate
529  * them until they are needed. This minimizes the likelihood that we have
530  * outstanding requests to deal with when we encounter a fatal polling failure.
531  *
532  * Most of the polling setup logic happens in xhci_usba.c in
533  * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
534  * xhci_endpoint.c.
535  *
536  * ----------------
537  * Structure Layout
538  * ----------------
539  *
540  * The following images relate the core data structures. The primary structure
541  * in the system is the xhci_t. This is the per-controller data structure that
542  * exists for each instance of the driver. From there, each device in the system
543  * is represented by an xhci_device_t and each endpoint is represented by an
544  * xhci_endpoint_t. For each client that opens a given endpoint, there is an
545  * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
546  * system.
547  *
548  *     +------------------------+
549  *     | Per-Controller         |
550  *     | Structure              |
551  *     | xhci_t                 |
552  *     |                        |
553  *     | uint_t              ---+--> Capability regs offset
554  *     | uint_t              ---+--> Operational regs offset
555  *     | uint_t              ---+--> Runtime regs offset
556  *     | uint_t              ---+--> Doorbell regs offset
557  *     | xhci_state_flags_t  ---+--> Device state flags
558  *     | xhci_quirks_t       ---+--> Device quirk flags
559  *     | xhci_capability_t   ---+--> Controller capability structure
560  *     | xhci_dcbaa_t        ---+----------------------------------+
561  *     | xhci_scratchpad_t   ---+---------+                        |
 *     | xhci_command_ring_t ---+------+  |                        |
563  *     | xhci_event_ring_t   ---+----+ |  |              +---------------------+
564  *     | xhci_usba_t         ---+--+ | |  |              | Device Context      |
565  *     +------------------------+  | | |  |              | Base Address        |
566  *                                 | | |  |              | Array Structure     |
567  *                                 | | |  |              | xhci_dcbaa_t        |
568  * +-------------------------------+ | |  |              |                     |
569  * | +-------------------------------+ |  |  DCBAA KVA <-+--        uint64_t * |
570  * | |    +----------------------------+  | DMA Buffer <-+-- xhci_dma_buffer_t |
571  * | |    v                               |              +---------------------+
572  * | | +--------------------------+       +-----------------------+
573  * | | | Event Ring               |                               |
574  * | | | Management               |                               |
575  * | | | xhci_event_ring_t        |                               v
576  * | | |                          |   Event Ring        +----------------------+
577  * | | | xhci_event_segment_t * --|-> Segment VA        |   Scratchpad (Extra  |
578  * | | | xhci_dma_buffer_t      --|-> Segment DMA Buf.  |   Controller Memory) |
579  * | | | xhci_ring_t            --|--+                  |    xhci_scratchpad_t |
580  * | | +--------------------------+  |      Scratchpad  |                      |
581  * | |                               | Base Array KVA <-+-          uint64_t * |
582  * | +------------+                  | Array DMA Buf. <-+-   xhci_dma_buffer_t |
583  * |              v                  | Scratchpad DMA <-+- xhci_dma_buffer_t * |
584  * |   +---------------------------+ | Buffer per page  +----------------------+
585  * |   | Command Ring              | |
586  * |   | xhci_command_ring_t       | +------------------------------+
587  * |   |                           |                                |
588  * |   | xhci_ring_t             --+-> Command Ring --->------------+
589  * |   | list_t                  --+-> Command List                 v
590  * |   | timeout_id_t            --+-> Timeout State     +---------------------+
591  * |   | xhci_command_ring_state_t +-> State Flags       | I/O Ring            |
592  * |   +---------------------------+                     | xhci_ring_t         |
593  * |                                                     |                     |
594  * |                                     Ring DMA Buf. <-+-- xhci_dma_buffer_t |
595  * |                                       Ring Length <-+--            uint_t |
596  * |                                    Ring Entry KVA <-+--      xhci_trb_t * |
597  * |    +---------------------------+        Ring Head <-+--            uint_t |
598  * +--->| USBA State                |        Ring Tail <-+--            uint_t |
599  *      | xhci_usba_t               |       Ring Cycle <-+--            uint_t |
600  *      |                           |                    +---------------------+
601  *      | usba_hcdi_ops_t *        -+-> USBA Ops Vector                       ^
602  *      | usb_dev_dscr_t           -+-> USB Virtual Device Descriptor         |
603  *      | usb_ss_hub_descr_t       -+-> USB Virtual Hub Descriptor            |
604  *      | usba_pipe_handle_data_t * +-> Interrupt polling client              |
605  *      | usb_intr_req_t           -+-> Interrupt polling request             |
606  *      | uint32_t                --+-> Interrupt polling device mask         |
607  *      | list_t                  --+-> Pipe List (Active Users)              |
608  *      | list_t                  --+-------------------+                     |
609  *      +---------------------------+                   |                     ^
610  *                                                      |                     |
611  *                                                      v                     |
612  *     +-------------------------------+             +---------------+        |
613  *     | USB Device                    |------------>| USB Device    |--> ... |
614  *     | xhci_device_t                 |             | xhci_device_t |        |
615  *     |                               |             +---------------+        |
616  *     | usb_port_t                  --+-> USB Port plugged into              |
617  *     | uint8_t                     --+-> Slot Number                        |
618  *     | boolean_t                   --+-> Address Assigned                   |
619  *     | usba_device_t *             --+-> USBA Device State                  |
620  *     | xhci_dma_buffer_t           --+-> Input Context DMA Buffer           |
621  *     | xhci_input_context_t *      --+-> Input Context KVA                  |
 *     | xhci_slot_context_t *       --+-> Input Slot Context KVA             |
623  *     | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA         |
624  *     | xhci_dma_buffer_t           --+-> Output Context DMA Buffer          |
625  *     | xhci_slot_context_t *       --+-> Output Slot Context KVA            ^
626  *     | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA        |
627  *     | xhci_endpoint_t *[]         --+-> Endpoint Tracking ---+             |
628  *     +-------------------------------+                        |             |
629  *                                                              |             |
630  *                                                              v             |
631  *     +------------------------------+            +-----------------+        |
632  *     | Endpoint Data                |----------->| Endpoint Data   |--> ... |
633  *     | xhci_endpoint_t              |            | xhci_endpoint_t |        |
634  *     |                              |            +-----------------+        |
635  *     | int                        --+-> Endpoint Number                     |
636  *     | int                        --+-> Endpoint Type                       |
637  *     | xhci_endpoint_state_t      --+-> Endpoint State                      |
638  *     | timeout_id_t               --+-> Endpoint Timeout State              |
639  *     | usba_pipe_handle_data_t *  --+-> USBA Client Handle                  |
640  *     | xhci_ring_t                --+-> Endpoint I/O Ring  -------->--------+
641  *     | list_t                     --+-> Transfer List --------+
642  *     +------------------------------+                         |
643  *                                                              v
644  *     +-------------------------+                  +--------------------+
645  *     | Transfer Structure      |----------------->| Transfer Structure |-> ...
646  *     | xhci_transfer_t         |                  | xhci_transfer_t    |
647  *     |                         |                  +--------------------+
648  *     | xhci_dma_buffer_t     --+-> I/O DMA Buffer
649  *     | uint_t                --+-> Number of TRBs
650  *     | uint_t                --+-> Short transfer data
651  *     | uint_t                --+-> Timeout seconds remaining
652  *     | usb_cr_t              --+-> USB Transfer return value
653  *     | boolean_t             --+-> Data direction
654  *     | xhci_trb_t *          --+-> Host-order transfer requests for I/O
655  *     | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
656  *     | usb_opaque_t          --+-> USBA Request Handle
657  *     +-------------------------+
658  *
659  * -------------
660  * Lock Ordering
661  * -------------
662  *
663  * There are three different tiers of locks that exist in the driver. First,
664  * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
665  * data for that instance of the controller. If there are multiple instances of
666  * the xHCI controller in the system, each one is independent and protected
667  * separately. The two do not share any data.
 * separately. They do not share any data.
669  * From there, there are two other, specific locks in the system:
670  *
671  *   o xhci_command_ring_t`xcr_lock
672  *   o xhci_device_t`xd_imtx
673  *
674  * There is only one xcr_lock per controller, like the xhci_lock. It protects
 * the state of the command ring. However, there is one xd_imtx per device.
676  * Recall that each device is scoped to a given controller. This protects the
677  * input slot context for a given device.
678  *
679  * There are a few important rules to keep in mind here that are true
680  * universally throughout the driver:
681  *
682  * 1) Always grab the xhci_t`xhci_lock, before grabbing any of the other locks.
 * 2) A given xhci_device_t`xd_imtx must be taken before grabbing the
684  *    xhci_command_ring_t`xcr_lock.
685  * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
686  *    at a given time. In other words, we should never be manipulating the input
687  *    context of two different devices at once.
688  * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
689  *    endpoint timer. Conversely, the endpoint specific logic should never enter
690  *    this lock.
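 *
 * As a minimal, hypothetical illustration of that ordering (the variable names
 * are illustrative and this is not a real code path in the driver), a thread
 * that needed all three tiers would take and release them as:
 *
 *	mutex_enter(&xhcip->xhci_lock);
 *	mutex_enter(&xd->xd_imtx);
 *	mutex_enter(&xcr->xcr_lock);
 *	...
 *	mutex_exit(&xcr->xcr_lock);
 *	mutex_exit(&xd->xd_imtx);
 *	mutex_exit(&xhcip->xhci_lock);
 *
 * The command ring's xcr_lock is always innermost, and per rule 3 no second
 * device's xd_imtx may be acquired while the first is still held.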
691  *
692  * ----------
693  * Polled I/O
694  * ----------
695  *
696  * There is limited support for polled I/O in this driver for use by
697  * the kernel debugger. The driver currently only supports input from
698  * interrupt endpoints which is good enough for USB HID keyboard devices.
699  * Input from bulk endpoints and output are not supported which prevents
700  * using a serial console over USB for kernel debugging.
701  *
702  * --------------------
703  * Relationship to EHCI
704  * --------------------
705  *
706  * On some Intel chipsets, a given physical port on the system may be routed to
707  * one of the EHCI or xHCI controllers. This association can be dynamically
708  * changed by writing to platform specific registers as handled by the quirk
 * logic in xhci_quirks.c.
710  *
711  * As these ports may support USB 3.x speeds, we always route all such ports to
712  * the xHCI controller, when supported. In addition, to minimize disruptions
713  * from devices being enumerated and attached to the EHCI driver and then
714  * disappearing, we generally attempt to load the xHCI controller before the
715  * EHCI controller. This logic is not done in the driver; however, it is done in
716  * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
717  * function consconfig_load_drivers().
718  *
719  * -----------
720  * Future Work
721  * -----------
722  *
723  * The primary future work in this driver spans two different, but related
724  * areas. The first area is around controller resets and how they tie into FM.
725  * Presently, we do not have a good way to handle controllers coming and going
726  * in the broader USB stack or properly reconfigure the device after a reset.
727  * Secondly, we don't handle the suspend and resume of devices and drivers.
728  */
729 
730 #include <sys/param.h>
731 #include <sys/modctl.h>
732 #include <sys/conf.h>
733 #include <sys/devops.h>
734 #include <sys/ddi.h>
735 #include <sys/sunddi.h>
736 #include <sys/cmn_err.h>
737 #include <sys/ddifm.h>
738 #include <sys/pci.h>
739 #include <sys/class.h>
740 #include <sys/policy.h>
741 
742 #include <sys/usb/hcd/xhci/xhci.h>
743 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
744 
745 /*
746  * We want to use the first BAR to access its registers. The regs[] array is
747  * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
748  * will always be the first BAR.
749  */
750 #define	XHCI_REG_NUMBER	1
751 
752 /*
753  * This task queue exists as a global taskq that is used for resetting the
754  * device in the face of FM or runtime errors. Each instance of the device
755  * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
756  * know that we should always be able to dispatch such an event.
757  */
758 static taskq_t *xhci_taskq;
759 
760 /*
761  * Global soft state for per-instance data. Note that we must use the soft state
762  * routines and cannot use the ddi_set_driver_private() routines. The USB
763  * framework presumes that it can use the dip's private data.
764  */
765 void *xhci_soft_state;
766 
767 /*
 * This is the time in us that we wait after a controller reset before we
 * consider reading any register. Some controllers want at least 1 ms, so we
 * default to 10 ms.
771  */
772 clock_t xhci_reset_delay = 10000;
773 
774 void
775 xhci_error(xhci_t *xhcip, const char *fmt, ...)
776 {
777 	va_list ap;
778 
779 	va_start(ap, fmt);
780 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
781 		vdev_err(xhcip->xhci_dip, CE_WARN, fmt, ap);
782 	} else {
783 		vcmn_err(CE_WARN, fmt, ap);
784 	}
785 	va_end(ap);
786 }
787 
788 void
789 xhci_log(xhci_t *xhcip, const char *fmt, ...)
790 {
791 	va_list ap;
792 
793 	va_start(ap, fmt);
794 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
795 		vdev_err(xhcip->xhci_dip, CE_NOTE, fmt, ap);
796 	} else {
797 		vcmn_err(CE_NOTE, fmt, ap);
798 	}
799 	va_end(ap);
800 }
801 
802 /*
803  * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
804  * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
805  * things here. A simple bitwise-and will take care of this. And hey, it could
806  * always be more complex, USBA could clone!
807  */
808 static dev_info_t *
809 xhci_get_dip(dev_t dev)
810 {
811 	xhci_t *xhcip;
812 	int instance = getminor(dev) & ~HUBD_IS_ROOT_HUB;
813 
814 	xhcip = ddi_get_soft_state(xhci_soft_state, instance);
815 	if (xhcip != NULL)
816 		return (xhcip->xhci_dip);
817 	return (NULL);
818 }
819 
820 uint8_t
821 xhci_get8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
822 {
823 	uintptr_t addr, roff;
824 
825 	switch (rtt) {
826 	case XHCI_R_CAP:
827 		roff = xhcip->xhci_regs_capoff;
828 		break;
829 	case XHCI_R_OPER:
830 		roff = xhcip->xhci_regs_operoff;
831 		break;
832 	case XHCI_R_RUN:
833 		roff = xhcip->xhci_regs_runoff;
834 		break;
835 	case XHCI_R_DOOR:
836 		roff = xhcip->xhci_regs_dooroff;
837 		break;
838 	default:
839 		panic("called %s with bad reg type: %d", __func__, rtt);
840 	}
841 	ASSERT(roff != PCI_EINVAL32);
842 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
843 
844 	return (ddi_get8(xhcip->xhci_regs_handle, (void *)addr));
845 }
846 
847 uint16_t
848 xhci_get16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
849 {
850 	uintptr_t addr, roff;
851 
852 	switch (rtt) {
853 	case XHCI_R_CAP:
854 		roff = xhcip->xhci_regs_capoff;
855 		break;
856 	case XHCI_R_OPER:
857 		roff = xhcip->xhci_regs_operoff;
858 		break;
859 	case XHCI_R_RUN:
860 		roff = xhcip->xhci_regs_runoff;
861 		break;
862 	case XHCI_R_DOOR:
863 		roff = xhcip->xhci_regs_dooroff;
864 		break;
865 	default:
866 		panic("called %s with bad reg type: %d", __func__, rtt);
867 	}
868 	ASSERT(roff != PCI_EINVAL32);
869 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
870 
871 	return (ddi_get16(xhcip->xhci_regs_handle, (void *)addr));
872 }
873 
874 uint32_t
875 xhci_get32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
876 {
877 	uintptr_t addr, roff;
878 
879 	switch (rtt) {
880 	case XHCI_R_CAP:
881 		roff = xhcip->xhci_regs_capoff;
882 		break;
883 	case XHCI_R_OPER:
884 		roff = xhcip->xhci_regs_operoff;
885 		break;
886 	case XHCI_R_RUN:
887 		roff = xhcip->xhci_regs_runoff;
888 		break;
889 	case XHCI_R_DOOR:
890 		roff = xhcip->xhci_regs_dooroff;
891 		break;
892 	default:
893 		panic("called %s with bad reg type: %d", __func__, rtt);
894 	}
895 	ASSERT(roff != PCI_EINVAL32);
896 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
897 
898 	return (ddi_get32(xhcip->xhci_regs_handle, (void *)addr));
899 }
900 
901 uint64_t
902 xhci_get64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
903 {
904 	uintptr_t addr, roff;
905 
906 	switch (rtt) {
907 	case XHCI_R_CAP:
908 		roff = xhcip->xhci_regs_capoff;
909 		break;
910 	case XHCI_R_OPER:
911 		roff = xhcip->xhci_regs_operoff;
912 		break;
913 	case XHCI_R_RUN:
914 		roff = xhcip->xhci_regs_runoff;
915 		break;
916 	case XHCI_R_DOOR:
917 		roff = xhcip->xhci_regs_dooroff;
918 		break;
919 	default:
920 		panic("called %s with bad reg type: %d", __func__, rtt);
921 	}
922 	ASSERT(roff != PCI_EINVAL32);
923 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
924 
925 	return (ddi_get64(xhcip->xhci_regs_handle, (void *)addr));
926 }
927 
928 void
929 xhci_put8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint8_t val)
930 {
931 	uintptr_t addr, roff;
932 
933 	switch (rtt) {
934 	case XHCI_R_CAP:
935 		roff = xhcip->xhci_regs_capoff;
936 		break;
937 	case XHCI_R_OPER:
938 		roff = xhcip->xhci_regs_operoff;
939 		break;
940 	case XHCI_R_RUN:
941 		roff = xhcip->xhci_regs_runoff;
942 		break;
943 	case XHCI_R_DOOR:
944 		roff = xhcip->xhci_regs_dooroff;
945 		break;
946 	default:
947 		panic("called %s with bad reg type: %d", __func__, rtt);
948 	}
949 	ASSERT(roff != PCI_EINVAL32);
950 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
951 
952 	ddi_put8(xhcip->xhci_regs_handle, (void *)addr, val);
953 }
954 
955 void
956 xhci_put16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint16_t val)
957 {
958 	uintptr_t addr, roff;
959 
960 	switch (rtt) {
961 	case XHCI_R_CAP:
962 		roff = xhcip->xhci_regs_capoff;
963 		break;
964 	case XHCI_R_OPER:
965 		roff = xhcip->xhci_regs_operoff;
966 		break;
967 	case XHCI_R_RUN:
968 		roff = xhcip->xhci_regs_runoff;
969 		break;
970 	case XHCI_R_DOOR:
971 		roff = xhcip->xhci_regs_dooroff;
972 		break;
973 	default:
974 		panic("called %s with bad reg type: %d", __func__, rtt);
975 	}
976 	ASSERT(roff != PCI_EINVAL32);
977 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
978 
979 	ddi_put16(xhcip->xhci_regs_handle, (void *)addr, val);
980 }
981 
982 void
983 xhci_put32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint32_t val)
984 {
985 	uintptr_t addr, roff;
986 
987 	switch (rtt) {
988 	case XHCI_R_CAP:
989 		roff = xhcip->xhci_regs_capoff;
990 		break;
991 	case XHCI_R_OPER:
992 		roff = xhcip->xhci_regs_operoff;
993 		break;
994 	case XHCI_R_RUN:
995 		roff = xhcip->xhci_regs_runoff;
996 		break;
997 	case XHCI_R_DOOR:
998 		roff = xhcip->xhci_regs_dooroff;
999 		break;
1000 	default:
1001 		panic("called %s with bad reg type: %d", __func__, rtt);
1002 	}
1003 	ASSERT(roff != PCI_EINVAL32);
1004 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1005 
1006 	ddi_put32(xhcip->xhci_regs_handle, (void *)addr, val);
1007 }
1008 
1009 void
1010 xhci_put64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint64_t val)
1011 {
1012 	uintptr_t addr, roff;
1013 
1014 	switch (rtt) {
1015 	case XHCI_R_CAP:
1016 		roff = xhcip->xhci_regs_capoff;
1017 		break;
1018 	case XHCI_R_OPER:
1019 		roff = xhcip->xhci_regs_operoff;
1020 		break;
1021 	case XHCI_R_RUN:
1022 		roff = xhcip->xhci_regs_runoff;
1023 		break;
1024 	case XHCI_R_DOOR:
1025 		roff = xhcip->xhci_regs_dooroff;
1026 		break;
1027 	default:
1028 		panic("called %s with bad reg type: %d", __func__, rtt);
1029 	}
1030 	ASSERT(roff != PCI_EINVAL32);
1031 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1032 
1033 	ddi_put64(xhcip->xhci_regs_handle, (void *)addr, val);
1034 }
1035 
1036 int
1037 xhci_check_regs_acc(xhci_t *xhcip)
1038 {
1039 	ddi_fm_error_t de;
1040 
1041 	/*
1042 	 * Treat the case where we can't check as fine so we can treat the code
1043 	 * more simply.
1044 	 */
1045 	if (!DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps))
1046 		return (DDI_FM_OK);
1047 
1048 	ddi_fm_acc_err_get(xhcip->xhci_regs_handle, &de, DDI_FME_VERSION);
1049 	ddi_fm_acc_err_clear(xhcip->xhci_regs_handle, DDI_FME_VERSION);
1050 	return (de.fme_status);
1051 }
1052 
1053 /*
1054  * As a leaf PCIe driver, we just post the ereport and continue on.
1055  */
1056 /* ARGSUSED */
1057 static int
1058 xhci_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
1059 {
1060 	pci_ereport_post(dip, err, NULL);
1061 	return (err->fme_status);
1062 }
1063 
1064 static void
1065 xhci_fm_fini(xhci_t *xhcip)
1066 {
1067 	if (xhcip->xhci_fm_caps == 0)
1068 		return;
1069 
1070 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1071 		ddi_fm_handler_unregister(xhcip->xhci_dip);
1072 
1073 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1074 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1075 		pci_ereport_teardown(xhcip->xhci_dip);
1076 
1077 	ddi_fm_fini(xhcip->xhci_dip);
1078 }
1079 
1080 static void
1081 xhci_fm_init(xhci_t *xhcip)
1082 {
1083 	ddi_iblock_cookie_t iblk;
1084 	int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1085 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
1086 
1087 	xhcip->xhci_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
1088 	    DDI_PROP_DONTPASS, "fm_capable", def);
1089 
1090 	if (xhcip->xhci_fm_caps < 0) {
1091 		xhcip->xhci_fm_caps = 0;
1092 	} else if (xhcip->xhci_fm_caps & ~def) {
1093 		xhcip->xhci_fm_caps &= def;
1094 	}
1095 
1096 	if (xhcip->xhci_fm_caps == 0)
1097 		return;
1098 
1099 	ddi_fm_init(xhcip->xhci_dip, &xhcip->xhci_fm_caps, &iblk);
1100 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1101 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1102 		pci_ereport_setup(xhcip->xhci_dip);
1103 	}
1104 
1105 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1106 		ddi_fm_handler_register(xhcip->xhci_dip,
1107 		    xhci_fm_error_cb, xhcip);
1108 	}
1109 }
1110 
1111 static int
1112 xhci_reg_poll(xhci_t *xhcip, xhci_reg_type_t rt, int reg, uint32_t mask,
1113     uint32_t targ, uint_t tries, int delay_ms)
1114 {
1115 	uint_t i;
1116 
1117 	for (i = 0; i < tries; i++) {
1118 		uint32_t val = xhci_get32(xhcip, rt, reg);
1119 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1120 			ddi_fm_service_impact(xhcip->xhci_dip,
1121 			    DDI_SERVICE_LOST);
1122 			return (EIO);
1123 		}
1124 
1125 		if ((val & mask) == targ)
1126 			return (0);
1127 
1128 		delay(drv_usectohz(delay_ms * 1000));
1129 	}
1130 	return (ETIMEDOUT);
1131 }
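
/*
 * As an illustrative example of using xhci_reg_poll() (the specific values
 * here are hypothetical, not copied from a caller), waiting up to about a
 * second for the controller-not-ready bit to clear might look like:
 *
 *	if (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_CNR, 0,
 *	    100, 10) != 0) {
 *		return (B_FALSE);
 *	}
 *
 * which re-reads XHCI_USBSTS up to 100 times, 10 ms apart, until the value
 * masked with XHCI_STS_CNR equals the target of 0.
 */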
1132 
1133 static boolean_t
1134 xhci_regs_map(xhci_t *xhcip)
1135 {
1136 	off_t memsize;
1137 	int ret;
1138 	ddi_device_acc_attr_t da;
1139 
1140 	if (ddi_dev_regsize(xhcip->xhci_dip, XHCI_REG_NUMBER, &memsize) !=
1141 	    DDI_SUCCESS) {
1142 		xhci_error(xhcip, "failed to get register set size");
1143 		return (B_FALSE);
1144 	}
1145 
1146 	bzero(&da, sizeof (ddi_device_acc_attr_t));
1147 	da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1148 	da.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1149 	da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1150 	if (DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps)) {
1151 		da.devacc_attr_access = DDI_FLAGERR_ACC;
1152 	} else {
1153 		da.devacc_attr_access = DDI_DEFAULT_ACC;
1154 	}
1155 
1156 	ret = ddi_regs_map_setup(xhcip->xhci_dip, XHCI_REG_NUMBER,
1157 	    &xhcip->xhci_regs_base, 0, memsize, &da, &xhcip->xhci_regs_handle);
1158 
1159 	if (ret != DDI_SUCCESS) {
1160 		xhci_error(xhcip, "failed to map device registers: %d", ret);
1161 		return (B_FALSE);
1162 	}
1163 
1164 	return (B_TRUE);
1165 }
1166 
1167 static boolean_t
1168 xhci_regs_init(xhci_t *xhcip)
1169 {
1170 	/*
1171 	 * The capabilities always begin at offset zero.
1172 	 */
1173 	xhcip->xhci_regs_capoff = 0;
1174 	xhcip->xhci_regs_operoff = xhci_get8(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1175 	xhcip->xhci_regs_runoff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_RTSOFF);
1176 	xhcip->xhci_regs_runoff &= ~0x1f;
1177 	xhcip->xhci_regs_dooroff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_DBOFF);
1178 	xhcip->xhci_regs_dooroff &= ~0x3;
1179 
1180 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1181 		xhci_error(xhcip, "failed to initialize controller register "
1182 		    "offsets: encountered FM register error");
1183 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1184 		return (B_FALSE);
1185 	}
1186 
1187 	return (B_TRUE);
1188 }
1189 
1190 /*
1191  * Read various parameters from PCI configuration space and from the Capability
1192  * registers that we'll need to register the device. We cache all of the
1193  * Capability registers.
1194  */
1195 static boolean_t
1196 xhci_read_params(xhci_t *xhcip)
1197 {
1198 	uint8_t usb;
1199 	uint16_t vers;
1200 	uint32_t struc1, struc2, struc3, cap1, cap2, pgsz;
1201 	uint32_t psize, pbit, capreg;
1202 	xhci_capability_t *xcap;
1203 	unsigned long ps;
1204 
1205 	/*
1206 	 * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
1207 	 * a few emulated systems don't support reading at offset 0x2 for the
1208 	 * version. Instead we need to read the caplength register and get the
1209 	 * upper two bytes.
1210 	 */
1211 	capreg = xhci_get32(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1212 	vers = XHCI_VERSION_MASK(capreg);
1213 	usb = pci_config_get8(xhcip->xhci_cfg_handle, PCI_XHCI_USBREV);
1214 	struc1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS1);
1215 	struc2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS2);
1216 	struc3 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS3);
1217 	cap1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS1);
1218 	cap2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS2);
1219 	pgsz = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PAGESIZE);
1220 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1221 		xhci_error(xhcip, "failed to read controller parameters: "
1222 		    "encountered FM register error");
1223 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1224 		return (B_FALSE);
1225 	}
1226 
1227 	xcap = &xhcip->xhci_caps;
1228 	xcap->xcap_usb_vers = usb;
1229 	xcap->xcap_hci_vers = vers;
1230 	xcap->xcap_max_slots = XHCI_HCS1_DEVSLOT_MAX(struc1);
1231 	xcap->xcap_max_intrs = XHCI_HCS1_IRQ_MAX(struc1);
1232 	xcap->xcap_max_ports = XHCI_HCS1_N_PORTS(struc1);
1233 	if (xcap->xcap_max_ports > MAX_PORTS) {
1234 		xhci_error(xhcip, "Root hub has %d ports, but system only "
1235 		    "supports %d, limiting to %d\n", xcap->xcap_max_ports,
1236 		    MAX_PORTS, MAX_PORTS);
1237 		xcap->xcap_max_ports = MAX_PORTS;
1238 	}
1239 
1240 	xcap->xcap_ist_micro = XHCI_HCS2_IST_MICRO(struc2);
1241 	xcap->xcap_ist = XHCI_HCS2_IST(struc2);
1242 	xcap->xcap_max_esrt = XHCI_HCS2_ERST_MAX(struc2);
1243 	xcap->xcap_scratch_restore = XHCI_HCS2_SPR(struc2);
1244 	xcap->xcap_max_scratch = XHCI_HCS2_SPB_MAX(struc2);
1245 
1246 	xcap->xcap_u1_lat = XHCI_HCS3_U1_DEL(struc3);
1247 	xcap->xcap_u2_lat = XHCI_HCS3_U2_DEL(struc3);
1248 
1249 	xcap->xcap_flags = XHCI_HCC1_FLAGS_MASK(cap1);
1250 	xcap->xcap_max_psa = XHCI_HCC1_PSA_SZ_MAX(cap1);
1251 	xcap->xcap_xecp_off = XHCI_HCC1_XECP(cap1);
1252 	xcap->xcap_flags2 = XHCI_HCC2_FLAGS_MASK(cap2);
1253 
1254 	/*
1255 	 * We don't have documentation for what changed from before xHCI 0.96,
1256 	 * so we just refuse to support versions before 0.96. We will also
1257 	 * reject anything with a major version greater than 1.
1258 	 */
1259 	if (xcap->xcap_hci_vers < 0x96 || xcap->xcap_hci_vers >= 0x200) {
1260 		xhci_error(xhcip, "Encountered unsupported xHCI version 0.%2x",
1261 		    xcap->xcap_hci_vers);
1262 		return (B_FALSE);
1263 	}
1264 
1265 	/*
1266 	 * Determine the smallest page size that the controller supports and
1267 	 * make sure that it matches our page size. We basically check here for
1268 	 * the presence of 4k and 8k pages. The page size is used extensively
1269 	 * throughout the code and the specification. While we could support
1270 	 * other page sizes here, given that we don't support systems that use
1271 	 * them at this time, it doesn't make much sense.
1272 	 */
1273 	ps = PAGESIZE;
1274 	if (ps == 0x1000) {
1275 		pbit = XHCI_PAGESIZE_4K;
1276 		psize = 0x1000;
1277 	} else if (ps == 0x2000) {
1278 		pbit = XHCI_PAGESIZE_8K;
1279 		psize = 0x2000;
1280 	} else {
1281 		xhci_error(xhcip, "Encountered host page size that the driver "
1282 		    "doesn't know how to handle: %lx\n", ps);
1283 		return (B_FALSE);
1284 	}
1285 
1286 	if (!(pgsz & pbit)) {
1287 		xhci_error(xhcip, "Encountered controller that didn't support "
1288 		    "the host page size (%d), supports: %x", psize, pgsz);
1289 		return (B_FALSE);
1290 	}
1291 	xcap->xcap_pagesize = psize;
1292 
1293 	return (B_TRUE);
1294 }
1295 
1296 /*
1297  * Apply known workarounds for quirks and issues. These reports come from
1298  * other operating systems and have been collected over time.
1299  */
1300 static boolean_t
1301 xhci_identify(xhci_t *xhcip)
1302 {
1303 	xhci_quirks_populate(xhcip);
1304 
1305 	if (xhcip->xhci_quirks & XHCI_QUIRK_NO_MSI) {
1306 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED;
1307 	} else {
1308 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED |
1309 		    DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX;
1310 	}
1311 
1312 	if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY) {
1313 		xhcip->xhci_caps.xcap_flags &= ~XCAP_AC64;
1314 	}
1315 
1316 	return (B_TRUE);
1317 }
1318 
1319 static boolean_t
1320 xhci_alloc_intr_handle(xhci_t *xhcip, int type)
1321 {
1322 	int ret;
1323 
1324 	/*
1325 	 * Normally a well-behaved driver would more carefully request a number
1326 	 * of interrupts based on how many are available, etc. But since we only
1327 	 * actually want a single interrupt, we simply go ahead and ask for
1328 	 * exactly one.
1329 	 */
1330 	ret = ddi_intr_alloc(xhcip->xhci_dip, &xhcip->xhci_intr_hdl, type, 0,
1331 	    XHCI_NINTR, &xhcip->xhci_intr_num, DDI_INTR_ALLOC_NORMAL);
1332 	if (ret != DDI_SUCCESS) {
1333 		xhci_log(xhcip, "!failed to allocate interrupts of type %d: %d",
1334 		    type, ret);
1335 		return (B_FALSE);
1336 	}
1337 	xhcip->xhci_intr_type = type;
1338 
1339 	return (B_TRUE);
1340 }
1341 
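/*
 * Allocate our interrupt, preferring MSI-X, then MSI, and finally a fixed
 * interrupt, restricted to whatever types the controller and our quirks
 * allow.
 */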
1342 static boolean_t
1343 xhci_alloc_intrs(xhci_t *xhcip)
1344 {
1345 	int intr_types, ret;
1346 
1347 	if (XHCI_NINTR > xhcip->xhci_caps.xcap_max_intrs) {
1348 		xhci_error(xhcip, "controller does not support the minimum "
1349 		    "number of interrupts required (%d), supports %d",
1350 		    XHCI_NINTR, xhcip->xhci_caps.xcap_max_intrs);
1351 		return (B_FALSE);
1352 	}
1353 
1354 	if ((ret = ddi_intr_get_supported_types(xhcip->xhci_dip,
1355 	    &intr_types)) != DDI_SUCCESS) {
1356 		xhci_error(xhcip, "failed to get supported interrupt types: "
1357 		    "%d", ret);
1358 		return (B_FALSE);
1359 	}
1360 
1361 	/*
1362 	 * Mask off interrupt types we've already ruled out due to quirks or
1363 	 * other reasons.
1364 	 */
1365 	intr_types &= xhcip->xhci_caps.xcap_intr_types;
1366 	if (intr_types & DDI_INTR_TYPE_MSIX) {
1367 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSIX))
1368 			return (B_TRUE);
1369 	}
1370 
1371 	if (intr_types & DDI_INTR_TYPE_MSI) {
1372 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSI))
1373 			return (B_TRUE);
1374 	}
1375 
1376 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1377 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_FIXED))
1378 			return (B_TRUE);
1379 	}
1380 
1381 	xhci_error(xhcip, "failed to allocate an interrupt, supported types: "
1382 	    "0x%x", intr_types);
1383 	return (B_FALSE);
1384 }
1385 
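/*
 * Fetch the interrupt priority and capabilities and then register xhci_intr()
 * as our interrupt handler.
 */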
1386 static boolean_t
1387 xhci_add_intr_handler(xhci_t *xhcip)
1388 {
1389 	int ret;
1390 
1391 	if ((ret = ddi_intr_get_pri(xhcip->xhci_intr_hdl,
1392 	    &xhcip->xhci_intr_pri)) != DDI_SUCCESS) {
1393 		xhci_error(xhcip, "failed to get interrupt priority: %d", ret);
1394 		return (B_FALSE);
1395 	}
1396 
1397 	if ((ret = ddi_intr_get_cap(xhcip->xhci_intr_hdl,
1398 	    &xhcip->xhci_intr_caps)) != DDI_SUCCESS) {
1399 		xhci_error(xhcip, "failed to get interrupt capabilities: %d",
1400 		    ret);
1401 		return (B_FALSE);
1402 	}
1403 
1404 	if ((ret = ddi_intr_add_handler(xhcip->xhci_intr_hdl, xhci_intr, xhcip,
1405 	    (uintptr_t)0)) != DDI_SUCCESS) {
1406 		xhci_error(xhcip, "failed to add interrupt handler: %d", ret);
1407 		return (B_FALSE);
1408 	}
1409 	return (B_TRUE);
1410 }
1411 
1412 /*
1413  * Find a capability with an identifier whose value is 'id'. The 'init'
1414  * argument gives an offset to resume searching from; UINT32_MAX means search
1415  * from the beginning. See xHCI 1.1 / 7. This is much like PCI capabilities.
1416  */
1417 static boolean_t
1418 xhci_find_ext_cap(xhci_t *xhcip, uint32_t id, uint32_t init, uint32_t *outp)
1419 {
1420 	uint32_t off;
1421 	uint8_t next = 0;
1422 
1423 	/*
1424 	 * If we have no offset, we're done.
1425 	 */
1426 	if (xhcip->xhci_caps.xcap_xecp_off == 0)
1427 		return (B_FALSE);
1428 
1429 	off = xhcip->xhci_caps.xcap_xecp_off << 2;
1430 	do {
1431 		uint32_t cap_hdr;
1432 
1433 		off += next << 2;
1434 		cap_hdr = xhci_get32(xhcip, XHCI_R_CAP, off);
1435 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1436 			xhci_error(xhcip, "failed to read xhci extended "
1437 			    "capabilities at offset 0x%x: encountered FM "
1438 			    "register error", off);
1439 			ddi_fm_service_impact(xhcip->xhci_dip,
1440 			    DDI_SERVICE_LOST);
1441 			break;
1442 		}
1443 
1444 		if (cap_hdr == PCI_EINVAL32)
1445 			break;
1446 		if (XHCI_XECP_ID(cap_hdr) == id &&
1447 		    (init == UINT32_MAX || off > init)) {
1448 			*outp = off;
1449 			return (B_TRUE);
1450 		}
1451 		next = XHCI_XECP_NEXT(cap_hdr);
1452 		/*
1453 		 * Watch out for overflow if we somehow end up with more than
1454 		 * 2 GiB of capability space.
1455 		 */
1456 		if (next << 2 > (INT32_MAX - off))
1457 			return (B_FALSE);
1458 	} while (next != 0);
1459 
1460 	return (B_FALSE);
1461 }
1462 
1463 /*
1464  * Mostly for informational purposes, we'd like to walk the protocol
1465  * capabilities and augment the devinfo tree with the number of ports that
1466  * support USB 2 and USB 3. Note though that these ports may overlap. Many
1467  * ports support both USB 2 and USB 3 and are wired up to the same physical
1468  * port, even though they show up as separate 'ports' in the xhci sense.
1469  */
1470 static boolean_t
1471 xhci_port_count(xhci_t *xhcip)
1472 {
1473 	uint_t nusb2 = 0, fusb2 = 0;
1474 	uint_t nusb30 = 0, fusb30 = 0;
1475 	uint_t nusb31 = 0, fusb31 = 0;
1476 	uint32_t off = UINT32_MAX;
1477 
1478 	while (xhci_find_ext_cap(xhcip, XHCI_ID_PROTOCOLS, off, &off) ==
1479 	    B_TRUE) {
1480 		uint32_t rvers, rport;
1481 		uint8_t maj, min, count, first;
1482 
1483 		/*
1484 		 * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
1485 		 * has version information while the third uint32_t has the port
1486 		 * count.
1487 		 */
1488 		rvers = xhci_get32(xhcip, XHCI_R_CAP, off);
1489 		rport = xhci_get32(xhcip, XHCI_R_CAP, off + 8);
1490 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1491 			xhci_error(xhcip, "failed to read xhci port counts: "
1492 			    "encountered fatal FM register error");
1493 			ddi_fm_service_impact(xhcip->xhci_dip,
1494 			    DDI_SERVICE_LOST);
1495 			return (B_FALSE);
1496 		}
1497 
1498 		maj = XHCI_XECP_PROT_MAJOR(rvers);
1499 		min = XHCI_XECP_PROT_MINOR(rvers);
1500 		count = XHCI_XECP_PROT_PCOUNT(rport);
1501 		first = XHCI_XECP_PROT_FPORT(rport);
1502 
1503 		/*
1504 		 * In the wild, we've seen some systems that use a minor
1505 		 * version of 0x10 and some that use 0x01 in this field.
1506 		 * The xHCI 1.1 specification (Table 155: xHCI Supported
1507 		 * Protocols) says that we should expect the minor to be
1508 		 * 0x01; however, the USB 3.1 specification defines the
1509 		 * version to be 0x10 when it is encoded in BCD style. As
1510 		 * such, handle both and hope we never get to revision 16 of
1511 		 * USB 3.
1512 		 */
1513 		if (maj == 3 && (min == 0x10 || min == 0x01)) {
1514 			nusb31 = count;
1515 			fusb31 = first;
1516 		} else if (maj == 3 && min == 0) {
1517 			nusb30 = count;
1518 			fusb30 = first;
1519 		} else if (maj <= 2) {
1520 			nusb2 = count;
1521 			fusb2 = first;
1522 		} else {
1523 			xhci_error(xhcip, "encountered port capabilities with "
1524 			    "unknown USB version: %x.%x\n", maj, min);
1525 		}
1526 	}
1527 
1528 	/*
1529 	 * These properties are used by FMA and the USB topo module.
1530 	 */
1531 	if (nusb2 > 0) {
1532 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1533 		    "usb2.0-port-count", nusb2);
1534 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1535 		    "usb2.0-first-port", fusb2);
1536 	}
1537 	if (nusb30 > 0) {
1538 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1539 		    "usb3.0-port-count", nusb30);
1540 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1541 		    "usb3.0-first-port", fusb30);
1542 	}
1543 
1544 	if (nusb31 > 0) {
1545 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1546 		    "usb3.1-port-count", nusb31);
1547 		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1548 		    "usb3.1-first-port", fusb31);
1549 	}
1550 
1551 	return (B_TRUE);
1552 }
1553 
1554 /*
1555  * Take over control from the BIOS or other firmware, if applicable.
1556  */
1557 static boolean_t
1558 xhci_controller_takeover(xhci_t *xhcip)
1559 {
1560 	int ret;
1561 	uint32_t val, off;
1562 
1563 	/*
1564 	 * If we can't find the legacy capability, then there's nothing to do.
1565 	 */
1566 	if (xhci_find_ext_cap(xhcip, XHCI_ID_USB_LEGACY, UINT32_MAX, &off) ==
1567 	    B_FALSE)
1568 		return (B_TRUE);
1569 	val = xhci_get32(xhcip, XHCI_R_CAP, off);
1570 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1571 		xhci_error(xhcip, "failed to read BIOS take over registers: "
1572 		    "encountered fatal FM register error");
1573 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1574 		return (B_FALSE);
1575 	}
1576 
1577 	if (val & XHCI_BIOS_OWNED) {
1578 		val |= XHCI_OS_OWNED;
1579 		xhci_put32(xhcip, XHCI_R_CAP, off, val);
1580 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1581 			xhci_error(xhcip, "failed to write BIOS take over "
1582 			    "registers: encountered fatal FM register error");
1583 			ddi_fm_service_impact(xhcip->xhci_dip,
1584 			    DDI_SERVICE_LOST);
1585 			return (B_FALSE);
1586 		}
1587 
1588 		/*
1589 		 * Wait up to 5 seconds for things to change. While this number
1590 		 * isn't specified in the xHCI spec, it seems to be the de facto
1591 		 * value that various systems are using today. We'll use a 10ms
1592 		 * interval to check.
1593 		 */
1594 		ret = xhci_reg_poll(xhcip, XHCI_R_CAP, off,
1595 		    XHCI_BIOS_OWNED | XHCI_OS_OWNED, XHCI_OS_OWNED, 500, 10);
1596 		if (ret == EIO)
1597 			return (B_FALSE);
1598 		if (ret == ETIMEDOUT) {
1599 			xhci_log(xhcip, "!timed out waiting for firmware to "
1600 			    "hand off, taking over");
1601 			val &= ~XHCI_BIOS_OWNED;
1602 			xhci_put32(xhcip, XHCI_R_CAP, off, val);
1603 			if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1604 				xhci_error(xhcip, "failed to write forced "
1605 				    "takeover: encountered fatal FM register "
1606 				    "error");
1607 				ddi_fm_service_impact(xhcip->xhci_dip,
1608 				    DDI_SERVICE_LOST);
1609 				return (B_FALSE);
1610 			}
1611 		}
1612 	}
1613 
1614 	val = xhci_get32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS);
1615 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1616 		xhci_error(xhcip, "failed to read legacy control registers: "
1617 		    "encountered fatal FM register error");
1618 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1619 		return (B_FALSE);
1620 	}
1621 	val &= XHCI_XECP_SMI_MASK;
1622 	val |= XHCI_XECP_CLEAR_SMI;
1623 	xhci_put32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS, val);
1624 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1625 		xhci_error(xhcip, "failed to write legacy control registers: "
1626 		    "encountered fatal FM register error");
1627 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1628 		return (B_FALSE);
1629 	}
1630 
1631 	return (B_TRUE);
1632 }
1633 
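/*
 * Ask the controller to stop by clearing the run/stop bit (and the interrupt
 * enable bit), then wait for the controller to report that it has halted.
 */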
1634 static int
1635 xhci_controller_stop(xhci_t *xhcip)
1636 {
1637 	uint32_t cmdreg;
1638 
1639 	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1640 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1641 		xhci_error(xhcip, "failed to read USB Command register: "
1642 		    "encountered fatal FM register error");
1643 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1644 		return (EIO);
1645 	}
1646 
1647 	cmdreg &= ~(XHCI_CMD_RS | XHCI_CMD_INTE);
1648 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1649 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1650 		xhci_error(xhcip, "failed to write USB Command register: "
1651 		    "encountered fatal FM register error");
1652 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1653 		return (EIO);
1654 	}
1655 
1656 	/*
1657 	 * Wait up to 500ms (50 polls at 10ms intervals) for this to occur. The
1658 	 * specification says that the controller should halt within 16ms, but
1659 	 * we give ourselves a bit more time just in case.
1660 	 */
1661 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_HCH,
1662 	    XHCI_STS_HCH, 50, 10));
1663 }
1664 
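/*
 * Reset the controller by asserting HCRST and then waiting for both the reset
 * bit and the Controller Not Ready (CNR) status to clear.
 */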
1665 static int
1666 xhci_controller_reset(xhci_t *xhcip)
1667 {
1668 	int ret;
1669 	uint32_t cmdreg;
1670 
1671 	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1672 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1673 		xhci_error(xhcip, "failed to read USB Command register for "
1674 		    "reset: encountered fatal FM register error");
1675 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1676 		return (EIO);
1677 	}
1678 
1679 	cmdreg |= XHCI_CMD_HCRST;
1680 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1681 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1682 		xhci_error(xhcip, "failed to write USB Command register for "
1683 		    "reset: encountered fatal FM register error");
1684 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1685 		return (EIO);
1686 	}
1687 
1688 	/*
1689 	 * Some controllers apparently don't want to be touched for at least 1ms
1690 	 * after we initiate the reset. Therefore give all controllers this
1691 	 * moment to breathe.
1692 	 */
1693 	delay(drv_usectohz(xhci_reset_delay));
1694 
1695 	/*
1696 	 * To tell that the reset has completed, we first verify that the USBCMD
1697 	 * register no longer has the reset bit asserted. However, once that's
1698 	 * done, we still have to verify that CNR (Controller Not Ready) is no
1699 	 * longer asserted in the USBSTS register.
1700 	 */
1701 	if ((ret = xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBCMD,
1702 	    XHCI_CMD_HCRST, 0, 500, 10)) != 0)
1703 		return (ret);
1704 
1705 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1706 	    XHCI_STS_CNR, 0, 500, 10));
1707 }
1708 
1709 /*
1710  * Take care of all the required initialization before we can actually enable
1711  * the controller. This means that we need to:
1712  *
1713  *    o Program the maximum number of slots
1714  *    o Program the DCBAAP and allocate the scratchpad
1715  *    o Program the Command Ring
1716  *    o Initialize the Event Ring
1717  *    o Enable interrupts (set imod)
1718  */
1719 static int
1720 xhci_controller_configure(xhci_t *xhcip)
1721 {
1722 	int ret;
1723 	uint32_t config;
1724 
1725 	config = xhci_get32(xhcip, XHCI_R_OPER, XHCI_CONFIG);
1726 	config &= ~XHCI_CONFIG_SLOTS_MASK;
1727 	config |= xhcip->xhci_caps.xcap_max_slots;
1728 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_CONFIG, config);
1729 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1730 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1731 		return (EIO);
1732 	}
1733 
1734 	if ((ret = xhci_context_init(xhcip)) != 0) {
1735 		const char *reason;
1736 		if (ret == EIO) {
1737 			reason = "fatal FM I/O error occurred";
1738 		} else if (ret == ENOMEM) {
1739 			reason = "unable to allocate DMA memory";
1740 		} else {
1741 			reason = "unexpected error occurred";
1742 		}
1743 
1744 		xhci_error(xhcip, "failed to initialize xhci context "
1745 		    "registers: %s (%d)", reason, ret);
1746 		return (ret);
1747 	}
1748 
1749 	if ((ret = xhci_command_ring_init(xhcip)) != 0) {
1750 		xhci_error(xhcip, "failed to initialize commands: %d", ret);
1751 		return (ret);
1752 	}
1753 
1754 	if ((ret = xhci_event_init(xhcip)) != 0) {
1755 		xhci_error(xhcip, "failed to initialize events: %d", ret);
1756 		return (ret);
1757 	}
1758 
1759 	if ((ret = xhci_intr_conf(xhcip)) != 0) {
1760 		xhci_error(xhcip, "failed to configure interrupts: %d", ret);
1761 		return (ret);
1762 	}
1763 
1764 	return (0);
1765 }
1766 
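/*
 * Start the controller by setting the run/stop bit and then waiting for it to
 * leave the halted state.
 */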
1767 static int
1768 xhci_controller_start(xhci_t *xhcip)
1769 {
1770 	uint32_t reg;
1771 
1772 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1773 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1774 		xhci_error(xhcip, "failed to read USB Command register for "
1775 		    "start: encountered fatal FM register error");
1776 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1777 		return (EIO);
1778 	}
1779 
1780 	reg |= XHCI_CMD_RS;
1781 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, reg);
1782 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1783 		xhci_error(xhcip, "failed to write USB Command register for "
1784 		    "start: encountered fatal FM register error");
1785 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1786 		return (EIO);
1787 	}
1788 
1789 	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1790 	    XHCI_STS_HCH, 0, 500, 10));
1791 }
1792 
1793 /* ARGSUSED */
1794 static void
1795 xhci_reset_task(void *arg)
1796 {
1797 	/*
1798 	 * Longer term, we'd like to properly perform a controller reset.
1799 	 * However, that requires a bit more assistance from USBA to work
1800 	 * properly and tear down devices. In the meantime, we panic.
1801 	 */
1802 	panic("XHCI runtime reset required");
1803 }
1804 
1805 /*
1806  * This function is called when we've detected a fatal FM condition that has
1807  * resulted in a loss of service and we need to force a reset of the controller
1808  * as a whole. Only one such reset may be ongoing at a time.
1809  */
1810 void
1811 xhci_fm_runtime_reset(xhci_t *xhcip)
1812 {
1813 	boolean_t locked = B_FALSE;
1814 
1815 	if (mutex_owned(&xhcip->xhci_lock)) {
1816 		locked = B_TRUE;
1817 	} else {
1818 		mutex_enter(&xhcip->xhci_lock);
1819 	}
1820 
1821 	/*
1822 	 * If we're already in the error state then a reset is already ongoing
1823 	 * and there is nothing for us to do here.
1824 	 */
1825 	if (xhcip->xhci_state & XHCI_S_ERROR) {
1826 		goto out;
1827 	}
1828 
1829 	xhcip->xhci_state |= XHCI_S_ERROR;
1830 	ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1831 	taskq_dispatch_ent(xhci_taskq, xhci_reset_task, xhcip, 0,
1832 	    &xhcip->xhci_tqe);
1833 out:
1834 	if (!locked) {
1835 		mutex_exit(&xhcip->xhci_lock);
1836 	}
1837 }
1838 
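/*
 * Copy out a snapshot of the PORTSC register for every root hub port.
 */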
1839 static int
1840 xhci_ioctl_portsc(xhci_t *xhcip, intptr_t arg)
1841 {
1842 	int i;
1843 	xhci_ioctl_portsc_t xhi;
1844 
1845 	bzero(&xhi, sizeof (xhci_ioctl_portsc_t));
1846 	xhi.xhi_nports = xhcip->xhci_caps.xcap_max_ports;
1847 	for (i = 1; i <= xhcip->xhci_caps.xcap_max_ports; i++) {
1848 		xhi.xhi_portsc[i] = xhci_get32(xhcip, XHCI_R_OPER,
1849 		    XHCI_PORTSC(i));
1850 	}
1851 
1852 	if (ddi_copyout(&xhi, (void *)(uintptr_t)arg, sizeof (xhi), 0) != 0)
1853 		return (EFAULT);
1854 
1855 	return (0);
1856 }
1857 
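/*
 * Clear the change status bits (connect, enable, warm reset, over-current,
 * reset, link state, and config error) on the requested root hub port.
 */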
1858 static int
1859 xhci_ioctl_clear(xhci_t *xhcip, intptr_t arg)
1860 {
1861 	uint32_t reg;
1862 	xhci_ioctl_clear_t xic;
1863 
1864 	if (ddi_copyin((const void *)(uintptr_t)arg, &xic, sizeof (xic),
1865 	    0) != 0)
1866 		return (EFAULT);
1867 
1868 	if (xic.xic_port == 0 || xic.xic_port >
1869 	    xhcip->xhci_caps.xcap_max_ports)
1870 		return (EINVAL);
1871 
1872 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port));
1873 	reg &= ~XHCI_PS_CLEAR;
1874 	reg |= XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC |
1875 	    XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC;
1876 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port), reg);
1877 
1878 	return (0);
1879 }
1880 
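/*
 * Force the requested root hub port into a specific link state by writing the
 * PLS field along with the link state write strobe (LWS).
 */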
1881 static int
1882 xhci_ioctl_setpls(xhci_t *xhcip, intptr_t arg)
1883 {
1884 	uint32_t reg;
1885 	xhci_ioctl_setpls_t xis;
1886 
1887 	if (ddi_copyin((const void *)(uintptr_t)arg, &xis, sizeof (xis),
1888 	    0) != 0)
1889 		return (EFAULT);
1890 
1891 	if (xis.xis_port == 0 || xis.xis_port >
1892 	    xhcip->xhci_caps.xcap_max_ports)
1893 		return (EINVAL);
1894 
1895 	if (xis.xis_pls & ~0xf)
1896 		return (EINVAL);
1897 
1898 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port));
1899 	reg &= ~XHCI_PS_CLEAR;
1900 	reg |= XHCI_PS_PLS_SET(xis.xis_pls);
1901 	reg |= XHCI_PS_LWS;
1902 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port), reg);
1903 
1904 	return (0);
1905 }
1906 
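/*
 * Character device entry points. Aside from the xhci-specific ioctls handled
 * in xhci_ioctl(), these are all passed through to the USBA hub support code
 * (usba_hubdi).
 */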
1907 static int
1908 xhci_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1909 {
1910 	dev_info_t *dip = xhci_get_dip(*devp);
1911 
1912 	return (usba_hubdi_open(dip, devp, flags, otyp, credp));
1913 }
1914 
1915 static int
1916 xhci_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1917     int *rvalp)
1918 {
1919 	dev_info_t *dip = xhci_get_dip(dev);
1920 
1921 	if (cmd == XHCI_IOCTL_PORTSC ||
1922 	    cmd == XHCI_IOCTL_CLEAR ||
1923 	    cmd == XHCI_IOCTL_SETPLS) {
1924 		xhci_t *xhcip = ddi_get_soft_state(xhci_soft_state,
1925 		    getminor(dev) & ~HUBD_IS_ROOT_HUB);
1926 
1927 		if (secpolicy_hwmanip(credp) != 0 ||
1928 		    crgetzoneid(credp) != GLOBAL_ZONEID)
1929 			return (EPERM);
1930 
1931 		if (mode & FKIOCTL)
1932 			return (ENOTSUP);
1933 
1934 		if (!(mode & FWRITE))
1935 			return (EBADF);
1936 
1937 		if (cmd == XHCI_IOCTL_PORTSC)
1938 			return (xhci_ioctl_portsc(xhcip, arg));
1939 		else if (cmd == XHCI_IOCTL_CLEAR)
1940 			return (xhci_ioctl_clear(xhcip, arg));
1941 		else
1942 			return (xhci_ioctl_setpls(xhcip, arg));
1943 	}
1944 
1945 	return (usba_hubdi_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
1946 }
1947 
1948 static int
1949 xhci_close(dev_t dev, int flag, int otyp, cred_t *credp)
1950 {
1951 	dev_info_t *dip = xhci_get_dip(dev);
1952 
1953 	return (usba_hubdi_close(dip, dev, flag, otyp, credp));
1954 }
1955 
1956 /*
1957  * We try to clean up everything that we can. The only thing that we let stop us
1958  * at this time is a failure to remove the root hub, which is realistically the
1959  * equivalent of our EBUSY case.
1960  */
1961 static int
1962 xhci_cleanup(xhci_t *xhcip)
1963 {
1964 	int ret, inst;
1965 
1966 	if (xhcip->xhci_seq & XHCI_ATTACH_ROOT_HUB) {
1967 		if ((ret = xhci_root_hub_fini(xhcip)) != 0)
1968 			return (ret);
1969 	}
1970 
1971 	if (xhcip->xhci_seq & XHCI_ATTACH_USBA) {
1972 		xhci_hcd_fini(xhcip);
1973 	}
1974 
1975 	if (xhcip->xhci_seq & XHCI_ATTACH_STARTED) {
1976 		mutex_enter(&xhcip->xhci_lock);
1977 		while (xhcip->xhci_state & XHCI_S_ERROR)
1978 			cv_wait(&xhcip->xhci_statecv, &xhcip->xhci_lock);
1979 		mutex_exit(&xhcip->xhci_lock);
1980 
1981 		(void) xhci_controller_stop(xhcip);
1982 	}
1983 
1984 	/*
1985 	 * Always release the context, command, and event data. They handle the
1986 	 * fact that they may be in an arbitrary state or unallocated.
1987 	 */
1988 	xhci_event_fini(xhcip);
1989 	xhci_command_ring_fini(xhcip);
1990 	xhci_context_fini(xhcip);
1991 
1992 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ENABLE) {
1993 		(void) xhci_ddi_intr_disable(xhcip);
1994 	}
1995 
1996 	if (xhcip->xhci_seq & XHCI_ATTACH_SYNCH) {
1997 		cv_destroy(&xhcip->xhci_statecv);
1998 		mutex_destroy(&xhcip->xhci_lock);
1999 	}
2000 
2001 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ADD) {
2002 		if ((ret = ddi_intr_remove_handler(xhcip->xhci_intr_hdl)) !=
2003 		    DDI_SUCCESS) {
2004 			xhci_error(xhcip, "failed to remove interrupt "
2005 			    "handler: %d", ret);
2006 		}
2007 	}
2008 
2009 	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ALLOC) {
2010 		if ((ret = ddi_intr_free(xhcip->xhci_intr_hdl)) !=
2011 		    DDI_SUCCESS) {
2012 			xhci_error(xhcip, "failed to free interrupts: %d", ret);
2013 		}
2014 	}
2015 
2016 	if (xhcip->xhci_seq & XHCI_ATTACH_REGS_MAP) {
2017 		ddi_regs_map_free(&xhcip->xhci_regs_handle);
2018 		xhcip->xhci_regs_handle = NULL;
2019 	}
2020 
2021 	if (xhcip->xhci_seq & XHCI_ATTACH_PCI_CONFIG) {
2022 		pci_config_teardown(&xhcip->xhci_cfg_handle);
2023 		xhcip->xhci_cfg_handle = NULL;
2024 	}
2025 
2026 	if (xhcip->xhci_seq & XHCI_ATTACH_FM) {
2027 		xhci_fm_fini(xhcip);
2028 		xhcip->xhci_fm_caps = 0;
2029 	}
2030 
2031 	inst = ddi_get_instance(xhcip->xhci_dip);
2032 	xhcip->xhci_dip = NULL;
2033 	ddi_soft_state_free(xhci_soft_state, inst);
2034 
2035 	return (DDI_SUCCESS);
2036 }
2037 
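/*
 * attach(9E) entry point. We proceed roughly as follows: set up FMA and PCI
 * config space access, map and initialize the controller registers, read the
 * controller's parameters and apply quirks, set up interrupts, take over the
 * controller from firmware, and then stop, reset, configure, and start the
 * controller before finally registering with USBA and creating the root hub.
 * The xhci_seq flags track how far we got so that xhci_cleanup() can unwind a
 * partial attach.
 */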
2038 static int
2039 xhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2040 {
2041 	int ret, inst, route;
2042 	xhci_t *xhcip;
2043 
2044 	if (cmd != DDI_ATTACH)
2045 		return (DDI_FAILURE);
2046 
2047 	inst = ddi_get_instance(dip);
2048 	if (ddi_soft_state_zalloc(xhci_soft_state, inst) != 0)
2049 		return (DDI_FAILURE);
2050 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2051 	xhcip->xhci_dip = dip;
2052 
2053 	xhcip->xhci_regs_capoff = PCI_EINVAL32;
2054 	xhcip->xhci_regs_operoff = PCI_EINVAL32;
2055 	xhcip->xhci_regs_runoff = PCI_EINVAL32;
2056 	xhcip->xhci_regs_dooroff = PCI_EINVAL32;
2057 
2058 	xhci_fm_init(xhcip);
2059 	xhcip->xhci_seq |= XHCI_ATTACH_FM;
2060 
2061 	if (pci_config_setup(xhcip->xhci_dip, &xhcip->xhci_cfg_handle) !=
2062 	    DDI_SUCCESS) {
2063 		goto err;
2064 	}
2065 	xhcip->xhci_seq |= XHCI_ATTACH_PCI_CONFIG;
2066 	xhcip->xhci_vendor_id = pci_config_get16(xhcip->xhci_cfg_handle,
2067 	    PCI_CONF_VENID);
2068 	xhcip->xhci_device_id = pci_config_get16(xhcip->xhci_cfg_handle,
2069 	    PCI_CONF_DEVID);
2070 
2071 	if (xhci_regs_map(xhcip) == B_FALSE) {
2072 		goto err;
2073 	}
2074 
2075 	xhcip->xhci_seq |= XHCI_ATTACH_REGS_MAP;
2076 
2077 	if (xhci_regs_init(xhcip) == B_FALSE)
2078 		goto err;
2079 
2080 	if (xhci_read_params(xhcip) == B_FALSE)
2081 		goto err;
2082 
2083 	if (xhci_identify(xhcip) == B_FALSE)
2084 		goto err;
2085 
2086 	if (xhci_alloc_intrs(xhcip) == B_FALSE)
2087 		goto err;
2088 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ALLOC;
2089 
2090 	if (xhci_add_intr_handler(xhcip) == B_FALSE)
2091 		goto err;
2092 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ADD;
2093 
2094 	mutex_init(&xhcip->xhci_lock, NULL, MUTEX_DRIVER,
2095 	    (void *)(uintptr_t)xhcip->xhci_intr_pri);
2096 	cv_init(&xhcip->xhci_statecv, NULL, CV_DRIVER, NULL);
2097 	xhcip->xhci_seq |= XHCI_ATTACH_SYNCH;
2098 
2099 	if (xhci_port_count(xhcip) == B_FALSE)
2100 		goto err;
2101 
2102 	if (xhci_controller_takeover(xhcip) == B_FALSE)
2103 		goto err;
2104 
2105 	/*
2106 	 * We don't enable interrupts until after we take over the controller
2107 	 * from the BIOS. We've observed cases where enabling them any earlier
2108 	 * can cause spurious interrupts.
2109 	 */
2110 	if (xhci_ddi_intr_enable(xhcip) == B_FALSE)
2111 		goto err;
2112 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ENABLE;
2113 
2114 	if ((ret = xhci_controller_stop(xhcip)) != 0) {
2115 		xhci_error(xhcip, "failed to stop controller: %s",
2116 		    ret == EIO ? "encountered FM register error" :
2117 		    "timed out while waiting for controller");
2118 		goto err;
2119 	}
2120 
2121 	if ((ret = xhci_controller_reset(xhcip)) != 0) {
2122 		xhci_error(xhcip, "failed to reset controller: %s",
2123 		    ret == EIO ? "encountered FM register error" :
2124 		    "timed out while waiting for controller");
2125 		goto err;
2126 	}
2127 
2128 	if ((ret = xhci_controller_configure(xhcip)) != 0) {
2129 		xhci_error(xhcip, "failed to configure controller: %d", ret);
2130 		goto err;
2131 	}
2132 
2133 	/*
2134 	 * Some systems support having ports routed to both an ehci and xhci
2135 	 * controller. If we support it and the user hasn't requested otherwise
2136 	 * via a driver.conf tuning, we reroute it now.
2137 	 */
2138 	route = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
2139 	    DDI_PROP_DONTPASS, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT);
2140 	if (route != XHCI_PROP_REROUTE_DISABLE &&
2141 	    (xhcip->xhci_quirks & XHCI_QUIRK_INTC_EHCI))
2142 		(void) xhci_reroute_intel(xhcip);
2143 
2144 	if ((ret = xhci_controller_start(xhcip)) != 0) {
2145 		xhci_log(xhcip, "failed to start controller: %s",
2146 		    ret == EIO ? "encountered FM register error" :
2147 		    "timed out while waiting for controller");
2148 		goto err;
2149 	}
2150 	xhcip->xhci_seq |= XHCI_ATTACH_STARTED;
2151 
2152 	/*
2153 	 * Finally, register ourselves with the USB framework itself.
2154 	 */
2155 	if ((ret = xhci_hcd_init(xhcip)) != 0) {
2156 		xhci_error(xhcip, "failed to register hcd with usba");
2157 		goto err;
2158 	}
2159 	xhcip->xhci_seq |= XHCI_ATTACH_USBA;
2160 
2161 	if ((ret = xhci_root_hub_init(xhcip)) != 0) {
2162 		xhci_error(xhcip, "failed to load the root hub driver");
2163 		goto err;
2164 	}
2165 	xhcip->xhci_seq |= XHCI_ATTACH_ROOT_HUB;
2166 
2167 	return (DDI_SUCCESS);
2168 
2169 err:
2170 	(void) xhci_cleanup(xhcip);
2171 	return (DDI_FAILURE);
2172 }
2173 
2174 static int
2175 xhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2176 {
2177 	xhci_t *xhcip;
2178 
2179 	if (cmd != DDI_DETACH)
2180 		return (DDI_FAILURE);
2181 
2182 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2183 	if (xhcip == NULL) {
2184 		dev_err(dip, CE_WARN, "detach called without soft state!");
2185 		return (DDI_FAILURE);
2186 	}
2187 
2188 	return (xhci_cleanup(xhcip));
2189 }
2190 
2191 /* ARGSUSED */
2192 static int
2193 xhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
2194 {
2195 	dev_t dev;
2196 	int inst;
2197 
2198 	switch (cmd) {
2199 	case DDI_INFO_DEVT2DEVINFO:
2200 		dev = (dev_t)arg;
2201 		*outp = xhci_get_dip(dev);
2202 		if (*outp == NULL)
2203 			return (DDI_FAILURE);
2204 		break;
2205 	case DDI_INFO_DEVT2INSTANCE:
2206 		dev = (dev_t)arg;
2207 		inst = getminor(dev) & ~HUBD_IS_ROOT_HUB;
2208 		*outp = (void *)(uintptr_t)inst;
2209 		break;
2210 	default:
2211 		return (DDI_FAILURE);
2212 	}
2213 
2214 	return (DDI_SUCCESS);
2215 }
2216 
2217 static struct cb_ops xhci_cb_ops = {
2218 	xhci_open,		/* cb_open */
2219 	xhci_close,		/* cb_close */
2220 	nodev,			/* cb_strategy */
2221 	nodev,			/* cb_print */
2222 	nodev,			/* cb_dump */
2223 	nodev,			/* cb_read */
2224 	nodev,			/* cb_write */
2225 	xhci_ioctl,		/* cb_ioctl */
2226 	nodev,			/* cb_devmap */
2227 	nodev,			/* cb_mmap */
2228 	nodev,			/* cb_segmap */
2229 	nochpoll,		/* cb_chpoll */
2230 	ddi_prop_op,		/* cb_prop_op */
2231 	NULL,			/* cb_stream */
2232 	D_MP | D_HOTPLUG,	/* cb_flag */
2233 	CB_REV,			/* cb_rev */
2234 	nodev,			/* cb_aread */
2235 	nodev			/* cb_awrite */
2236 };
2237 
2238 static struct dev_ops xhci_dev_ops = {
2239 	DEVO_REV,			/* devo_rev */
2240 	0,				/* devo_refcnt */
2241 	xhci_getinfo,			/* devo_getinfo */
2242 	nulldev,			/* devo_identify */
2243 	nulldev,			/* devo_probe */
2244 	xhci_attach,			/* devo_attach */
2245 	xhci_detach,			/* devo_detach */
2246 	nodev,				/* devo_reset */
2247 	&xhci_cb_ops,			/* devo_cb_ops */
2248 	&usba_hubdi_busops,		/* devo_bus_ops */
2249 	usba_hubdi_root_hub_power,	/* devo_power */
2250 	ddi_quiesce_not_supported	/* devo_quiesce */
2251 };
2252 
2253 static struct modldrv xhci_modldrv = {
2254 	&mod_driverops,
2255 	"USB xHCI Driver",
2256 	&xhci_dev_ops
2257 };
2258 
2259 static struct modlinkage xhci_modlinkage = {
2260 	MODREV_1,
2261 	&xhci_modldrv,
2262 	NULL
2263 };
2264 
2265 int
2266 _init(void)
2267 {
2268 	int ret;
2269 
2270 	if ((ret = ddi_soft_state_init(&xhci_soft_state, sizeof (xhci_t),
2271 	    0)) != 0) {
2272 		return (ret);
2273 	}
2274 
2275 	xhci_taskq = taskq_create("xhci_taskq", 1, minclsyspri, 0, 0, 0);
2276 	if (xhci_taskq == NULL) {
2277 		ddi_soft_state_fini(&xhci_soft_state);
2278 		return (ENOMEM);
2279 	}
2280 
2281 	if ((ret = mod_install(&xhci_modlinkage)) != 0) {
2282 		taskq_destroy(xhci_taskq);
2283 		xhci_taskq = NULL;
2284 	}
2285 
2286 	return (ret);
2287 }
2288 
2289 int
2290 _info(struct modinfo *modinfop)
2291 {
2292 	return (mod_info(&xhci_modlinkage, modinfop));
2293 }
2294 
2295 int
2296 _fini(void)
2297 {
2298 	int ret;
2299 
2300 	if ((ret = mod_remove(&xhci_modlinkage)) != 0)
2301 		return (ret);
2302 
2303 	if (xhci_taskq != NULL) {
2304 		taskq_destroy(xhci_taskq);
2305 		xhci_taskq = NULL;
2306 	}
2307 
2308 	ddi_soft_state_fini(&xhci_soft_state);
2309 
2310 	return (0);
2311 }
2312