xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci.c (revision 113ae9130ee83ef358c36cc3521d4a55e3e9da00)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2019, Joyent, Inc.
14  * Copyright 2024 Oxide Computer Company
15  */
16 
17 /*
18  * Extensible Host Controller Interface (xHCI) USB Driver
19  *
20  * The xhci driver is an HCI driver for USB that bridges the gap between client
21  * device drivers and implements the actual way that we talk to devices. The
22  * xhci specification provides access to USB 3.x capable devices, as well as all
23  * prior generations. Like other host controllers, it both provides the way to
24  * talk to devices and also is treated like a hub (often called the root hub).
25  *
26  * This driver is part of the USBA (USB Architecture). It implements the HCDI
27  * (host controller device interface) end of USBA. These entry points are used
28  * by the USBA on behalf of client device drivers to access their devices. The
29  * driver also provides notifications to deal with hot plug events, which are
30  * quite common in USB.
31  *
32  * ----------------
33  * USB Introduction
34  * ----------------
35  *
36  * To properly understand the xhci driver and the design of the USBA HCDI
37  * interfaces it implements, it helps to have a bit of background into how USB
38  * devices are structured and understand how they work at a high-level.
39  *
40  * USB devices, like PCI devices, are broken down into different classes of
41  * device. For example, with USB you have hubs, human-input devices (keyboards,
42  * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
43  * Many client drivers bind to an entire class of device, for example, the hubd
44  * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
45  * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
46  *
47  * USB SPEEDS AND VERSIONS
48  *
49  * USB devices are often referred to in two different ways. One way they're
50  * described is with the USB version that they conform to. In the wild, you're
51  * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
52  * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
53  * devices.
54  *
55  * The latter description describes the maximum theoretical speed of a given
56  * device. For example, a super-speed device theoretically caps out around 5
57  * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
58  *
59  * In general, each speed usually corresponds to a specific USB protocol
60  * generation. For example, all USB 3.0 devices are super-speed devices. All
61  * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
62  * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
63  * only a USB 1.x thing, they did not jump the fire line to USB 2.x.
64  *
65  * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
66  * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
67  * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
68  * device.
69  *
70  * USB ENDPOINTS
71  *
72  * A given USB device is made up of endpoints. A request, or transfer, is made
73  * to a specific USB endpoint. These endpoints can provide different services
74  * and have different expectations around the size of the data that'll be used
75  * in a given request and the periodicity of requests. Endpoints themselves are
76  * either used to make one-shot requests, for example, making requests to a mass
77  * storage device for a given sector, or for making periodic requests where you
78  * end up polling on the endpoint, for example, polling on a USB keyboard for
79  * keystrokes.
80  *
81  * Each endpoint encodes two different pieces of information: a direction and a
82  * type. There are two different directions: IN and OUT. These refer to the
83  * general direction that data moves relative to the operating system. For
84  * example, an IN transfer transfers data in to the operating system, from the
85  * device. An OUT transfer transfers data from the operating system, out to the
86  * device.
87  *
88  * There are four different kinds of endpoints:
89  *
90  *	BULK		These transfers are large transfers of data to or from
91  *			a device. The most common use for bulk transfers is for
92  *			mass storage devices. Though they are often also used by
93  *			network devices and more. Bulk endpoints do not have an
94  *			explicit time component to them. They are always used
95  *			for one-shot transfers.
96  *
97  *	CONTROL		These transfers are used to manipulate devices
98  *			themselves and are used for USB protocol level
99  *			operations (whether device-specific, class-specific, or
100  *			generic across all of USB). Unlike other transfers,
101  *			control transfers are always bi-directional and use
102  *			different kinds of transfers.
103  *
104  *	INTERRUPT	Interrupt transfers are used for small transfers that
105  *			happen infrequently, but need reasonable latency. A good
106  *			example of interrupt transfers is to receive input from
107  *			a USB keyboard. Interrupt-IN transfers are generally
108  *			polled. Meaning that a client (device driver) opens up
109  *			an interrupt-IN pipe to poll on it, and receives
110  *			periodic updates whenever there is information
111  *			available. However, Interrupt transfers can be used
112  *			as one-shot transfers both going IN and OUT.
113  *
114  *	ISOCHRONOUS	These transfers are things that happen once per
115  *			time-interval at a very regular rate. A good example of
116  *			these transfers are for audio and video. A device may
117  *			describe an interval as 10ms at which point it will read
118  *			or write the next batch of data every 10ms and transform
119  *			it for the user. There are no one-shot Isochronous-IN
120  *			transfers. There are one-shot Isochronous-OUT transfers,
121  *			but these are used by device drivers to always provide
122  *			the system with sufficient data.
123  *
124  * To find out information about the endpoints, USB devices have a series of
125  * descriptors that cover different aspects of the device. For example, there
126  * are endpoint descriptors which cover the properties of endpoints such as the
127  * maximum packet size or polling interval.
128  *
129  * Descriptors exist at all levels of USB. For example, there are general
130  * descriptors for every device. The USB device descriptor is described in
131  * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
132  * that they program the device correctly; however, they are more often used by
133  * client device drivers. There are also descriptors that exist at a class
134  * level. For example, the hub class has a class-specific descriptor which
135  * describes properties of the hub. That information is requested for and used
136  * by the hub driver.
137  *
138  * All of the different descriptors are gathered by the system and placed into a
139  * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
140  * drivers gain access to this cloud and then use them to open endpoints, which
141  * are called pipes in USBA (and some revisions of the USB specification).
142  *
143  * Each pipe gives access to a specific endpoint on the device which can be used
144  * to perform transfers of a specific type and direction. For example, a mass
145  * storage device often has three different endpoints, the default control
146  * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
147  * endpoint. The device driver ends up with three open pipes. One to the default
148  * control endpoint to configure the device, and then the other two are used to
149  * perform I/O.
150  *
151  * These routines translate more or less directly into calls to a host
152  * controller driver. A request to open a pipe takes an endpoint descriptor that
153  * describes the properties of the pipe, and the host controller driver (this
154  * driver) goes through and does any work necessary to allow the client device
155  * driver to access it. Once the pipe is open, it either makes one-shot
156  * transfers specific to the transfer type or it starts performing a periodic
157  * poll of an endpoint.
158  *
159  * All of these different actions translate into requests to the host
160  * controller. The host controller driver itself is in charge of making sure
161  * that all of the required resources for polling are allocated with a request
162  * and then proceed to give the driver's periodic callbacks.
163  *
164  * HUBS AND HOST CONTROLLERS
165  *
166  * Every device is always plugged into a hub, even if the device is itself a
167  * hub. This continues until we reach what we call the root-hub. The root-hub is
168  * special in that it is not an actual USB hub, but is integrated into the host
169  * controller and is manipulated in its own way. For example, the host
170  * controller is used to turn on and off a given port's power. This may happen
171  * over any interface, though the most common way is through PCI.
172  *
173  * In addition to the normal character device that exists for a host controller
174  * driver, as part of attaching, the host controller binds to an instance of the
175  * hubd driver. While the root-hub is a bit of a fiction, everyone models the
176  * root-hub as the same as any other hub that's plugged in. The hub kernel
177  * module doesn't know that the hub isn't a physical device that's been plugged
178  * in. The host controller driver simulates that view by taking hub requests
179  * that are made and translating them into corresponding requests that are
180  * understood by the host controller, for example, reading and writing to a
181  * memory mapped register.
182  *
183  * The hub driver polls for changes in device state using an Interrupt-IN
184  * request, which is the same as is done for the root-hub. This allows the host
185  * controller driver to not have to know about the implementation of device hot
186  * plug, merely react to requests from a hub, the same as if it were an external
187  * device. When the hub driver detects a change, it will go through the
188  * corresponding state machine and attach or detach the corresponding client
189  * device driver, depending if the device was inserted or removed.
190  *
191  * We detect the changes for the Interrupt-IN primarily based on the port state
192  * change events that are delivered to the event ring. Whenever any event is
193  * fired, we use this to update the hub driver about _all_ ports with
194  * outstanding events. This more closely matches how a hub is supposed to behave
195  * and leaves things less likely for the hub driver to end up without clearing a
196  * flag on a port.
197  *
198  * PACKET SIZES AND BURSTING
199  *
200  * A given USB endpoint has an explicit packet size and a number of packets that
201  * can be sent per time interval. These concepts are abstracted away from client
202  * device drivers usually, though they sometimes inform the upper bounds of what
203  * a device can perform.
204  *
205  * The host controller uses this information to transform arbitrary transfer
206  * requests into USB protocol packets. One of the nice things about the host
207  * controllers is that they abstract away all of the signaling and semantics of
208  * the actual USB protocols, allowing for life to be slightly easier in the
209  * operating system.
210  *
211  * That said, if the host controller is not programmed correctly, these can end
212  * up causing transaction errors and other problems in response to the data that
213  * the host controller is trying to send or receive.
214  *
215  * ------------
216  * Organization
217  * ------------
218  *
219  * The driver is made up of the following files. Many of these have their own
220  * theory statements to describe what they do. Here, we touch on each of the
221  * purpose of each of these files.
222  *
223  * xhci_command.c:	This file contains the logic to issue commands to the
224  *			controller as well as the actual functions that the
225  *			other parts of the driver use to cause those commands.
226  *
227  * xhci_context.c:	This file manages various data structures used by the
228  *			controller to manage the controller's and device's
229  *			context data structures. See more in the xHCI Overview
230  *			and General Design for more information.
231  *
232  * xhci_dma.c:		This manages the allocation of DMA memory and DMA
233  *			attributes for the controller, whether memory is for a
234  *			transfer or something else. This file also deals with
235  *			all the logic of getting data in and out of DMA buffers.
236  *
237  * xhci_endpoint.c:	This manages all of the logic of handling endpoints or
238  *			pipes. It deals with endpoint configuration, I/O
239  *			scheduling, timeouts, and callbacks to USBA.
240  *
241  * xhci_event.c:	This manages callbacks from the hardware to the driver.
242  *			This covers command completion notifications and I/O
243  *			notifications.
244  *
245  * xhci_hub.c:		This manages the virtual root-hub. It basically
246  *			implements and translates all of the USB level requests
247  *			into xhci specific implementations. It also contains the
248  *			functions to register this hub with USBA.
249  *
250  * xhci_intr.c:		This manages the underlying interrupt allocation,
251  *			interrupt moderation, and interrupt routines.
252  *
253  * xhci_quirks.c:	This manages information about buggy hardware that's
254  *			been collected and experienced primarily from other
255  *			systems.
256  *
257  * xhci_ring.c:		This manages the abstraction of a ring in xhci, which is
258  *			the primary means of communication between the driver and the
259  *			hardware, whether for the controller or a device.
260  *
261  * xhci_usba.c:		This implements all of the HCDI functions required by
262  *			USBA. This is the main entry point that drivers and the
263  *			kernel frameworks will reach to start any operation.
264  *			Many functions here will end up in the command and
265  *			endpoint code.
266  *
267  * xhci.c:		This provides the main kernel DDI interfaces and
268  *			performs device initialization.
269  *
270  * xhci_polled.c:	This provides the polled I/O functions that the
271  *			kernel debugger can use.
272  *
273  * xhci.h:		This is the primary header file which defines
274  *			illumos-specific data structures and constants to manage
275  *			the system.
276  *
277  * xhcireg.h:		This header file defines all of the register offsets,
278  *			masks, and related macros. It also contains all of the
279  *			constants that are used in various structures as defined
280  *			by the specification, such as command offsets, etc.
281  *
282  * xhci_ioctl.h:	This contains a few private ioctls that are used by a
283  *			private debugging command. These are private.
284  *
285  * cmd/xhci/xhci_portsc:	This is a private utility that can be useful for
286  *				debugging xhci state. It is the only consumer of
287  *				xhci_ioctl.h and the private ioctls.
288  *
289  * ----------------------------------
290  * xHCI Overview and Structure Layout
291  * ----------------------------------
292  *
293  * The design and structure of this driver follows from the way that the xHCI
294  * specification tells us that we have to work with hardware. First we'll give a
295  * rough summary of how that works, though the xHCI 1.1 specification should be
296  * referenced when going through this.
297  *
298  * There are three primary parts of the hardware -- registers, contexts, and
299  * rings. The registers are memory mapped registers that come in four sets,
300  * though all are found within the first BAR. These are used to program and
301  * control the hardware and aspects of the devices. Beyond more traditional
302  * device programming there are two primary sets of registers that are
303  * important:
304  *
305  *   o Port Status and Control Registers (XHCI_PORTSC)
306  *   o Doorbell Array (XHCI_DOORBELL)
307  *
308  * The port status and control registers are used to get and manipulate the
309  * status of a given device. For example, turning on and off the power to it.
310  * The Doorbell Array is used to kick off I/O operations and start the
311  * processing of an I/O ring.
312  *
313  * The contexts are data structures that represent various pieces of information
314  * in the controller. These contexts are generally filled out by the driver and
315  * then acknowledged and consumed by the hardware. There are controller-wide
316  * contexts (mostly managed in xhci_context.c) that are used to point to the
317  * contexts that exist for each device in the system. The primary context is
318  * called the Device Context Base Address Array (DCBAA).
319  *
320  * Each device in the system is allocated a 'slot', which is used to index into
321  * the DCBAA. Slots are assigned based on issuing commands to the controller.
322  * There are a fixed number of slots that determine the maximum number of
323  * devices that can end up being supported in the system. Note this includes all
324  * the devices plugged into the USB device tree, not just devices plugged into
325  * ports on the chassis.
326  *
327  * For each device, there is a context structure that describes properties of
328  * the device. For example, what speed is the device, is it a hub, etc. The
329  * context has slots for the device and for each endpoint on the device. As
330  * endpoints are enabled, their context information which describes things like
331  * the maximum packet size, is filled in and enabled. The mapping between these
332  * contexts look like:
333  *
334  *
335  *      DCBAA
336  *    +--------+                    Device Context
337  *    | Slot 0 |------------------>+--------------+
338  *    +--------+                   | Slot Context |
339  *    |  ...   |                   +--------------+       +----------+
340  *    +--------+   +------+        |  Endpoint 0  |------>| I/O Ring |
341  *    | Slot n |-->| NULL |        | Context (Bi) |       +----------+
342  *    +--------+   +------+        +--------------+
343  *                                 |  Endpoint 1  |
344  *                                 | Context (Out)|
345  *                                 +--------------+
346  *                                 |  Endpoint 1  |
347  *                                 | Context (In) |
348  *                                 +--------------+
349  *                                 |      ...     |
350  *                                 +--------------+
351  *                                 | Endpoint 15  |
352  *                                 | Context (In) |
353  *                                 +--------------+
354  *
355  * These contexts are always owned by the controller, though we can read them
356  * after various operations complete. Commands that toggle device state use a
357  * specific input context, which is a variant of the device context. The only
358  * difference is that it has an input context structure ahead of it to say which
359  * sections of the device context should be evaluated.
360  *
361  * Each active endpoint points us to an I/O ring, which leads us to the third
362  * main data structure that's used by the device: rings. Rings are made up of
363  * transfer request blocks (TRBs), which are joined together to form a given
364  * transfer description (TD) which represents a single I/O request.
365  *
366  * These rings are used to issue I/O to individual endpoints, to issue commands
367  * to the controller, and to receive notification of changes and completions.
368  * Issued commands go on the special ring called the command ring while the
369  * change and completion notifications go on the event ring. More details are
370  * available in xhci_ring.c. Each of these structures is represented by an
371  * xhci_ring_t.
372  *
373  * Each ring can be made up of one or more disjoint regions of DMA; however, we
374  * only use a single one. This also impacts some additional registers and
375  * structures that exist. The event ring has an indirection table called the
376  * Event Ring Segment Table (ERST). Each entry in the table (a segment)
377  * describes a chunk of the event ring.
378  *
379  * One other thing worth calling out is the scratchpad. The scratchpad is a way
380  * for the controller to be given arbitrary memory by the OS that it can use.
381  * There are two parts to the scratchpad. The first part is an array whose
382  * entries contain pointers to the actual addresses for the pages. The second
383  * part that we allocate are the actual pages themselves.
384  *
385  * -----------------------------
386  * Endpoint State and Management
387  * -----------------------------
388  *
389  * Endpoint management is one of the key parts to the xhci driver as every
390  * endpoint is a pipe that a device driver uses, so they are our primary
391  * currency. An endpoint is enabled when the client device driver opens the
392  * associated pipe for the first time. When an endpoint is enabled, we have to
393  * fill in an endpoint's context structure with information about the endpoint.
394  * These basically tell the controller important properties which it uses to
395  * ensure that there is adequate bandwidth for the device.
396  *
397  * If the client device closes the pipe again we explicitly stop the endpoint,
398  * moving it to the Halted state, and take ownership of any transfers
399  * previously submitted to the ring but which have not yet completed. A client
400  * may open and close a pipe several times -- ugen(4D) in particular is known
401  * for this -- and we will stop and start the ring accordingly.
402  *
403  * Periodic endpoints (viz., interrupt and isochronous) require reserved
404  * bandwidth in order to guarantee a certain latency of response. The
405  * controller manages these reservations as endpoints are configured and
406  * unconfigured. All hub ports have a maximum available bandwidth, whether a
407  * root port or an external hub. If too many devices have configured periodic
408  * endpoints and the available bandwidth on a particular port is exhausted, the
409  * controller will explicitly fail to configure any more endpoints. To release
410  * bandwidth reservations we must unconfigure the endpoints that have them,
411  * which we do for periodic endpoints when the pipe is closed.
412  *
413  * It is tempting to fully unconfigure all types of endpoint when any pipe is
414  * closed, but some host controllers appear to exhibit undefined behaviour each
415  * time a bulk endpoint is re-enabled this way; e.g., silently dropped
416  * transfers. As such, we wait until the whole device is being torn down to
417  * disable all previously enabled bulk endpoints at once, as part of disabling
418  * the device slot.
419  *
420  * Each endpoint has its own ring as described in the previous section. We place
421  * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
422  * Responses are placed on the event ring, in other words, the rings associated
423  * with an endpoint are purely for producing I/O.
424  *
425  * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
426  * These states generally correspond with the state of the endpoint to process
427  * I/O and handle timeouts. The driver basically follows a similar state machine
428  * as described there. There are some deviations. For example, what they
429  * describe as 'running' we break into both the Idle and Running states below.
430  * We also have a notion of timed out and quiescing. The following image
431  * summarizes the states and transitions:
432  *
433  *     +------+                                +-----------+
434  *     | Idle |---------*--------------------->|  Running  |<-+
435  *     +------+         . I/O queued on        +-----------+  |
436  *        ^               ring and timeout        |  |  |     |
437  *        |               scheduled.              |  |  |     |
438  *        |                                       |  |  |     |
439  *        +-----*---------------------------------+  |  |     |
440  *        |     . No I/Os remain                     |  |     |
441  *        |                                          |  |     |
442  *        |                +------*------------------+  |     |
443  *        |                |      . Timeout             |     |
444  *        |                |        fires for           |     |
445  *        |                |        I/O                 |     |
446  *        |                v                            v     |
447  *        |          +-----------+                +--------+  |
448  *        |          | Timed Out |                | Halted |  |
449  *        |          +-----------+                +--------+  |
450  *        |             |                           |         |
451  *        |             |   +-----------+           |         |
452  *        |             +-->| Quiescing |<----------+         |
453  *        |                 +-----------+                     |
454  *        |   No TRBs.           |                . TRBs      |
455  *        |   remain .           |                . Remain    |
456  *        +----------*----<------+-------->-------*-----------+
457  *
458  * Normally, a given endpoint will oscillate between having TRBs scheduled and
459  * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
460  * making sure that we're processing the ring, presuming that the endpoint isn't
461  * in one of the error states.
462  *
463  * To detect device hangs, we have an active timeout(9F) per active endpoint
464  * that ticks at a one second rate while we still have TRBs outstanding on an
465  * endpoint. Once all outstanding TRBs have been processed, the timeout will
466  * stop itself and there will be no active checking until the endpoint has I/O
467  * scheduled on it again.
468  *
469  * There are two primary ways that things can go wrong on the endpoint. We can
470  * either have a timeout or an event that transitions the endpoint to the Halted
471  * state. In the halted state, we need to issue explicit commands to reset the
472  * endpoint before removing the I/O.
473  *
474  * The way we handle both a timeout and a halted condition is similar, but the
475  * way they are triggered is different. When we detect a halted condition, we
476  * don't immediately clean it up, and wait for the client device driver (or USBA
477  * on its behalf) to issue a pipe reset. When we detect a timeout, we
478  * immediately take action (assuming no other action is ongoing).
479  *
480  * In both cases, we quiesce the device, which takes care of dealing with taking
481  * the endpoint from whatever state it may be in and taking the appropriate
482  * actions based on the state machine in xHCI 1.1 / 4.8.3. The end of quiescing
483  * leaves the device stopped, which allows us to update the ring's pointer and
484  * remove any TRBs that are causing problems.
485  *
486  * As part of all this, we ensure that we can only be quiescing the device from
487  * a given path at a time. Any requests to schedule I/O during this time will
488  * generally fail.
489  *
490  * The following image describes the state machine for the timeout logic. It
491  * ties into the image above.
492  *
493  *         +----------+                            +---------+
494  *         | Disabled |-----*--------------------->| Enabled |<--+
495  *         +----------+     . TRBs scheduled       +---------+   *. 1 sec timer
496  *             ^              and no active          |  |  |     |  fires and
497  *             |              timer.                 |  |  |     |  another
498  *             |                                     |  |  +--+--+  quiesce, in
499  *             |                                     |  |     |     a bad state,
500  *             +------*------------------------------+  |     ^     or decrement
501  *             |      . 1 sec timer                     |     |     I/O timeout
502  *             |        fires and                       |     |
503  *             |        no TRBs or                      |     +--------------+
504  *             |        endpoint shutdown               |                    |
505  *             |                                        *. . timer counter   |
506  *             ^                                        |    reaches zero    |
507  *             |                                        v                    |
508  *             |                                +--------------+             |
509  *             +-------------*---------------<--| Quiesce ring |->---*-------+
510  *                           . No more          | and fail I/O |     . restart
511  *                             I/Os             +--------------+       timer as
512  *                                                                     more I/Os
513  *
514  * As we described above, when there are active TRBs and I/Os, a 1 second
515  * timeout(9F) will be active. Each second, we decrement a counter on the
516  * current, active I/O until either a new I/O takes the head, or the counter
517  * reaches zero. If the counter reaches zero, then we go through, quiesce the
518  * ring, and then clean things up.
519  *
520  * ------------------
521  * Periodic Endpoints
522  * ------------------
523  *
524  * It's worth calling out periodic endpoints explicitly, as they operate
525  * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
526  * Isochronous-IN. The USBA often uses the term polling for these. That's
527  * because the client only needs to make a single API call; however, they'll
528  * receive multiple callbacks until either an error occurs or polling is
529  * requested to be terminated.
530  *
531  * When we have one of these periodic requests, we end up always rescheduling
532  * I/O requests, as well as, having a specific number of pre-existing I/O
533  * requests to cover the periodic needs, in case of latency spikes. Normally,
534  * when replying to a request, we use the request handle that we were given.
535  * However, when we have a periodic request, we're required to duplicate the
536  * handle before giving them data.
537  *
538  * However, the duplication is a bit tricky. For everything that was duplicated,
539  * the framework expects us to submit data. Because of that, we don't duplicate
540  * them until they are needed. This minimizes the likelihood that we have
541  * outstanding requests to deal with when we encounter a fatal polling failure.
542  *
543  * Most of the polling setup logic happens in xhci_usba.c in
544  * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
545  * xhci_endpoint.c.
546  *
547  * ----------------
548  * Structure Layout
549  * ----------------
550  *
551  * The following images relate the core data structures. The primary structure
552  * in the system is the xhci_t. This is the per-controller data structure that
553  * exists for each instance of the driver. From there, each device in the system
554  * is represented by an xhci_device_t and each endpoint is represented by an
555  * xhci_endpoint_t. For each client that opens a given endpoint, there is an
556  * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
557  * system.
558  *
559  *     +------------------------+
560  *     | Per-Controller         |
561  *     | Structure              |
562  *     | xhci_t                 |
563  *     |                        |
564  *     | uint_t              ---+--> Capability regs offset
565  *     | uint_t              ---+--> Operational regs offset
566  *     | uint_t              ---+--> Runtime regs offset
567  *     | uint_t              ---+--> Doorbell regs offset
568  *     | xhci_state_flags_t  ---+--> Device state flags
569  *     | xhci_quirks_t       ---+--> Device quirk flags
570  *     | xhci_capability_t   ---+--> Controller capability structure
571  *     | xhci_dcbaa_t        ---+----------------------------------+
572  *     | xhci_scratchpad_t   ---+---------+                        |
 *     | xhci_command_ring_t ---+------+  |                        v
574  *     | xhci_event_ring_t   ---+----+ |  |              +---------------------+
575  *     | xhci_usba_t         ---+--+ | |  |              | Device Context      |
576  *     +------------------------+  | | |  |              | Base Address        |
577  *                                 | | |  |              | Array Structure     |
578  *                                 | | |  |              | xhci_dcbaa_t        |
579  * +-------------------------------+ | |  |              |                     |
580  * | +-------------------------------+ |  |  DCBAA KVA <-+--        uint64_t * |
581  * | |    +----------------------------+  | DMA Buffer <-+-- xhci_dma_buffer_t |
582  * | |    v                               |              +---------------------+
583  * | | +--------------------------+       +-----------------------+
584  * | | | Event Ring               |                               |
585  * | | | Management               |                               |
586  * | | | xhci_event_ring_t        |                               v
587  * | | |                          |   Event Ring        +----------------------+
588  * | | | xhci_event_segment_t * --|-> Segment VA        |   Scratchpad (Extra  |
589  * | | | xhci_dma_buffer_t      --|-> Segment DMA Buf.  |   Controller Memory) |
590  * | | | xhci_ring_t            --|--+                  |    xhci_scratchpad_t |
591  * | | +--------------------------+  |      Scratchpad  |                      |
592  * | |                               | Base Array KVA <-+-          uint64_t * |
593  * | +------------+                  | Array DMA Buf. <-+-   xhci_dma_buffer_t |
594  * |              v                  | Scratchpad DMA <-+- xhci_dma_buffer_t * |
595  * |   +---------------------------+ | Buffer per page  +----------------------+
596  * |   | Command Ring              | |
597  * |   | xhci_command_ring_t       | +------------------------------+
598  * |   |                           |                                |
599  * |   | xhci_ring_t             --+-> Command Ring --->------------+
600  * |   | list_t                  --+-> Command List                 v
601  * |   | timeout_id_t            --+-> Timeout State     +---------------------+
602  * |   | xhci_command_ring_state_t +-> State Flags       | I/O Ring            |
603  * |   +---------------------------+                     | xhci_ring_t         |
604  * |                                                     |                     |
605  * |                                     Ring DMA Buf. <-+-- xhci_dma_buffer_t |
606  * |                                       Ring Length <-+--            uint_t |
607  * |                                    Ring Entry KVA <-+--      xhci_trb_t * |
608  * |    +---------------------------+        Ring Head <-+--            uint_t |
609  * +--->| USBA State                |        Ring Tail <-+--            uint_t |
610  *      | xhci_usba_t               |       Ring Cycle <-+--            uint_t |
611  *      |                           |                    +---------------------+
612  *      | usba_hcdi_ops_t *        -+-> USBA Ops Vector                       ^
613  *      | usb_dev_dscr_t           -+-> USB Virtual Device Descriptor         |
614  *      | usb_ss_hub_descr_t       -+-> USB Virtual Hub Descriptor            |
615  *      | usba_pipe_handle_data_t * +-> Interrupt polling client              |
616  *      | usb_intr_req_t           -+-> Interrupt polling request             |
617  *      | uint32_t                --+-> Interrupt polling device mask         |
618  *      | list_t                  --+-> Pipe List (Active Users)              |
619  *      | list_t                  --+-------------------+                     |
620  *      +---------------------------+                   |                     ^
621  *                                                      |                     |
622  *                                                      v                     |
623  *     +-------------------------------+             +---------------+        |
624  *     | USB Device                    |------------>| USB Device    |--> ... |
625  *     | xhci_device_t                 |             | xhci_device_t |        |
626  *     |                               |             +---------------+        |
627  *     | usb_port_t                  --+-> USB Port plugged into              |
628  *     | uint8_t                     --+-> Slot Number                        |
629  *     | boolean_t                   --+-> Address Assigned                   |
630  *     | usba_device_t *             --+-> USBA Device State                  |
631  *     | xhci_dma_buffer_t           --+-> Input Context DMA Buffer           |
632  *     | xhci_input_context_t *      --+-> Input Context KVA                  |
 *     | xhci_slot_context_t *       --+-> Input Slot Context KVA             |
634  *     | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA         |
635  *     | xhci_dma_buffer_t           --+-> Output Context DMA Buffer          |
636  *     | xhci_slot_context_t *       --+-> Output Slot Context KVA            ^
637  *     | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA        |
638  *     | xhci_endpoint_t *[]         --+-> Endpoint Tracking ---+             |
639  *     +-------------------------------+                        |             |
640  *                                                              |             |
641  *                                                              v             |
642  *     +------------------------------+            +-----------------+        |
643  *     | Endpoint Data                |----------->| Endpoint Data   |--> ... |
644  *     | xhci_endpoint_t              |            | xhci_endpoint_t |        |
645  *     |                              |            +-----------------+        |
646  *     | int                        --+-> Endpoint Number                     |
647  *     | int                        --+-> Endpoint Type                       |
648  *     | xhci_endpoint_state_t      --+-> Endpoint State                      |
649  *     | timeout_id_t               --+-> Endpoint Timeout State              |
650  *     | usba_pipe_handle_data_t *  --+-> USBA Client Handle                  |
651  *     | xhci_ring_t                --+-> Endpoint I/O Ring  -------->--------+
652  *     | list_t                     --+-> Transfer List --------+
653  *     +------------------------------+                         |
654  *                                                              v
655  *     +-------------------------+                  +--------------------+
656  *     | Transfer Structure      |----------------->| Transfer Structure |-> ...
657  *     | xhci_transfer_t         |                  | xhci_transfer_t    |
658  *     |                         |                  +--------------------+
659  *     | xhci_dma_buffer_t     --+-> I/O DMA Buffer
660  *     | uint_t                --+-> Number of TRBs
661  *     | uint_t                --+-> Short transfer data
662  *     | uint_t                --+-> Timeout seconds remaining
663  *     | usb_cr_t              --+-> USB Transfer return value
664  *     | boolean_t             --+-> Data direction
665  *     | xhci_trb_t *          --+-> Host-order transfer requests for I/O
666  *     | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
667  *     | usb_opaque_t          --+-> USBA Request Handle
668  *     +-------------------------+
669  *
670  * -------------
671  * Lock Ordering
672  * -------------
673  *
674  * There are three different tiers of locks that exist in the driver. First,
675  * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
676  * data for that instance of the controller. If there are multiple instances of
677  * the xHCI controller in the system, each one is independent and protected
678  * separately. The two do not share any data.
679  *
680  * From there, there are two other, specific locks in the system:
681  *
682  *   o xhci_command_ring_t`xcr_lock
683  *   o xhci_device_t`xd_imtx
684  *
685  * There is only one xcr_lock per controller, like the xhci_lock. It protects
686  * the state of the command ring. However, there is one xd_imtx per device.
687  * Recall that each device is scoped to a given controller. This protects the
688  * input slot context for a given device.
689  *
690  * There are a few important rules to keep in mind here that are true
691  * universally throughout the driver:
692  *
693  * 1) Always grab the xhci_t`xhci_lock, before grabbing any of the other locks.
694  * 2) A given xhci_device_t`xd_imtx, must be taken before grabbing the
695  *    xhci_command_ring_t`xcr_lock.
696  * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
697  *    at a given time. In other words, we should never be manipulating the input
698  *    context of two different devices at once.
699  * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
700  *    endpoint timer. Conversely, the endpoint specific logic should never enter
701  *    this lock.
702  *
703  * ----------
704  * Polled I/O
705  * ----------
706  *
707  * There is limited support for polled I/O in this driver for use by
708  * the kernel debugger. The driver currently only supports input from
709  * interrupt endpoints which is good enough for USB HID keyboard devices.
710  * Input from bulk endpoints and output are not supported which prevents
711  * using a serial console over USB for kernel debugging.
712  *
713  * --------------------
714  * Relationship to EHCI
715  * --------------------
716  *
717  * On some Intel chipsets, a given physical port on the system may be routed to
718  * one of the EHCI or xHCI controllers. This association can be dynamically
719  * changed by writing to platform specific registers as handled by the quirk
720  * logic in xhci_quirk.c.
721  *
722  * As these ports may support USB 3.x speeds, we always route all such ports to
723  * the xHCI controller, when supported. In addition, to minimize disruptions
724  * from devices being enumerated and attached to the EHCI driver and then
725  * disappearing, we generally attempt to load the xHCI controller before the
726  * EHCI controller. This logic is not done in the driver; however, it is done in
727  * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
728  * function consconfig_load_drivers().
729  *
730  * -----------
731  * Future Work
732  * -----------
733  *
734  * The primary future work in this driver spans two different, but related
735  * areas. The first area is around controller resets and how they tie into FM.
736  * Presently, we do not have a good way to handle controllers coming and going
737  * in the broader USB stack or properly reconfigure the device after a reset.
738  * Secondly, we don't handle the suspend and resume of devices and drivers.
739  */
740 
741 #include <sys/param.h>
742 #include <sys/modctl.h>
743 #include <sys/conf.h>
744 #include <sys/devops.h>
745 #include <sys/ddi.h>
746 #include <sys/sunddi.h>
747 #include <sys/cmn_err.h>
748 #include <sys/ddifm.h>
749 #include <sys/pci.h>
750 #include <sys/class.h>
751 #include <sys/policy.h>
752 
753 #include <sys/usb/hcd/xhci/xhci.h>
754 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
755 
756 /*
757  * We want to use the first BAR to access its registers. The regs[] array is
758  * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
759  * will always be the first BAR.
760  */
761 #define	XHCI_REG_NUMBER	1
762 
763 /*
764  * This task queue exists as a global taskq that is used for resetting the
765  * device in the face of FM or runtime errors. Each instance of the device
766  * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
767  * know that we should always be able to dispatch such an event.
768  */
769 static taskq_t *xhci_taskq;
770 
771 /*
772  * Global soft state for per-instance data. Note that we must use the soft state
773  * routines and cannot use the ddi_set_driver_private() routines. The USB
774  * framework presumes that it can use the dip's private data.
775  */
776 void *xhci_soft_state;
777 
778 /*
779  * This is the time in us that we wait after a controller resets before we
780  * consider reading any register. There are some controllers that want at least
781  * 1 ms, therefore we default to 10 ms.
782  */
783 clock_t xhci_reset_delay = 10000;
784 
785 void
xhci_error(xhci_t * xhcip,const char * fmt,...)786 xhci_error(xhci_t *xhcip, const char *fmt, ...)
787 {
788 	va_list ap;
789 
790 	va_start(ap, fmt);
791 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
792 		vdev_err(xhcip->xhci_dip, CE_WARN, fmt, ap);
793 	} else {
794 		vcmn_err(CE_WARN, fmt, ap);
795 	}
796 	va_end(ap);
797 }
798 
799 void
xhci_log(xhci_t * xhcip,const char * fmt,...)800 xhci_log(xhci_t *xhcip, const char *fmt, ...)
801 {
802 	va_list ap;
803 
804 	va_start(ap, fmt);
805 	if (xhcip != NULL && xhcip->xhci_dip != NULL) {
806 		vdev_err(xhcip->xhci_dip, CE_NOTE, fmt, ap);
807 	} else {
808 		vcmn_err(CE_NOTE, fmt, ap);
809 	}
810 	va_end(ap);
811 }
812 
813 /*
814  * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
815  * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
816  * things here. A simple bitwise-and will take care of this. And hey, it could
817  * always be more complex, USBA could clone!
818  */
819 static dev_info_t *
xhci_get_dip(dev_t dev)820 xhci_get_dip(dev_t dev)
821 {
822 	xhci_t *xhcip;
823 	int instance = getminor(dev) & ~HUBD_IS_ROOT_HUB;
824 
825 	xhcip = ddi_get_soft_state(xhci_soft_state, instance);
826 	if (xhcip != NULL)
827 		return (xhcip->xhci_dip);
828 	return (NULL);
829 }
830 
831 uint8_t
xhci_get8(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off)832 xhci_get8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
833 {
834 	uintptr_t addr, roff;
835 
836 	switch (rtt) {
837 	case XHCI_R_CAP:
838 		roff = xhcip->xhci_regs_capoff;
839 		break;
840 	case XHCI_R_OPER:
841 		roff = xhcip->xhci_regs_operoff;
842 		break;
843 	case XHCI_R_RUN:
844 		roff = xhcip->xhci_regs_runoff;
845 		break;
846 	case XHCI_R_DOOR:
847 		roff = xhcip->xhci_regs_dooroff;
848 		break;
849 	default:
850 		panic("called %s with bad reg type: %d", __func__, rtt);
851 	}
852 	ASSERT(roff != PCI_EINVAL32);
853 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
854 
855 	return (ddi_get8(xhcip->xhci_regs_handle, (void *)addr));
856 }
857 
858 uint16_t
xhci_get16(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off)859 xhci_get16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
860 {
861 	uintptr_t addr, roff;
862 
863 	switch (rtt) {
864 	case XHCI_R_CAP:
865 		roff = xhcip->xhci_regs_capoff;
866 		break;
867 	case XHCI_R_OPER:
868 		roff = xhcip->xhci_regs_operoff;
869 		break;
870 	case XHCI_R_RUN:
871 		roff = xhcip->xhci_regs_runoff;
872 		break;
873 	case XHCI_R_DOOR:
874 		roff = xhcip->xhci_regs_dooroff;
875 		break;
876 	default:
877 		panic("called %s with bad reg type: %d", __func__, rtt);
878 	}
879 	ASSERT(roff != PCI_EINVAL32);
880 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
881 
882 	return (ddi_get16(xhcip->xhci_regs_handle, (void *)addr));
883 }
884 
885 uint32_t
xhci_get32(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off)886 xhci_get32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
887 {
888 	uintptr_t addr, roff;
889 
890 	switch (rtt) {
891 	case XHCI_R_CAP:
892 		roff = xhcip->xhci_regs_capoff;
893 		break;
894 	case XHCI_R_OPER:
895 		roff = xhcip->xhci_regs_operoff;
896 		break;
897 	case XHCI_R_RUN:
898 		roff = xhcip->xhci_regs_runoff;
899 		break;
900 	case XHCI_R_DOOR:
901 		roff = xhcip->xhci_regs_dooroff;
902 		break;
903 	default:
904 		panic("called %s with bad reg type: %d", __func__, rtt);
905 	}
906 	ASSERT(roff != PCI_EINVAL32);
907 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
908 
909 	return (ddi_get32(xhcip->xhci_regs_handle, (void *)addr));
910 }
911 
912 uint64_t
xhci_get64(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off)913 xhci_get64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
914 {
915 	uintptr_t addr, roff;
916 
917 	switch (rtt) {
918 	case XHCI_R_CAP:
919 		roff = xhcip->xhci_regs_capoff;
920 		break;
921 	case XHCI_R_OPER:
922 		roff = xhcip->xhci_regs_operoff;
923 		break;
924 	case XHCI_R_RUN:
925 		roff = xhcip->xhci_regs_runoff;
926 		break;
927 	case XHCI_R_DOOR:
928 		roff = xhcip->xhci_regs_dooroff;
929 		break;
930 	default:
931 		panic("called %s with bad reg type: %d", __func__, rtt);
932 	}
933 	ASSERT(roff != PCI_EINVAL32);
934 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
935 
936 	return (ddi_get64(xhcip->xhci_regs_handle, (void *)addr));
937 }
938 
939 void
xhci_put8(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off,uint8_t val)940 xhci_put8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint8_t val)
941 {
942 	uintptr_t addr, roff;
943 
944 	switch (rtt) {
945 	case XHCI_R_CAP:
946 		roff = xhcip->xhci_regs_capoff;
947 		break;
948 	case XHCI_R_OPER:
949 		roff = xhcip->xhci_regs_operoff;
950 		break;
951 	case XHCI_R_RUN:
952 		roff = xhcip->xhci_regs_runoff;
953 		break;
954 	case XHCI_R_DOOR:
955 		roff = xhcip->xhci_regs_dooroff;
956 		break;
957 	default:
958 		panic("called %s with bad reg type: %d", __func__, rtt);
959 	}
960 	ASSERT(roff != PCI_EINVAL32);
961 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
962 
963 	ddi_put8(xhcip->xhci_regs_handle, (void *)addr, val);
964 }
965 
966 void
xhci_put16(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off,uint16_t val)967 xhci_put16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint16_t val)
968 {
969 	uintptr_t addr, roff;
970 
971 	switch (rtt) {
972 	case XHCI_R_CAP:
973 		roff = xhcip->xhci_regs_capoff;
974 		break;
975 	case XHCI_R_OPER:
976 		roff = xhcip->xhci_regs_operoff;
977 		break;
978 	case XHCI_R_RUN:
979 		roff = xhcip->xhci_regs_runoff;
980 		break;
981 	case XHCI_R_DOOR:
982 		roff = xhcip->xhci_regs_dooroff;
983 		break;
984 	default:
985 		panic("called %s with bad reg type: %d", __func__, rtt);
986 	}
987 	ASSERT(roff != PCI_EINVAL32);
988 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
989 
990 	ddi_put16(xhcip->xhci_regs_handle, (void *)addr, val);
991 }
992 
993 void
xhci_put32(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off,uint32_t val)994 xhci_put32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint32_t val)
995 {
996 	uintptr_t addr, roff;
997 
998 	switch (rtt) {
999 	case XHCI_R_CAP:
1000 		roff = xhcip->xhci_regs_capoff;
1001 		break;
1002 	case XHCI_R_OPER:
1003 		roff = xhcip->xhci_regs_operoff;
1004 		break;
1005 	case XHCI_R_RUN:
1006 		roff = xhcip->xhci_regs_runoff;
1007 		break;
1008 	case XHCI_R_DOOR:
1009 		roff = xhcip->xhci_regs_dooroff;
1010 		break;
1011 	default:
1012 		panic("called %s with bad reg type: %d", __func__, rtt);
1013 	}
1014 	ASSERT(roff != PCI_EINVAL32);
1015 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1016 
1017 	ddi_put32(xhcip->xhci_regs_handle, (void *)addr, val);
1018 }
1019 
1020 void
xhci_put64(xhci_t * xhcip,xhci_reg_type_t rtt,uintptr_t off,uint64_t val)1021 xhci_put64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint64_t val)
1022 {
1023 	uintptr_t addr, roff;
1024 
1025 	switch (rtt) {
1026 	case XHCI_R_CAP:
1027 		roff = xhcip->xhci_regs_capoff;
1028 		break;
1029 	case XHCI_R_OPER:
1030 		roff = xhcip->xhci_regs_operoff;
1031 		break;
1032 	case XHCI_R_RUN:
1033 		roff = xhcip->xhci_regs_runoff;
1034 		break;
1035 	case XHCI_R_DOOR:
1036 		roff = xhcip->xhci_regs_dooroff;
1037 		break;
1038 	default:
1039 		panic("called %s with bad reg type: %d", __func__, rtt);
1040 	}
1041 	ASSERT(roff != PCI_EINVAL32);
1042 	addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1043 
1044 	ddi_put64(xhcip->xhci_regs_handle, (void *)addr, val);
1045 }
1046 
1047 int
xhci_check_regs_acc(xhci_t * xhcip)1048 xhci_check_regs_acc(xhci_t *xhcip)
1049 {
1050 	ddi_fm_error_t de;
1051 
1052 	/*
1053 	 * Treat the case where we can't check as fine so we can treat the code
1054 	 * more simply.
1055 	 */
1056 	if (!DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps))
1057 		return (DDI_FM_OK);
1058 
1059 	ddi_fm_acc_err_get(xhcip->xhci_regs_handle, &de, DDI_FME_VERSION);
1060 	ddi_fm_acc_err_clear(xhcip->xhci_regs_handle, DDI_FME_VERSION);
1061 	return (de.fme_status);
1062 }
1063 
1064 /*
1065  * As a leaf PCIe driver, we just post the ereport and continue on.
1066  */
1067 /* ARGSUSED */
static int
xhci_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	/*
	 * NOTE(review): fme_status is read after the post on the assumption
	 * that pci_ereport_post() may update it -- confirm against the DDI
	 * before reordering these statements.
	 */
	return (err->fme_status);
}
1074 
1075 static void
xhci_fm_fini(xhci_t * xhcip)1076 xhci_fm_fini(xhci_t *xhcip)
1077 {
1078 	if (xhcip->xhci_fm_caps == 0)
1079 		return;
1080 
1081 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1082 		ddi_fm_handler_unregister(xhcip->xhci_dip);
1083 
1084 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1085 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1086 		pci_ereport_teardown(xhcip->xhci_dip);
1087 
1088 	ddi_fm_fini(xhcip->xhci_dip);
1089 }
1090 
1091 static void
xhci_fm_init(xhci_t * xhcip)1092 xhci_fm_init(xhci_t *xhcip)
1093 {
1094 	ddi_iblock_cookie_t iblk;
1095 	int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1096 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
1097 
1098 	xhcip->xhci_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
1099 	    DDI_PROP_DONTPASS, "fm_capable", def);
1100 
1101 	if (xhcip->xhci_fm_caps < 0) {
1102 		xhcip->xhci_fm_caps = 0;
1103 	} else if (xhcip->xhci_fm_caps & ~def) {
1104 		xhcip->xhci_fm_caps &= def;
1105 	}
1106 
1107 	if (xhcip->xhci_fm_caps == 0)
1108 		return;
1109 
1110 	ddi_fm_init(xhcip->xhci_dip, &xhcip->xhci_fm_caps, &iblk);
1111 	if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1112 	    DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1113 		pci_ereport_setup(xhcip->xhci_dip);
1114 	}
1115 
1116 	if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1117 		ddi_fm_handler_register(xhcip->xhci_dip,
1118 		    xhci_fm_error_cb, xhcip);
1119 	}
1120 }
1121 
1122 static int
xhci_reg_poll(xhci_t * xhcip,xhci_reg_type_t rt,int reg,uint32_t mask,uint32_t targ,uint_t tries,int delay_ms)1123 xhci_reg_poll(xhci_t *xhcip, xhci_reg_type_t rt, int reg, uint32_t mask,
1124     uint32_t targ, uint_t tries, int delay_ms)
1125 {
1126 	uint_t i;
1127 
1128 	for (i = 0; i < tries; i++) {
1129 		uint32_t val = xhci_get32(xhcip, rt, reg);
1130 		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1131 			ddi_fm_service_impact(xhcip->xhci_dip,
1132 			    DDI_SERVICE_LOST);
1133 			return (EIO);
1134 		}
1135 
1136 		if ((val & mask) == targ)
1137 			return (0);
1138 
1139 		delay(drv_usectohz(delay_ms * 1000));
1140 	}
1141 	return (ETIMEDOUT);
1142 }
1143 
1144 static boolean_t
xhci_regs_map(xhci_t * xhcip)1145 xhci_regs_map(xhci_t *xhcip)
1146 {
1147 	off_t memsize;
1148 	int ret;
1149 	ddi_device_acc_attr_t da;
1150 
1151 	if (ddi_dev_regsize(xhcip->xhci_dip, XHCI_REG_NUMBER, &memsize) !=
1152 	    DDI_SUCCESS) {
1153 		xhci_error(xhcip, "failed to get register set size");
1154 		return (B_FALSE);
1155 	}
1156 
1157 	bzero(&da, sizeof (ddi_device_acc_attr_t));
1158 	da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1159 	da.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1160 	da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1161 	if (DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps)) {
1162 		da.devacc_attr_access = DDI_FLAGERR_ACC;
1163 	} else {
1164 		da.devacc_attr_access = DDI_DEFAULT_ACC;
1165 	}
1166 
1167 	ret = ddi_regs_map_setup(xhcip->xhci_dip, XHCI_REG_NUMBER,
1168 	    &xhcip->xhci_regs_base, 0, memsize, &da, &xhcip->xhci_regs_handle);
1169 
1170 	if (ret != DDI_SUCCESS) {
1171 		xhci_error(xhcip, "failed to map device registers: %d", ret);
1172 		return (B_FALSE);
1173 	}
1174 
1175 	return (B_TRUE);
1176 }
1177 
1178 static boolean_t
xhci_regs_init(xhci_t * xhcip)1179 xhci_regs_init(xhci_t *xhcip)
1180 {
1181 	/*
1182 	 * The capabilities always begin at offset zero.
1183 	 */
1184 	xhcip->xhci_regs_capoff = 0;
1185 	xhcip->xhci_regs_operoff = xhci_get8(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1186 	xhcip->xhci_regs_runoff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_RTSOFF);
1187 	xhcip->xhci_regs_runoff &= ~0x1f;
1188 	xhcip->xhci_regs_dooroff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_DBOFF);
1189 	xhcip->xhci_regs_dooroff &= ~0x3;
1190 
1191 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1192 		xhci_error(xhcip, "failed to initialize controller register "
1193 		    "offsets: encountered FM register error");
1194 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1195 		return (B_FALSE);
1196 	}
1197 
1198 	return (B_TRUE);
1199 }
1200 
1201 /*
1202  * Read various parameters from PCI configuration space and from the Capability
1203  * registers that we'll need to register the device. We cache all of the
1204  * Capability registers.
1205  */
1206 static boolean_t
xhci_read_params(xhci_t * xhcip)1207 xhci_read_params(xhci_t *xhcip)
1208 {
1209 	uint8_t usb;
1210 	uint16_t vers;
1211 	uint32_t struc1, struc2, struc3, cap1, cap2, pgsz;
1212 	uint32_t psize, pbit, capreg;
1213 	xhci_capability_t *xcap;
1214 	unsigned long ps;
1215 
1216 	/*
1217 	 * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
1218 	 * a few emulated systems don't support reading at offset 0x2 for the
1219 	 * version. Instead we need to read the caplength register and get the
1220 	 * upper two bytes.
1221 	 */
1222 	capreg = xhci_get32(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1223 	vers = XHCI_VERSION_MASK(capreg);
1224 	usb = pci_config_get8(xhcip->xhci_cfg_handle, PCI_XHCI_USBREV);
1225 	struc1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS1);
1226 	struc2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS2);
1227 	struc3 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS3);
1228 	cap1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS1);
1229 	cap2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS2);
1230 	pgsz = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PAGESIZE);
1231 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1232 		xhci_error(xhcip, "failed to read controller parameters: "
1233 		    "encountered FM register error");
1234 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1235 		return (B_FALSE);
1236 	}
1237 
1238 	xcap = &xhcip->xhci_caps;
1239 	xcap->xcap_usb_vers = usb;
1240 	xcap->xcap_hci_vers = vers;
1241 	xcap->xcap_max_slots = XHCI_HCS1_DEVSLOT_MAX(struc1);
1242 	xcap->xcap_max_intrs = XHCI_HCS1_IRQ_MAX(struc1);
1243 	xcap->xcap_max_ports = XHCI_HCS1_N_PORTS(struc1);
1244 	if (xcap->xcap_max_ports > MAX_PORTS) {
1245 		xhci_error(xhcip, "Root hub has %d ports, but system only "
1246 		    "supports %d, limiting to %d\n", xcap->xcap_max_ports,
1247 		    MAX_PORTS, MAX_PORTS);
1248 		xcap->xcap_max_ports = MAX_PORTS;
1249 	}
1250 
1251 	xcap->xcap_ist_micro = XHCI_HCS2_IST_MICRO(struc2);
1252 	xcap->xcap_ist = XHCI_HCS2_IST(struc2);
1253 	xcap->xcap_max_esrt = XHCI_HCS2_ERST_MAX(struc2);
1254 	xcap->xcap_scratch_restore = XHCI_HCS2_SPR(struc2);
1255 	xcap->xcap_max_scratch = XHCI_HCS2_SPB_MAX(struc2);
1256 
1257 	xcap->xcap_u1_lat = XHCI_HCS3_U1_DEL(struc3);
1258 	xcap->xcap_u2_lat = XHCI_HCS3_U2_DEL(struc3);
1259 
1260 	xcap->xcap_flags = XHCI_HCC1_FLAGS_MASK(cap1);
1261 	xcap->xcap_max_psa = XHCI_HCC1_PSA_SZ_MAX(cap1);
1262 	xcap->xcap_xecp_off = XHCI_HCC1_XECP(cap1);
1263 	xcap->xcap_flags2 = XHCI_HCC2_FLAGS_MASK(cap2);
1264 
1265 	/*
1266 	 * We don't have documentation for what changed from before xHCI 0.96,
1267 	 * so we just refuse to support versions before 0.96. We also will
1268 	 * ignore anything with a major version greater than 1.
1269 	 */
1270 	if (xcap->xcap_hci_vers < 0x96 || xcap->xcap_hci_vers >= 0x200) {
1271 		xhci_error(xhcip, "Encountered unsupported xHCI version 0.%2x",
1272 		    xcap->xcap_hci_vers);
1273 		return (B_FALSE);
1274 	}
1275 
1276 	/*
1277 	 * Determine the smallest size page that the controller supports and
1278 	 * make sure that it matches our pagesize. We basically check here for
1279 	 * the presence of 4k and 8k pages. The basis of the pagesize is used
1280 	 * extensively throughout the code and specification. While we could
1281 	 * support other page sizes here, given that we don't support systems
1282 	 * with it at this time, it doesn't make much sense.
1283 	 */
1284 	ps = PAGESIZE;
1285 	if (ps == 0x1000) {
1286 		pbit = XHCI_PAGESIZE_4K;
1287 		psize = 0x1000;
1288 	} else if (ps == 0x2000) {
1289 		pbit = XHCI_PAGESIZE_8K;
1290 		psize = 0x2000;
1291 	} else {
1292 		xhci_error(xhcip, "Encountered host page size that the driver "
1293 		    "doesn't know how to handle: %lx\n", ps);
1294 		return (B_FALSE);
1295 	}
1296 
1297 	if (!(pgsz & pbit)) {
1298 		xhci_error(xhcip, "Encountered controller that didn't support "
1299 		    "the host page size (%d), supports: %x", psize, pgsz);
1300 		return (B_FALSE);
1301 	}
1302 	xcap->xcap_pagesize = psize;
1303 
1304 	return (B_TRUE);
1305 }
1306 
1307 /*
1308  * Apply known workarounds and issues. These reports come from other
1309  * Operating Systems and have been collected over time.
1310  */
1311 static boolean_t
xhci_identify(xhci_t * xhcip)1312 xhci_identify(xhci_t *xhcip)
1313 {
1314 	xhci_quirks_populate(xhcip);
1315 
1316 	if (xhcip->xhci_quirks & XHCI_QUIRK_NO_MSI) {
1317 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED;
1318 	} else {
1319 		xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED |
1320 		    DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX;
1321 	}
1322 
1323 	if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY) {
1324 		xhcip->xhci_caps.xcap_flags &= ~XCAP_AC64;
1325 	}
1326 
1327 	return (B_TRUE);
1328 }
1329 
1330 static boolean_t
xhci_alloc_intr_handle(xhci_t * xhcip,int type)1331 xhci_alloc_intr_handle(xhci_t *xhcip, int type)
1332 {
1333 	int ret;
1334 
1335 	/*
1336 	 * Normally a well-behaving driver would more carefully request an
1337 	 * amount of interrupts based on the number available, etc. But since we
1338 	 * only actually want a single interrupt, we're just going to go ahead
1339 	 * and ask for a single interrupt.
1340 	 */
1341 	ret = ddi_intr_alloc(xhcip->xhci_dip, &xhcip->xhci_intr_hdl, type, 0,
1342 	    XHCI_NINTR, &xhcip->xhci_intr_num, DDI_INTR_ALLOC_NORMAL);
1343 	if (ret != DDI_SUCCESS) {
1344 		xhci_log(xhcip, "!failed to allocate interrupts of type %d: %d",
1345 		    type, ret);
1346 		return (B_FALSE);
1347 	}
1348 	xhcip->xhci_intr_type = type;
1349 
1350 	return (B_TRUE);
1351 }
1352 
1353 static boolean_t
xhci_alloc_intrs(xhci_t * xhcip)1354 xhci_alloc_intrs(xhci_t *xhcip)
1355 {
1356 	int intr_types, ret;
1357 
1358 	if (XHCI_NINTR > xhcip->xhci_caps.xcap_max_intrs) {
1359 		xhci_error(xhcip, "controller does not support the minimum "
1360 		    "number of interrupts required (%d), supports %d",
1361 		    XHCI_NINTR, xhcip->xhci_caps.xcap_max_intrs);
1362 		return (B_FALSE);
1363 	}
1364 
1365 	if ((ret = ddi_intr_get_supported_types(xhcip->xhci_dip,
1366 	    &intr_types)) != DDI_SUCCESS) {
1367 		xhci_error(xhcip, "failed to get supported interrupt types: "
1368 		    "%d", ret);
1369 		return (B_FALSE);
1370 	}
1371 
1372 	/*
1373 	 * Mask off interrupt types we've already ruled out due to quirks or
1374 	 * other reasons.
1375 	 */
1376 	intr_types &= xhcip->xhci_caps.xcap_intr_types;
1377 	if (intr_types & DDI_INTR_TYPE_MSIX) {
1378 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSIX))
1379 			return (B_TRUE);
1380 	}
1381 
1382 	if (intr_types & DDI_INTR_TYPE_MSI) {
1383 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSI))
1384 			return (B_TRUE);
1385 	}
1386 
1387 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1388 		if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_FIXED))
1389 			return (B_TRUE);
1390 	}
1391 
1392 	xhci_error(xhcip, "failed to allocate an interrupt, supported types: "
1393 	    "0x%x", intr_types);
1394 	return (B_FALSE);
1395 }
1396 
/*
 * Register xhci_intr() as the handler for our previously allocated interrupt.
 * We also fetch the interrupt priority (needed to initialize our mutex in
 * attach) and the interrupt capabilities, both stashed in the soft state.
 */
static boolean_t
xhci_add_intr_handler(xhci_t *xhcip)
{
	int ret;

	/* The priority is used later to initialize xhci_lock correctly. */
	if ((ret = ddi_intr_get_pri(xhcip->xhci_intr_hdl,
	    &xhcip->xhci_intr_pri)) != DDI_SUCCESS) {
		xhci_error(xhcip, "failed to get interrupt priority: %d", ret);
		return (B_FALSE);
	}

	/* Record the capabilities for the later enable/disable paths. */
	if ((ret = ddi_intr_get_cap(xhcip->xhci_intr_hdl,
	    &xhcip->xhci_intr_caps)) != DDI_SUCCESS) {
		xhci_error(xhcip, "failed to get interrupt capabilities: %d",
		    ret);
		return (B_FALSE);
	}

	/* xhci_intr receives the xhci_t as its first argument. */
	if ((ret = ddi_intr_add_handler(xhcip->xhci_intr_hdl, xhci_intr, xhcip,
	    (uintptr_t)0)) != DDI_SUCCESS) {
		xhci_error(xhcip, "failed to add interrupt handler: %d", ret);
		return (B_FALSE);
	}
	return (B_TRUE);
}
1422 
/*
 * Find a capability with an identifier whose value is 'id'. The 'init'
 * argument gives us the offset to start searching at: pass UINT32_MAX to
 * search from the beginning, or a previously returned offset to find the next
 * matching capability strictly after it. See xHCI 1.1 / 7 for more
 * information. This is more or less exactly like PCI capabilities.
 */
static boolean_t
xhci_find_ext_cap(xhci_t *xhcip, uint32_t id, uint32_t init, uint32_t *outp)
{
	uint32_t off;
	uint8_t next = 0;

	/*
	 * If we have no offset, we're done.
	 */
	if (xhcip->xhci_caps.xcap_xecp_off == 0)
		return (B_FALSE);

	/* The XECP offset is in units of 32-bit words; convert to bytes. */
	off = xhcip->xhci_caps.xcap_xecp_off << 2;
	do {
		uint32_t cap_hdr;

		/* The 'next' pointer is likewise a count of 32-bit words. */
		off += next << 2;
		cap_hdr = xhci_get32(xhcip, XHCI_R_CAP, off);
		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
			xhci_error(xhcip, "failed to read xhci extended "
			    "capabilities at offset 0x%x: encountered FM "
			    "register error", off);
			ddi_fm_service_impact(xhcip->xhci_dip,
			    DDI_SERVICE_LOST);
			break;
		}

		/* An all-ones read indicates a bad register access. */
		if (cap_hdr == PCI_EINVAL32)
			break;
		/*
		 * Only match capabilities past 'init', unless init is the
		 * UINT32_MAX "search from the start" sentinel.
		 */
		if (XHCI_XECP_ID(cap_hdr) == id &&
		    (init == UINT32_MAX || off > init)) {
			*outp = off;
			return (B_TRUE);
		}
		next = XHCI_XECP_NEXT(cap_hdr);
		/*
		 * Watch out for overflow if we somehow end up with a more than
		 * 2 GiB space.
		 */
		if (next << 2 > (INT32_MAX - off))
			return (B_FALSE);
	} while (next != 0);

	return (B_FALSE);
}
1473 
/*
 * For mostly information purposes, we'd like to walk to augment the devinfo
 * tree with the number of ports that support USB 2 and USB 3. Note though that
 * these ports may be overlapping. Many ports can support both USB 2 and USB 3
 * and are wired up to the same physical port, even though they show up as
 * separate 'ports' in the xhci sense.
 */
static boolean_t
xhci_port_count(xhci_t *xhcip)
{
	/* Port count and first-port number for each protocol generation. */
	uint_t nusb2 = 0, fusb2 = 0;
	uint_t nusb30 = 0, fusb30 = 0;
	uint_t nusb31 = 0, fusb31 = 0;
	uint32_t off = UINT32_MAX;

	/*
	 * Walk every Supported Protocol extended capability; 'off' acts as
	 * both the search cursor and result (UINT32_MAX starts at the top).
	 * Note that if multiple capabilities describe the same protocol
	 * version, the counts from the last one seen win.
	 */
	while (xhci_find_ext_cap(xhcip, XHCI_ID_PROTOCOLS, off, &off) ==
	    B_TRUE) {
		uint32_t rvers, rport;
		uint8_t maj, min, count, first;

		/*
		 * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
		 * has version information while the third uint32_t has the port
		 * count.
		 */
		rvers = xhci_get32(xhcip, XHCI_R_CAP, off);
		rport = xhci_get32(xhcip, XHCI_R_CAP, off + 8);
		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
			xhci_error(xhcip, "failed to read xhci port counts: "
			    "encountered fatal FM register error");
			ddi_fm_service_impact(xhcip->xhci_dip,
			    DDI_SERVICE_LOST);
			return (B_FALSE);
		}

		maj = XHCI_XECP_PROT_MAJOR(rvers);
		min = XHCI_XECP_PROT_MINOR(rvers);
		count = XHCI_XECP_PROT_PCOUNT(rport);
		first = XHCI_XECP_PROT_FPORT(rport);

		/*
		 * In the wild, we've seen some systems that are using a minor
		 * version of 0x10 and some that are using 0x01 in this field.
		 * While the xhci spec says that we should expect it to be a
		 * minor of 0x01 based on the xHCI 1.1 specification Table 155:
		 * xHCI Supported Protocols. However, the USB 3.1 specification
		 * defines the version to be 0x10 when encoded as a BCD style.
		 * As such, handle both and hope we never get to revision 16 of
		 * USB 3.
		 */
		if (maj == 3 && (min == 0x10 || min == 0x01)) {
			nusb31 = count;
			fusb31 = first;
		} else if (maj == 3 && min == 0) {
			nusb30 = count;
			fusb30 = first;
		} else if (maj <= 2) {
			nusb2 = count;
			fusb2 = first;
		} else {
			xhci_error(xhcip, "encountered port capabilities with "
			    "unknown USB version: %x.%x\n", maj, min);
		}
	}

	/*
	 * These properties are used by FMA and the USB topo module.
	 */
	if (nusb2 > 0) {
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
		    "usb2.0-port-count", nusb2);
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
		    "usb2.0-first-port", fusb2);
	}
	if (nusb30 > 0) {
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
		    "usb3.0-port-count", nusb30);
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
		    "usb3.0-first-port", fusb30);
	}

	if (nusb31 > 0) {
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
		    "usb3.1-port-count", nusb31);
		(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
		    "usb3.1-first-port", fusb31);
	}

	return (B_TRUE);
}
1564 
/*
 * Take over control from the BIOS or other firmware, if applicable. The
 * hand-off is driven through the USB Legacy Support extended capability: we
 * assert the OS-owned semaphore, wait for the firmware to drop its BIOS-owned
 * bit, and finally quiesce the legacy SMI control/status register so that
 * firmware stops receiving events for the controller.
 */
static boolean_t
xhci_controller_takeover(xhci_t *xhcip)
{
	int ret;
	uint32_t val, off;

	/*
	 * If we can't find the legacy capability, then there's nothing to do.
	 */
	if (xhci_find_ext_cap(xhcip, XHCI_ID_USB_LEGACY, UINT32_MAX, &off) ==
	    B_FALSE)
		return (B_TRUE);
	val = xhci_get32(xhcip, XHCI_R_CAP, off);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read BIOS take over registers: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	if (val & XHCI_BIOS_OWNED) {
		/* Request ownership by setting the OS-owned semaphore. */
		val |= XHCI_OS_OWNED;
		xhci_put32(xhcip, XHCI_R_CAP, off, val);
		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
			xhci_error(xhcip, "failed to write BIOS take over "
			    "registers: encountered fatal FM register error");
			ddi_fm_service_impact(xhcip->xhci_dip,
			    DDI_SERVICE_LOST);
			return (B_FALSE);
		}

		/*
		 * Wait up to 5 seconds for things to change. While this number
		 * isn't specified in the xHCI spec, it seems to be the de facto
		 * value that various systems are using today. We'll use a 10ms
		 * interval to check.
		 */
		ret = xhci_reg_poll(xhcip, XHCI_R_CAP, off,
		    XHCI_BIOS_OWNED | XHCI_OS_OWNED, XHCI_OS_OWNED, 500, 10);
		if (ret == EIO)
			return (B_FALSE);
		if (ret == ETIMEDOUT) {
			/*
			 * Firmware never let go; forcibly clear its ownership
			 * bit ourselves rather than giving up on the device.
			 */
			xhci_log(xhcip, "!timed out waiting for firmware to "
			    "hand off, taking over");
			val &= ~XHCI_BIOS_OWNED;
			xhci_put32(xhcip, XHCI_R_CAP, off, val);
			if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
				xhci_error(xhcip, "failed to write forced "
				    "takeover: encountered fatal FM register "
				    "error");
				ddi_fm_service_impact(xhcip->xhci_dip,
				    DDI_SERVICE_LOST);
				return (B_FALSE);
			}
		}
	}

	/*
	 * Mask off the SMI enables and clear any pending SMI status in the
	 * legacy control/status register.
	 */
	val = xhci_get32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read legacy control registers: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}
	val &= XHCI_XECP_SMI_MASK;
	val |= XHCI_XECP_CLEAR_SMI;
	xhci_put32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS, val);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write legacy control registers: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}
1644 
/*
 * Stop the controller: clear the Run/Stop and interrupt enable bits in the
 * USB Command register and then wait for the controller to report Halted in
 * the status register. Returns 0 on success, EIO on a fatal register access
 * fault, or the error from xhci_reg_poll() (e.g. ETIMEDOUT) otherwise.
 */
static int
xhci_controller_stop(xhci_t *xhcip)
{
	uint32_t cmdreg;

	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read USB Command register: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	cmdreg &= ~(XHCI_CMD_RS | XHCI_CMD_INTE);
	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write USB Command register: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	/*
	 * Wait up to 50ms for this to occur. The specification says that this
	 * should stop within 16ms, but we give ourselves a bit more time just
	 * in case.
	 */
	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_HCH,
	    XHCI_STS_HCH, 50, 10));
}
1675 
/*
 * Reset the controller by asserting HCRST in the USB Command register and
 * waiting first for the reset bit to clear and then for the Controller Not
 * Ready status bit to clear. Returns 0 on success, EIO on a fatal register
 * access fault, or the error from xhci_reg_poll() otherwise.
 */
static int
xhci_controller_reset(xhci_t *xhcip)
{
	int ret;
	uint32_t cmdreg;

	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read USB Command register for "
		    "reset: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	cmdreg |= XHCI_CMD_HCRST;
	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write USB Command register for "
		    "reset: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	/*
	 * Some controllers apparently don't want to be touched for at least 1ms
	 * after we initiate the reset. Therefore give all controllers this
	 * moment to breathe.
	 */
	delay(drv_usectohz(xhci_reset_delay));

	/*
	 * To tell that the reset has completed we first verify that the reset
	 * has finished and that the USBCMD register no longer has the reset bit
	 * asserted. However, once that's done we have to go verify that CNR
	 * (Controller Not Ready) is no longer asserted.
	 */
	if ((ret = xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBCMD,
	    XHCI_CMD_HCRST, 0, 500, 10)) != 0)
		return (ret);

	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
	    XHCI_STS_CNR, 0, 500, 10));
}
1719 
1720 /*
1721  * Take care of all the required initialization before we can actually enable
1722  * the controller. This means that we need to:
1723  *
1724  *    o Program the maximum number of slots
1725  *    o Program the DCBAAP and allocate the scratchpad
1726  *    o Program the Command Ring
1727  *    o Initialize the Event Ring
1728  *    o Enable interrupts (set imod)
1729  */
1730 static int
xhci_controller_configure(xhci_t * xhcip)1731 xhci_controller_configure(xhci_t *xhcip)
1732 {
1733 	int ret;
1734 	uint32_t config;
1735 
1736 	config = xhci_get32(xhcip, XHCI_R_OPER, XHCI_CONFIG);
1737 	config &= ~XHCI_CONFIG_SLOTS_MASK;
1738 	config |= xhcip->xhci_caps.xcap_max_slots;
1739 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_CONFIG, config);
1740 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1741 		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1742 		return (EIO);
1743 	}
1744 
1745 	if ((ret = xhci_context_init(xhcip)) != 0) {
1746 		const char *reason;
1747 		if (ret == EIO) {
1748 			reason = "fatal FM I/O error occurred";
1749 		} else if (ret == ENOMEM) {
1750 			reason = "unable to allocate DMA memory";
1751 		} else {
1752 			reason = "unexpected error occurred";
1753 		}
1754 
1755 		xhci_error(xhcip, "failed to initialize xhci context "
1756 		    "registers: %s (%d)", reason, ret);
1757 		return (ret);
1758 	}
1759 
1760 	if ((ret = xhci_command_ring_init(xhcip)) != 0) {
1761 		xhci_error(xhcip, "failed to initialize commands: %d", ret);
1762 		return (ret);
1763 	}
1764 
1765 	if ((ret = xhci_event_init(xhcip)) != 0) {
1766 		xhci_error(xhcip, "failed to initialize events: %d", ret);
1767 		return (ret);
1768 	}
1769 
1770 	if ((ret = xhci_intr_conf(xhcip)) != 0) {
1771 		xhci_error(xhcip, "failed to configure interrupts: %d", ret);
1772 		return (ret);
1773 	}
1774 
1775 	return (0);
1776 }
1777 
/*
 * Start the controller by setting the Run/Stop bit in the USB Command
 * register and waiting for the Halted status bit to clear. Returns 0 on
 * success, EIO on a fatal register access fault, or the error from
 * xhci_reg_poll() otherwise.
 */
static int
xhci_controller_start(xhci_t *xhcip)
{
	uint32_t reg;

	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read USB Command register for "
		    "start: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	reg |= XHCI_CMD_RS;
	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, reg);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write USB Command register for "
		    "start: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	/* Wait for the controller to leave the Halted state. */
	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
	    XHCI_STS_HCH, 0, 500, 10));
}
1803 
/*
 * Taskq callback dispatched by xhci_fm_runtime_reset() when a fatal runtime
 * error requires a controller reset. 'arg' is the xhci_t, currently unused.
 */
/* ARGSUSED */
static void
xhci_reset_task(void *arg)
{
	/*
	 * Longer term, we'd like to properly perform a controller reset.
	 * However, that requires a bit more assistance from USBA to work
	 * properly and tear down devices. In the meantime, we panic.
	 */
	panic("XHCI runtime reset required");
}
1815 
1816 /*
1817  * This function is called when we've detected a fatal FM condition that has
1818  * resulted in a loss of service and we need to force a reset of the controller
1819  * as a whole. Only one such reset may be ongoing at a time.
1820  */
1821 void
xhci_fm_runtime_reset(xhci_t * xhcip)1822 xhci_fm_runtime_reset(xhci_t *xhcip)
1823 {
1824 	boolean_t locked = B_FALSE;
1825 
1826 	if (mutex_owned(&xhcip->xhci_lock)) {
1827 		locked = B_TRUE;
1828 	} else {
1829 		mutex_enter(&xhcip->xhci_lock);
1830 	}
1831 
1832 	/*
1833 	 * If we're already in the error state than a reset is already ongoing
1834 	 * and there is nothing for us to do here.
1835 	 */
1836 	if (xhcip->xhci_state & XHCI_S_ERROR) {
1837 		goto out;
1838 	}
1839 
1840 	xhcip->xhci_state |= XHCI_S_ERROR;
1841 	ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1842 	taskq_dispatch_ent(xhci_taskq, xhci_reset_task, xhcip, 0,
1843 	    &xhcip->xhci_tqe);
1844 out:
1845 	if (!locked) {
1846 		mutex_exit(&xhcip->xhci_lock);
1847 	}
1848 }
1849 
1850 static int
xhci_ioctl_portsc(xhci_t * xhcip,intptr_t arg)1851 xhci_ioctl_portsc(xhci_t *xhcip, intptr_t arg)
1852 {
1853 	int i;
1854 	xhci_ioctl_portsc_t xhi;
1855 
1856 	bzero(&xhi, sizeof (xhci_ioctl_portsc_t));
1857 	xhi.xhi_nports = xhcip->xhci_caps.xcap_max_ports;
1858 	for (i = 1; i <= xhcip->xhci_caps.xcap_max_ports; i++) {
1859 		xhi.xhi_portsc[i] = xhci_get32(xhcip, XHCI_R_OPER,
1860 		    XHCI_PORTSC(i));
1861 	}
1862 
1863 	if (ddi_copyout(&xhi, (void *)(uintptr_t)arg, sizeof (xhi), 0) != 0)
1864 		return (EFAULT);
1865 
1866 	return (0);
1867 }
1868 
1869 static int
xhci_ioctl_clear(xhci_t * xhcip,intptr_t arg)1870 xhci_ioctl_clear(xhci_t *xhcip, intptr_t arg)
1871 {
1872 	uint32_t reg;
1873 	xhci_ioctl_clear_t xic;
1874 
1875 	if (ddi_copyin((const void *)(uintptr_t)arg, &xic, sizeof (xic),
1876 	    0) != 0)
1877 		return (EFAULT);
1878 
1879 	if (xic.xic_port == 0 || xic.xic_port >
1880 	    xhcip->xhci_caps.xcap_max_ports)
1881 		return (EINVAL);
1882 
1883 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port));
1884 	reg &= ~XHCI_PS_CLEAR;
1885 	reg |= XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC |
1886 	    XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC;
1887 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port), reg);
1888 
1889 	return (0);
1890 }
1891 
1892 static int
xhci_ioctl_setpls(xhci_t * xhcip,intptr_t arg)1893 xhci_ioctl_setpls(xhci_t *xhcip, intptr_t arg)
1894 {
1895 	uint32_t reg;
1896 	xhci_ioctl_setpls_t xis;
1897 
1898 	if (ddi_copyin((const void *)(uintptr_t)arg, &xis, sizeof (xis),
1899 	    0) != 0)
1900 		return (EFAULT);
1901 
1902 	if (xis.xis_port == 0 || xis.xis_port >
1903 	    xhcip->xhci_caps.xcap_max_ports)
1904 		return (EINVAL);
1905 
1906 	if (xis.xis_pls & ~0xf)
1907 		return (EINVAL);
1908 
1909 	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port));
1910 	reg &= ~XHCI_PS_CLEAR;
1911 	reg |= XHCI_PS_PLS_SET(xis.xis_pls);
1912 	reg |= XHCI_PS_LWS;
1913 	xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port), reg);
1914 
1915 	return (0);
1916 }
1917 
1918 static int
xhci_open(dev_t * devp,int flags,int otyp,cred_t * credp)1919 xhci_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1920 {
1921 	dev_info_t *dip = xhci_get_dip(*devp);
1922 
1923 	return (usba_hubdi_open(dip, devp, flags, otyp, credp));
1924 }
1925 
1926 static int
xhci_ioctl(dev_t dev,int cmd,intptr_t arg,int mode,cred_t * credp,int * rvalp)1927 xhci_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1928     int *rvalp)
1929 {
1930 	dev_info_t *dip = xhci_get_dip(dev);
1931 
1932 	if (cmd == XHCI_IOCTL_PORTSC ||
1933 	    cmd == XHCI_IOCTL_CLEAR ||
1934 	    cmd == XHCI_IOCTL_SETPLS) {
1935 		xhci_t *xhcip = ddi_get_soft_state(xhci_soft_state,
1936 		    getminor(dev) & ~HUBD_IS_ROOT_HUB);
1937 
1938 		if (secpolicy_hwmanip(credp) != 0 ||
1939 		    crgetzoneid(credp) != GLOBAL_ZONEID)
1940 			return (EPERM);
1941 
1942 		if (mode & FKIOCTL)
1943 			return (ENOTSUP);
1944 
1945 		if (!(mode & FWRITE))
1946 			return (EBADF);
1947 
1948 		if (cmd == XHCI_IOCTL_PORTSC)
1949 			return (xhci_ioctl_portsc(xhcip, arg));
1950 		else if (cmd == XHCI_IOCTL_CLEAR)
1951 			return (xhci_ioctl_clear(xhcip, arg));
1952 		else
1953 			return (xhci_ioctl_setpls(xhcip, arg));
1954 	}
1955 
1956 	return (usba_hubdi_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
1957 }
1958 
1959 static int
xhci_close(dev_t dev,int flag,int otyp,cred_t * credp)1960 xhci_close(dev_t dev, int flag, int otyp, cred_t *credp)
1961 {
1962 	dev_info_t *dip = xhci_get_dip(dev);
1963 
1964 	return (usba_hubdi_close(dip, dev, flag, otyp, credp));
1965 }
1966 
/*
 * We try to clean up everything that we can. The only thing that we let stop us
 * at this time is a failure to remove the root hub, which is realistically the
 * equivalent of our EBUSY case.
 *
 * Teardown is driven by the xhci_seq attach-progress bits so that we only
 * undo the steps that actually completed, in roughly the reverse order of
 * xhci_attach().
 */
static int
xhci_cleanup(xhci_t *xhcip)
{
	int ret, inst;

	/* Removing the root hub is the only step that may fail (EBUSY-like). */
	if (xhcip->xhci_seq & XHCI_ATTACH_ROOT_HUB) {
		if ((ret = xhci_root_hub_fini(xhcip)) != 0)
			return (ret);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_USBA) {
		xhci_hcd_fini(xhcip);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_STARTED) {
		/*
		 * If a runtime error/reset is in flight, wait for it to clear
		 * before stopping the controller.
		 */
		mutex_enter(&xhcip->xhci_lock);
		while (xhcip->xhci_state & XHCI_S_ERROR)
			cv_wait(&xhcip->xhci_statecv, &xhcip->xhci_lock);
		mutex_exit(&xhcip->xhci_lock);

		(void) xhci_controller_stop(xhcip);
	}

	/*
	 * Always release the context, command, and event data. They handle the
	 * fact that they may be in an arbitrary state or unallocated.
	 */
	xhci_event_fini(xhcip);
	xhci_command_ring_fini(xhcip);
	xhci_context_fini(xhcip);

	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ENABLE) {
		(void) xhci_ddi_intr_disable(xhcip);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_SYNCH) {
		cv_destroy(&xhcip->xhci_statecv);
		mutex_destroy(&xhcip->xhci_lock);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ADD) {
		if ((ret = ddi_intr_remove_handler(xhcip->xhci_intr_hdl)) !=
		    DDI_SUCCESS) {
			xhci_error(xhcip, "failed to remove interrupt "
			    "handler: %d", ret);
		}
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ALLOC) {
		if ((ret = ddi_intr_free(xhcip->xhci_intr_hdl)) !=
		    DDI_SUCCESS) {
			xhci_error(xhcip, "failed to free interrupts: %d", ret);
		}
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_REGS_MAP) {
		ddi_regs_map_free(&xhcip->xhci_regs_handle);
		xhcip->xhci_regs_handle = NULL;
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_PCI_CONFIG) {
		pci_config_teardown(&xhcip->xhci_cfg_handle);
		xhcip->xhci_cfg_handle = NULL;
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_FM) {
		xhci_fm_fini(xhcip);
		xhcip->xhci_fm_caps = 0;
	}

	/* Finally, release the soft state itself. */
	inst = ddi_get_instance(xhcip->xhci_dip);
	xhcip->xhci_dip = NULL;
	ddi_soft_state_free(xhci_soft_state, inst);

	return (DDI_SUCCESS);
}
2048 
2049 static int
xhci_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)2050 xhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2051 {
2052 	int ret, inst, route;
2053 	xhci_t *xhcip;
2054 
2055 	if (cmd != DDI_ATTACH)
2056 		return (DDI_FAILURE);
2057 
2058 	inst = ddi_get_instance(dip);
2059 	if (ddi_soft_state_zalloc(xhci_soft_state, inst) != 0)
2060 		return (DDI_FAILURE);
2061 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2062 	xhcip->xhci_dip = dip;
2063 
2064 	xhcip->xhci_regs_capoff = PCI_EINVAL32;
2065 	xhcip->xhci_regs_operoff = PCI_EINVAL32;
2066 	xhcip->xhci_regs_runoff = PCI_EINVAL32;
2067 	xhcip->xhci_regs_dooroff = PCI_EINVAL32;
2068 
2069 	xhci_fm_init(xhcip);
2070 	xhcip->xhci_seq |= XHCI_ATTACH_FM;
2071 
2072 	if (pci_config_setup(xhcip->xhci_dip, &xhcip->xhci_cfg_handle) !=
2073 	    DDI_SUCCESS) {
2074 		goto err;
2075 	}
2076 	xhcip->xhci_seq |= XHCI_ATTACH_PCI_CONFIG;
2077 	xhcip->xhci_vendor_id = pci_config_get16(xhcip->xhci_cfg_handle,
2078 	    PCI_CONF_VENID);
2079 	xhcip->xhci_device_id = pci_config_get16(xhcip->xhci_cfg_handle,
2080 	    PCI_CONF_DEVID);
2081 
2082 	if (xhci_regs_map(xhcip) == B_FALSE) {
2083 		goto err;
2084 	}
2085 
2086 	xhcip->xhci_seq |= XHCI_ATTACH_REGS_MAP;
2087 
2088 	if (xhci_regs_init(xhcip) == B_FALSE)
2089 		goto err;
2090 
2091 	if (xhci_read_params(xhcip) == B_FALSE)
2092 		goto err;
2093 
2094 	if (xhci_identify(xhcip) == B_FALSE)
2095 		goto err;
2096 
2097 	if (xhci_alloc_intrs(xhcip) == B_FALSE)
2098 		goto err;
2099 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ALLOC;
2100 
2101 	if (xhci_add_intr_handler(xhcip) == B_FALSE)
2102 		goto err;
2103 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ADD;
2104 
2105 	mutex_init(&xhcip->xhci_lock, NULL, MUTEX_DRIVER,
2106 	    (void *)(uintptr_t)xhcip->xhci_intr_pri);
2107 	cv_init(&xhcip->xhci_statecv, NULL, CV_DRIVER, NULL);
2108 	xhcip->xhci_seq |= XHCI_ATTACH_SYNCH;
2109 
2110 	if (xhci_port_count(xhcip) == B_FALSE)
2111 		goto err;
2112 
2113 	if (xhci_controller_takeover(xhcip) == B_FALSE)
2114 		goto err;
2115 
2116 	/*
2117 	 * We don't enable interrupts until after we take over the controller
2118 	 * from the BIOS. We've observed cases where this can cause spurious
2119 	 * interrupts.
2120 	 */
2121 	if (xhci_ddi_intr_enable(xhcip) == B_FALSE)
2122 		goto err;
2123 	xhcip->xhci_seq |= XHCI_ATTACH_INTR_ENABLE;
2124 
2125 	if ((ret = xhci_controller_stop(xhcip)) != 0) {
2126 		xhci_error(xhcip, "failed to stop controller: %s",
2127 		    ret == EIO ? "encountered FM register error" :
2128 		    "timed out while waiting for controller");
2129 		goto err;
2130 	}
2131 
2132 	if ((ret = xhci_controller_reset(xhcip)) != 0) {
2133 		xhci_error(xhcip, "failed to reset controller: %s",
2134 		    ret == EIO ? "encountered FM register error" :
2135 		    "timed out while waiting for controller");
2136 		goto err;
2137 	}
2138 
2139 	if ((ret = xhci_controller_configure(xhcip)) != 0) {
2140 		xhci_error(xhcip, "failed to configure controller: %d", ret);
2141 		goto err;
2142 	}
2143 
2144 	/*
2145 	 * Some systems support having ports routed to both an ehci and xhci
2146 	 * controller. If we support it and the user hasn't requested otherwise
2147 	 * via a driver.conf tuning, we reroute it now.
2148 	 */
2149 	route = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
2150 	    DDI_PROP_DONTPASS, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT);
2151 	if (route != XHCI_PROP_REROUTE_DISABLE &&
2152 	    (xhcip->xhci_quirks & XHCI_QUIRK_INTC_EHCI))
2153 		(void) xhci_reroute_intel(xhcip);
2154 
2155 	if ((ret = xhci_controller_start(xhcip)) != 0) {
2156 		xhci_log(xhcip, "failed to reset controller: %s",
2157 		    ret == EIO ? "encountered FM register error" :
2158 		    "timed out while waiting for controller");
2159 		goto err;
2160 	}
2161 	xhcip->xhci_seq |= XHCI_ATTACH_STARTED;
2162 
2163 	/*
2164 	 * Finally, register ourselves with the USB framework itself.
2165 	 */
2166 	if ((ret = xhci_hcd_init(xhcip)) != 0) {
2167 		xhci_error(xhcip, "failed to register hcd with usba");
2168 		goto err;
2169 	}
2170 	xhcip->xhci_seq |= XHCI_ATTACH_USBA;
2171 
2172 	if ((ret = xhci_root_hub_init(xhcip)) != 0) {
2173 		xhci_error(xhcip, "failed to load the root hub driver");
2174 		goto err;
2175 	}
2176 	xhcip->xhci_seq |= XHCI_ATTACH_ROOT_HUB;
2177 
2178 	return (DDI_SUCCESS);
2179 
2180 err:
2181 	(void) xhci_cleanup(xhcip);
2182 	return (DDI_FAILURE);
2183 }
2184 
2185 static int
xhci_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)2186 xhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2187 {
2188 	xhci_t *xhcip;
2189 
2190 	if (cmd != DDI_DETACH)
2191 		return (DDI_FAILURE);
2192 
2193 	xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2194 	if (xhcip == NULL) {
2195 		dev_err(dip, CE_WARN, "detach called without soft state!");
2196 		return (DDI_FAILURE);
2197 	}
2198 
2199 	return (xhci_cleanup(xhcip));
2200 }
2201 
2202 /* ARGSUSED */
2203 static int
xhci_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** outp)2204 xhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
2205 {
2206 	dev_t dev;
2207 	int inst;
2208 
2209 	switch (cmd) {
2210 	case DDI_INFO_DEVT2DEVINFO:
2211 		dev = (dev_t)arg;
2212 		*outp = xhci_get_dip(dev);
2213 		if (*outp == NULL)
2214 			return (DDI_FAILURE);
2215 		break;
2216 	case DDI_INFO_DEVT2INSTANCE:
2217 		dev = (dev_t)arg;
2218 		inst = getminor(dev) & ~HUBD_IS_ROOT_HUB;
2219 		*outp = (void *)(uintptr_t)inst;
2220 		break;
2221 	default:
2222 		return (DDI_FAILURE);
2223 	}
2224 
2225 	return (DDI_SUCCESS);
2226 }
2227 
/*
 * Character device entry points. Only open, close, and ioctl are supported;
 * everything else is nodev/nochpoll.
 */
static struct cb_ops xhci_cb_ops = {
	xhci_open,		/* cb_open */
	xhci_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	xhci_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
2248 
/*
 * Device operations. Bus operations and root-hub power management are
 * delegated to the USBA hubdi framework; quiesce is not supported.
 */
static struct dev_ops xhci_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	xhci_getinfo,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	xhci_attach,			/* devo_attach */
	xhci_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&xhci_cb_ops,			/* devo_cb_ops */
	&usba_hubdi_busops,		/* devo_bus_ops */
	usba_hubdi_root_hub_power,	/* devo_power */
	ddi_quiesce_not_supported	/* devo_quiesce */
};
2263 
/* Module linkage: a single driver module. */
static struct modldrv xhci_modldrv = {
	&mod_driverops,
	"USB xHCI Driver",
	&xhci_dev_ops
};

static struct modlinkage xhci_modlinkage = {
	MODREV_1,
	&xhci_modldrv,
	NULL
};
2275 
2276 int
_init(void)2277 _init(void)
2278 {
2279 	int ret;
2280 
2281 	if ((ret = ddi_soft_state_init(&xhci_soft_state, sizeof (xhci_t),
2282 	    0)) != 0) {
2283 		return (ret);
2284 	}
2285 
2286 	xhci_taskq = taskq_create("xhci_taskq", 1, minclsyspri, 0, 0, 0);
2287 	if (xhci_taskq == NULL) {
2288 		ddi_soft_state_fini(&xhci_soft_state);
2289 		return (ENOMEM);
2290 	}
2291 
2292 	if ((ret = mod_install(&xhci_modlinkage)) != 0) {
2293 		taskq_destroy(xhci_taskq);
2294 		xhci_taskq = NULL;
2295 	}
2296 
2297 	return (ret);
2298 }
2299 
/* Module info entry point: report via the standard modlinkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&xhci_modlinkage, modinfop));
}
2305 
/*
 * Module unload entry point: remove the module first (which may fail if the
 * driver is busy), then tear down the taskq and soft state anchor.
 */
int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&xhci_modlinkage)) != 0)
		return (ret);

	if (xhci_taskq != NULL) {
		taskq_destroy(xhci_taskq);
		xhci_taskq = NULL;
	}

	ddi_soft_state_fini(&xhci_soft_state);

	return (0);
}
2323