/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2016 The MathWorks, Inc.  All rights reserved.
 * Copyright 2019 Unix Software Ltd.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2020 Racktop Systems.
 * Copyright 2022 Oxide Computer Company.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver targets and is designed to support all NVMe 1.x devices.
 * Features are added to the driver as we encounter devices that require them
 * and as our needs dictate, so some commands or log pages may not take
 * advantage of newer features that devices support at this time. When you
 * encounter such a case, it is generally fine to add that support to the
 * driver as long as you take care to ensure that the requisite device version
 * is met before using it.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to use
 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
 * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
 * the admin queue will share an interrupt with one I/O queue. The interrupt
 * handler will retrieve completed commands from all queues sharing an interrupt
 * vector and will post them to a taskq for completion processing.
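 *
 * A condensed sketch of that per-vector iteration (an illustration using the
 * field and function names found in this file, not the literal handler code):
 *
 *	for (qnum = inum; qnum < nvme->n_cq_count; qnum += nvme->n_intr_cnt)
 *		ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
 *
 * where inum is the vector number passed to nvme_intr() and ccnt feeds the
 * DDI_INTR_CLAIMED decision.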
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
 * to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that can
 * hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array. The
 * array index is used as command identifier (CID) in the submission queue
 * entry. Some commands may take a very long time to complete, and if the queue
 * wraps around in that time a submission may find the next array slot to still
 * be used by a long-running command. In this case the array is sequentially
 * searched for the next free slot. The length of the command array is the same
 * as the configured queue length. Queue overrun is prevented by the semaphore,
 * so a command submission may block if the queue is full.
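 *
 * In code form, the slot search is a condensed excerpt of what
 * nvme_submit_cmd_common() below does while holding nq_mutex:
 *
 *	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;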
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts are
 * turned off while dumping, the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
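 *
 * A sketch of that retrieval loop (an illustration of the approach, not the
 * literal polled-I/O code in nvme_bd_cmd()):
 *
 *	while ((cmd = nvme_retrieve_cmd(nvme, qp)) == NULL)
 *		drv_usecwait(10);	/* spin until a completion shows up */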
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev interface
 * for each namespace found. Namespaces can have various attributes to support
 * protection information. This driver does not support any of this and ignores
 * namespaces that have these attributes.
 *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
 * if present to generate the devid, and passes the EUI64 to blkdev to use it
 * in the device node names.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces using a
 * 32bit minor node number.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller and
 * one minor node for each namespace. The only operations supported by those
 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
 * interface for the nvmeadm(8) utility.
 *
 * Exclusive opens are required for certain ioctl(9E) operations that alter
 * controller and/or namespace state. While different namespaces may be opened
 * exclusively in parallel, an exclusive open of the controller minor node
 * requires that no namespaces are currently open (exclusive or otherwise).
 * Opening any namespace minor node (exclusive or otherwise) will fail while
 * the controller minor node is opened exclusively by any other thread. Thus it
 * is possible for one thread at a time to open the controller minor node
 * exclusively, and keep it open while opening any namespace minor node of the
 * same controller, exclusively or otherwise.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with presenting
 * a disk device to the system. As a result, the processing of I/O requests is
 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
 * setup, and splitting of transfers into manageable chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted to
 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
 * queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the namespace
 * format back to blkdev as physical block size to support partition and block
 * alignment. The devid is either based on the namespace GUID or EUI64, if
 * present, or composed using the device vendor ID, model number, serial number,
 * and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off;
 * all further requests will return EIO. FMA is then called to fault the device.
 *
 * The hardware has a limit for outstanding asynchronous event requests. Before
 * this limit is known the driver assumes it is at least 1 and posts a single
 * asynchronous request. Later when the limit is known more asynchronous event
 * requests are posted to allow quicker reception of error information. When an
 * asynchronous event is posted by the hardware the driver will parse the error
 * status fields and log information or fault the device, depending on the
 * severity of the asynchronous event. The asynchronous event request is then
 * reused and posted to the admin queue again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware appears
 * to be healthy the driver attempts to abort the command. The original command
 * timeout is also applied to the abort command. If the abort times out as
 * well, the driver assumes the device to be dead, fences it off, and calls FMA
 * to retire it. In all other cases the aborted command should return
 * immediately with a status indicating it was aborted, and the driver will
 * wait indefinitely for that to happen. No timeout handling of normal I/O
 * commands is presently done.
 *
 * Any command that times out due to the controller dropping dead will be put on
 * the nvme_lost_cmds list if it references DMA memory. This will prevent the
 * DMA memory from being reused by the system and later being written to by a
 * "dead" NVMe controller.
 *
 *
 * Locking:
 *
 * Each queue pair has an nq_mutex and an ncq_mutex. The nq_mutex must be held
 * when accessing shared state and submission queue registers; ncq_mutex
 * is held when accessing completion queue state and registers.
 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
 * mutexes themselves.
 *
 * Each command also has its own nc_mutex, which is associated with the
 * condition variable nc_cv. It is only used on admin commands which are run
 * synchronously. In that case it must be held across calls to
 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
 * nvme_admin_cmd(). It must also be held whenever the completion state of the
 * command is changed or while an admin command timeout is handled.
 *
 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
 * More than one nc_mutex may only be held when aborting commands. In this case,
 * the nc_mutex of the command to be aborted must be held across the call to
 * nvme_abort_cmd() to prevent the command from completing while the abort is in
 * progress.
 *
 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
 * acquired first. More than one nq_mutex is never held by a single thread.
 * The ncq_mutex is only held by nvme_retrieve_cmd() and
 * nvme_process_iocq(). nvme_process_iocq() is only called from the
 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
 * mutex is effectively uncontended but is required for implementation
 * completeness and safety.
 *
 * There is one mutex n_minor_mutex which protects all open flags nm_open and
 * exclusive-open thread pointers nm_oexcl of each minor node associated with a
 * controller and its namespaces.
 *
 * In addition, there is one mutex n_mgmt_mutex which must be held whenever the
 * driver state for any namespace is changed, especially across calls to
 * nvme_init_ns(), nvme_attach_ns() and nvme_detach_ns(). Except when detaching
 * nvme, it should also be held across calls that modify the blkdev handle of a
 * namespace. Command and queue mutexes may be acquired and released while
 * n_mgmt_mutex is held, but n_minor_mutex may not be.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry point
 * is still provided which is used to send a shutdown notification to the
 * device.
 *
 *
 * NVMe Hotplug:
 *
 * The driver supports hot removal. The driver uses the NDI event framework
 * to register a callback, nvme_remove_callback, to clean up when a disk is
 * removed. In particular, the driver will unqueue outstanding I/O commands and
 * set n_dead on the softstate to true so that other operations, such as ioctls
 * and command submissions, fail as well.
 *
 * While the callback registration relies on the NDI event framework, the
 * removal event itself is kicked off in the PCIe hotplug framework, when the
 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
 * device was removed from the slot.
 *
 * The NVMe driver instance itself will remain until the final close of the
 * device.
 *
 *
 * DDI UFM Support:
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of the
 * driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to be
 *   posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 *   cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues.
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared.
 *
 * In addition to the above properties, some device-specific tunables can be
 * configured using the nvme-config-list global property. The value of this
 * property is a list of triplets. The formal syntax is:
 *
 *   nvme-config-list ::= <triplet> [, <triplet>]* ;
 *   <triplet>        ::= "<model>" , "<rev-list>" , "<tuple-list>"
 *   <rev-list>       ::= [ <fwrev> [, <fwrev>]*]
 *   <tuple-list>     ::= <tunable> [, <tunable>]*
 *   <tunable>        ::= <name> : <value>
 *
 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
 * contains one or more tunables to apply to all controllers that match the
 * specified model number and optionally firmware revision. Each <tunable> is a
 * <name> : <value> pair.  Supported tunables are:
 *
 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 *
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 *
 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
 *   volatile write cache, if present
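 *
 * As an illustration, an nvme.conf fragment using this syntax might look as
 * follows (the model and firmware revision strings here are made up):
 *
 *   nvme-config-list =
 *       "EXAMPLE MODEL A", "",           "min-phys-block-size:4096",
 *       "EXAMPLE MODEL B", "FW01, FW02", "volatile-write-cache:off";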
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>
#include <sys/dkio.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"

/*
 * Assertions to make sure that we've properly captured various aspects of the
 * packed structures and haven't broken them during updates.
 */
CTASSERT(sizeof (nvme_identify_ctrl_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);

CTASSERT(sizeof (nvme_identify_nsid_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);

CTASSERT(sizeof (nvme_identify_primary_caps_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);


/* NVMe spec version supported */
static const int nvme_version_major = 1;

/* tunable for admin command timeout in seconds, default is 1s */
int nvme_admin_cmd_timeout = 1;

/* tunable for FORMAT NVM command timeout in seconds, default is 600s */
int nvme_format_cmd_timeout = 600;

/* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
int nvme_commit_save_cmd_timeout = 15;

/*
 * tunable for the size of arbitrary vendor specific admin commands,
 * default is 16MiB.
 */
uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;

/*
 * tunable for the max timeout of arbitrary vendor specific admin commands,
 * default is 60s.
 */
uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static void nvme_admin_cmd(nvme_cmd_t *, int);
static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
static void nvme_async_event(nvme_t *);
static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t,
    uint8_t, boolean_t, uint8_t);
static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t,
    ...);
static int nvme_identify(nvme_t *, boolean_t, uint32_t, void **);
static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t *,
    void **, size_t *);
static int nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
static int nvme_bd_free_space(void *, bd_xfer_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

/* DDI UFM callbacks */
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

static int nvme_init_ns(nvme_t *, int);
static int nvme_attach_ns(nvme_t *, int);
static int nvme_detach_ns(nvme_t *, int);

#define	NVME_NSID2NS(nvme, nsid)	(&((nvme)->n_ns[(nsid) - 1]))

static ddi_ufm_ops_t nvme_ufm_ops = {
	NULL,
	nvme_ufm_fill_image,
	nvme_ufm_fill_slot,
	nvme_ufm_getcaps
};

#define	NVME_MINOR_INST_SHIFT	9
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)
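
/*
 * A worked (hypothetical) example of the encoding above: NVME_MINOR(2, 3) is
 * (2 << 9) | 3 == 1027, from which NVME_MINOR_INST() recovers instance 2 and
 * NVME_MINOR_NSID() recovers namespace ID 3. NSID 0 is used for the
 * controller's own minor node.
 */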
#define	NVME_IS_VENDOR_SPECIFIC_CMD(x)	(((x) >= 0xC0) && ((x) <= 0xFF))
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MIN	0xC0
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MAX	0xFF
#define	NVME_IS_VENDOR_SPECIFIC_LOGPAGE(x)	\
		(((x) >= NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) && \
		((x) <= NVME_VENDOR_SPECIFIC_LOGPAGE_MAX))

/*
 * NVMe versions 1.3 and later actually support log pages up to UINT32_MAX
 * DWords in size. However, revision 1.3 also modified the layout of the Get Log
 * Page command significantly relative to version 1.2, including changing
 * reserved bits, adding new bitfields, and requiring the use of command DWord
 * 11 to fully specify the size of the log page (the lower and upper 16 bits of
 * the number of DWords in the page are split between DWord 10 and DWord 11,
 * respectively).
 *
 * All of these impose significantly different layout requirements on the
 * `nvme_getlogpage_t` type. This could be solved with two different types, or a
 * complicated/nested union with the two versions as the overlying members. Both
 * of these are reasonable, if a bit convoluted. However, there is no current
 * need for such large pages, or a way to test them, as most log pages actually
 * fit within the current size limit. So for simplicity, we retain the size cap
 * from version 1.2.
 *
 * Note that the number of DWords is zero-based, so we add 1. It is subtracted
 * to form a zero-based value in `nvme_get_logpage`.
 */
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE	\
		(((1 << 12) + 1) * sizeof (uint32_t))
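
/*
 * Given the definition above, this works out to (4096 + 1) * 4 == 16388 bytes,
 * i.e. one DWord more than 16 KiB.
 */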

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= 1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It uses
 * a 64bit base address aligned to this page size. There is no limitation on
 * chaining PRPs together for arbitrarily large DMA transfers.
 */
static ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xfff,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= 0x1000,
	.dma_attr_seg		= 0xfff,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
 * 32bit length field. SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes.
 */
static ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xffffffffUL,
	.dma_attr_align		= 1,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x10,
	.dma_attr_maxxfer	= 0xfffffffffULL,
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 0x10,
	.dma_attr_flags		= 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
};

static struct cb_ops nvme_cb_ops = {
	.cb_open	= nvme_open,
	.cb_close	= nvme_close,
	.cb_strategy	= nodev,
	.cb_print	= nodev,
	.cb_dump	= nodev,
	.cb_read	= nodev,
	.cb_write	= nodev,
	.cb_ioctl	= nvme_ioctl,
	.cb_devmap	= nodev,
	.cb_mmap	= nodev,
	.cb_segmap	= nodev,
	.cb_chpoll	= nochpoll,
	.cb_prop_op	= ddi_prop_op,
	.cb_str		= 0,
	.cb_flag	= D_NEW | D_MP,
	.cb_rev		= CB_REV,
	.cb_aread	= nodev,
	.cb_awrite	= nodev
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev	= DEVO_REV,
	.devo_refcnt	= 0,
	.devo_getinfo	= ddi_no_info,
	.devo_identify	= nulldev,
	.devo_probe	= nulldev,
	.devo_attach	= nvme_attach,
	.devo_detach	= nvme_detach,
	.devo_reset	= nodev,
	.devo_cb_ops	= &nvme_cb_ops,
	.devo_bus_ops	= NULL,
	.devo_power	= NULL,
	.devo_quiesce	= nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops	= &mod_driverops,
	.drv_linkinfo	= "NVMe v1.1b",
	.drv_dev_ops	= &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev		= MODREV_1,
	.ml_linkage	= { &nvme_modldrv, NULL }
};

static bd_ops_t nvme_bd_ops = {
	.o_version	= BD_OPS_CURRENT_VERSION,
	.o_drive_info	= nvme_bd_driveinfo,
	.o_media_info	= nvme_bd_mediainfo,
	.o_devid_init	= nvme_bd_devid,
	.o_sync_cache	= nvme_bd_sync,
	.o_read		= nvme_bd_read,
	.o_write	= nvme_bd_write,
	.o_free_space	= nvme_bd_free_space,
};

/*
 * This list will hold commands that have timed out and couldn't be aborted.
 * As we don't know what the hardware may still do with the DMA memory we can't
 * free them, so we'll keep them forever on this list where we can easily look
 * at them with mdb.
 */
static struct list nvme_lost_cmds;
static kmutex_t nvme_lc_mutex;

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
	    offsetof(nvme_cmd_t, nc_list));

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	if (!list_is_empty(&nvme_lost_cmds))
		return (DDI_FAILURE);

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}

static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static void
nvme_free_dma_common(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	nvme_free_dma_common(dma);
	kmem_free(dma, sizeof (*dma));
}

/* ARGSUSED */
static void
nvme_prp_dma_destructor(void *buf, void *private)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;

	nvme_free_dma_common(dma);
}

static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
    size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR which
		 * indicates a driver bug which should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		atomic_inc_32(&nvme->n_dma_bind_err);
		nvme_free_dma_common(dma);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
	    DDI_SUCCESS) {
		*ret = NULL;
		kmem_free(dma, sizeof (nvme_dma_t));
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
nvme_prp_dma_constructor(void *buf, void *private, int flags)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;
	nvme_t *nvme = (nvme_t *)private;

	dma->nd_dmah = NULL;
	dma->nd_acch = NULL;

	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
		return (-1);
	}

	ASSERT(dma->nd_ncookie == 1);

	dma->nd_cached = B_TRUE;

	return (0);
}

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}

static void
nvme_free_cq(nvme_cq_t *cq)
{
	mutex_destroy(&cq->ncq_mutex);

	if (cq->ncq_cmd_taskq != NULL)
		taskq_destroy(cq->ncq_cmd_taskq);

	if (cq->ncq_dma != NULL)
		nvme_free_dma(cq->ncq_dma);

	kmem_free(cq, sizeof (*cq));
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);
	sema_destroy(&qp->nq_sema);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

/*
 * Destroy the pre-allocated cq array, but only free individual completion
 * queues from the given starting index.
 */
static void
nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
{
	uint_t i;

	for (i = start; i < nvme->n_cq_count; i++)
		if (nvme->n_cq[i] != NULL)
			nvme_free_cq(nvme->n_cq[i]);

	kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
}

static int
nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
    uint_t nthr)
{
	nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
	char name[64];		/* large enough for the taskq name */

	mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
	    DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
		goto fail;

	cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
	cq->ncq_nentry = nentry;
	cq->ncq_id = idx;
	cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);

	/*
	 * Each completion queue has its own command taskq.
	 */
	(void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
	    ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);

	cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
	    TASKQ_PREPOPULATE);

	if (cq->ncq_cmd_taskq == NULL) {
		dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
		    "taskq for cq %u", idx);
		goto fail;
	}

	*cqp = cq;
	return (DDI_SUCCESS);

fail:
	nvme_free_cq(cq);
	*cqp = NULL;

	return (DDI_FAILURE);
}

/*
 * Create the n_cq array big enough to hold "ncq" completion queues.
 * If the array already exists it will be re-sized (but only larger).
 * The admin queue is included in this array, which boosts the
 * max number of entries to UINT16_MAX + 1.
 */
static int
nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
{
	nvme_cq_t **cq;
	uint_t i, cq_count;

	ASSERT3U(ncq, >, nvme->n_cq_count);

	cq = nvme->n_cq;
	cq_count = nvme->n_cq_count;

	nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
	nvme->n_cq_count = ncq;

	for (i = 0; i < cq_count; i++)
		nvme->n_cq[i] = cq[i];

	for (; i < nvme->n_cq_count; i++)
		if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
		    DDI_SUCCESS)
			goto fail;

	if (cq != NULL)
		kmem_free(cq, sizeof (*cq) * cq_count);

	return (DDI_SUCCESS);

fail:
	nvme_destroy_cq_array(nvme, cq_count);
	/*
	 * Restore the original array
	 */
	nvme->n_cq_count = cq_count;
	nvme->n_cq = cq;

	return (DDI_FAILURE);
}

static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
    uint_t idx)
{
	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
	uint_t cq_idx;

	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	/*
	 * The NVMe spec defines that a full queue has one empty (unused) slot;
	 * initialize the semaphore accordingly.
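	 * For example, a queue pair created with nentry == 1024 can thus have
	 * at most 1023 commands outstanding at any time.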
	 */
	sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
		goto fail;

	/*
	 * idx == 0 is adminq, those above 0 are shared io completion queues.
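	 * For example (a hypothetical configuration), with n_cq_count == 3
	 * the I/O submission queues 1, 3, 5, ... map to cq 1 while queues
	 * 2, 4, 6, ... map to cq 2.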
	 */
	cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
	qp->nq_cq = nvme->n_cq[cq_idx];
	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
	qp->nq_nentry = nentry;

	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);

	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
	qp->nq_next_cmd = 0;

	*nqp = qp;
	return (DDI_SUCCESS);

fail:
	nvme_free_qpair(qp);
	*nqp = NULL;

	return (DDI_FAILURE);
}

static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);

	if (cmd == NULL)
		return (cmd);

	bzero(cmd, sizeof (nvme_cmd_t));

	cmd->nc_nvme = nvme;

	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));
	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);

	return (cmd);
}

static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
	/* Don't free commands on the lost commands list. */
	if (list_link_active(&cmd->nc_list))
		return;

	if (cmd->nc_dma) {
		nvme_free_dma(cmd->nc_dma);
		cmd->nc_dma = NULL;
	}

	if (cmd->nc_prp) {
		kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
		cmd->nc_prp = NULL;
	}

	cv_destroy(&cmd->nc_cv);
	mutex_destroy(&cmd->nc_mutex);

	kmem_cache_free(nvme_cmd_cache, cmd);
}

static void
nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	sema_p(&qp->nq_sema);
	nvme_submit_cmd_common(qp, cmd);
}

static int
nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	if (cmd->nc_nvme->n_dead) {
		return (EIO);
	}

	if (sema_tryp(&qp->nq_sema) == 0)
		return (EAGAIN);

	nvme_submit_cmd_common(qp, cmd);
	return (0);
}

static void
nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	nvme_reg_sqtdbl_t tail = { 0 };

	mutex_enter(&qp->nq_mutex);
	cmd->nc_completed = B_FALSE;

	/*
	 * Now that we hold the queue pair lock, we must check whether or not
	 * the controller has been listed as dead (e.g. was removed due to
	 * hotplug). This is necessary as otherwise we could race with
	 * nvme_remove_callback(). Because this has not been enqueued, we don't
	 * call nvme_unqueue_cmd(), which is why we must manually decrement the
	 * semaphore.
	 */
	if (cmd->nc_nvme->n_dead) {
		taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
		    cmd, TQ_NOSLEEP, &cmd->nc_tqent);
		sema_v(&qp->nq_sema);
		mutex_exit(&qp->nq_mutex);
		return;
	}

	/*
	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
	 * slot. If the slot is already occupied advance to the next slot and
	 * try again. This can happen for long running commands like async event
	 * requests.
	 */
	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
	qp->nq_cmd[qp->nq_next_cmd] = cmd;

	qp->nq_active_cmds++;

	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;

	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);

	mutex_exit(&qp->nq_mutex);
}

static nvme_cmd_t *
nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
{
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&qp->nq_mutex));
	ASSERT3S(cid, <, qp->nq_nentry);

	cmd = qp->nq_cmd[cid];
	qp->nq_cmd[cid] = NULL;
	ASSERT3U(qp->nq_active_cmds, >, 0);
	qp->nq_active_cmds--;
	sema_v(&qp->nq_sema);

	ASSERT3P(cmd, !=, NULL);
	ASSERT3P(cmd->nc_nvme, ==, nvme);
	ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);

	return (cmd);
}

/*
 * Get the command tied to the next completed cqe and bump the completion
 * queue head counter along.
 */
static nvme_cmd_t *
nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_qpair_t *qp;
	nvme_cqe_t *cqe;
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&cq->ncq_mutex));

	cqe = &cq->ncq_cq[cq->ncq_head];

	/* Check phase tag of CQE. Hardware inverts it for new entries. */
	if (cqe->cqe_sf.sf_p == cq->ncq_phase)
		return (NULL);

	qp = nvme->n_ioq[cqe->cqe_sqid];

	mutex_enter(&qp->nq_mutex);
	cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
	mutex_exit(&qp->nq_mutex);

	ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));

	qp->nq_sqhead = cqe->cqe_sqhd;

	cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;

	/* Toggle phase on wrap-around. */
	if (cq->ncq_head == 0)
		cq->ncq_phase = cq->ncq_phase ? 0 : 1;

	return (cmd);
}

/*
 * Process all completed commands on the io completion queue.
 */
static uint_t
nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;
	uint_t completed = 0;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
		    TQ_NOSLEEP, &cmd->nc_tqent);

		completed++;
	}

	if (completed > 0) {
		/*
		 * Update the completion queue head doorbell.
		 */
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (completed);
}

static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
	nvme_cq_t *cq = qp->nq_cq;
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (cmd);
}

static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (cmd->nc_xfer != NULL)
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown vendor specific command status received: opc = %x, "
	    "sqid = %d, cid = %d, sc = %x, sct = %x, dnr = %d, m = %d",
	    cmd->nc_sqe.sqe_opc, cqe->cqe_sqid, cqe->cqe_cid,
	    cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_dnr,
	    cqe->cqe_sf.sf_m);
	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid opcode in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid field in cmd %p",
			    (void *)cmd);
		return (EIO);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid NS/format in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error. The spec (v1.0, section 4.5.1.2) says
		 * detailed error information is returned as async event,
		 * so we pretty much ignore the error here and handle it
		 * in the async event handler.
		 */
		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_REQUEST:
		/*
		 * Command Abort Requested. This normally happens only when a
		 * command times out.
		 */
		/* TODO: post ereport or change blkdev to handle this? */
		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
		return (ECANCELED);

	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
		/* Command Aborted due to Power Loss Notification */
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
		cmd->nc_nvme->n_dead = B_TRUE;
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
		/* Command Aborted due to SQ Deletion */
		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
		/* Capacity Exceeded */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
		/* Namespace Not Ready */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_SPC_INV_CQ:
		/* Completion Queue Invalid */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_QID:
		/* Invalid Queue Identifier */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
		/* Max Queue Size Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
		return (EINVAL);

	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
		/* Abort Command Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "abort command limit exceeded in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
		/* Async Event Request Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "async event request limit exceeded in cmd %p",
		    (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_INV_INT_VECT:
		/* Invalid Interrupt Vector */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
		/* Invalid Log Page */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FORMAT:
		/* Invalid Format */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_Q_DEL:
		/* Invalid Queue Deletion */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
		/* Conflicting Attributes */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_INV_PROT:
		/* Invalid Protection Information */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_READONLY:
		/* Write to Read Only Range */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_readonly);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EROFS);

	case NVME_CQE_SC_SPC_INV_FW_SLOT:
		/* Invalid Firmware Slot */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FW_IMG:
		/* Invalid Firmware Image */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_RESET:
		/* Conventional Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NSSR:
		/* NVMe Subsystem Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NEXT_RESET:
		/* Activation Requires Reset */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_MTFA:
		/* Activation Requires Maximum Time Violation */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EAGAIN);

	case NVME_CQE_SC_SPC_FW_PROHIBITED:
		/* Activation Prohibited */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_OVERLAP:
		/* Overlapping Firmware Ranges */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD);
		return (EINVAL);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
1683 {
1684 	nvme_cqe_t *cqe = &cmd->nc_cqe;
1685 
	/*
	 * Take a shortcut if the controller is dead, or if the command
	 * status indicates no error.
	 */
1690 	if (cmd->nc_nvme->n_dead)
1691 		return (EIO);
1692 
1693 	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1694 	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
1695 		return (0);
1696 
1697 	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
1698 		return (nvme_check_generic_cmd_status(cmd));
1699 	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
1700 		return (nvme_check_specific_cmd_status(cmd));
1701 	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
1702 		return (nvme_check_integrity_cmd_status(cmd));
1703 	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
1704 		return (nvme_check_vendor_cmd_status(cmd));
1705 
1706 	return (nvme_check_unknown_cmd_status(cmd));
1707 }
1708 
1709 static int
1710 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec)
1711 {
1712 	nvme_t *nvme = abort_cmd->nc_nvme;
1713 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1714 	nvme_abort_cmd_t ac = { 0 };
1715 	int ret = 0;
1716 
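	/*
	 * The abort semaphore throttles the number of simultaneously
	 * outstanding ABORT commands to the limit the controller reports;
	 * see the sema_init() calls in nvme_init() below.
	 */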
1717 	sema_p(&nvme->n_abort_sema);
1718 
1719 	ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
1720 	ac.b.ac_sqid = abort_cmd->nc_sqid;
1721 
1722 	cmd->nc_sqid = 0;
1723 	cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
1724 	cmd->nc_callback = nvme_wakeup_cmd;
1725 	cmd->nc_sqe.sqe_cdw10 = ac.r;
1726 
1727 	/*
1728 	 * Send the ABORT to the hardware. The ABORT command will return _after_
1729 	 * the aborted command has completed (aborted or otherwise), but since
1730 	 * we still hold the aborted command's mutex its callback hasn't been
1731 	 * processed yet.
1732 	 */
1733 	nvme_admin_cmd(cmd, sec);
1734 	sema_v(&nvme->n_abort_sema);
1735 
1736 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1737 		dev_err(nvme->n_dip, CE_WARN,
1738 		    "!ABORT failed with sct = %x, sc = %x",
1739 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1740 		atomic_inc_32(&nvme->n_abort_failed);
1741 	} else {
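		/*
		 * Per the spec, bit 0 of completion queue entry dword 0 is
		 * cleared if the command was successfully aborted and set if
		 * it was not.
		 */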
1742 		dev_err(nvme->n_dip, CE_WARN,
1743 		    "!ABORT of command %d/%d %ssuccessful",
1744 		    abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid,
1745 		    cmd->nc_cqe.cqe_dw0 & 1 ? "un" : "");
1746 		if ((cmd->nc_cqe.cqe_dw0 & 1) == 0)
1747 			atomic_inc_32(&nvme->n_cmd_aborted);
1748 	}
1749 
1750 	nvme_free_cmd(cmd);
1751 	return (ret);
1752 }
1753 
1754 /*
1755  * nvme_wait_cmd -- wait for command completion or timeout
1756  *
1757  * In case of a serious error or a timeout of the abort command the hardware
1758  * will be declared dead and FMA will be notified.
1759  */
1760 static void
1761 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
1762 {
1763 	clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
1764 	nvme_t *nvme = cmd->nc_nvme;
1765 	nvme_reg_csts_t csts;
1766 	nvme_qpair_t *qp;
1767 
1768 	ASSERT(mutex_owned(&cmd->nc_mutex));
1769 
1770 	while (!cmd->nc_completed) {
1771 		if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
1772 			break;
1773 	}
1774 
1775 	if (cmd->nc_completed)
1776 		return;
1777 
	/*
	 * The command timed out.
	 *
	 * Check the controller for fatal status, for any errors associated
	 * with the register or DMA handle, and for a double timeout (the
	 * abort command itself timed out). If necessary log a warning and
	 * notify FMA.
	 */
1785 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1786 	dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
1787 	    "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
1788 	    cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
1789 	atomic_inc_32(&nvme->n_cmd_timeout);
1790 
1791 	if (csts.b.csts_cfs ||
1792 	    nvme_check_regs_hdl(nvme) ||
1793 	    nvme_check_dma_hdl(cmd->nc_dma) ||
1794 	    cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
1795 		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1796 		nvme->n_dead = B_TRUE;
1797 	} else if (nvme_abort_cmd(cmd, sec) == 0) {
1798 		/*
1799 		 * If the abort succeeded the command should complete
1800 		 * immediately with an appropriate status.
1801 		 */
1802 		while (!cmd->nc_completed)
1803 			cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
1804 
1805 		return;
1806 	}
1807 
1808 	qp = nvme->n_ioq[cmd->nc_sqid];
1809 
1810 	mutex_enter(&qp->nq_mutex);
1811 	(void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
1812 	mutex_exit(&qp->nq_mutex);
1813 
1814 	/*
1815 	 * As we don't know what the presumed dead hardware might still do with
1816 	 * the DMA memory, we'll put the command on the lost commands list if it
1817 	 * has any DMA memory.
1818 	 */
1819 	if (cmd->nc_dma != NULL) {
1820 		mutex_enter(&nvme_lc_mutex);
1821 		list_insert_head(&nvme_lost_cmds, cmd);
1822 		mutex_exit(&nvme_lc_mutex);
1823 	}
1824 }
1825 
1826 static void
1827 nvme_wakeup_cmd(void *arg)
1828 {
1829 	nvme_cmd_t *cmd = arg;
1830 
1831 	mutex_enter(&cmd->nc_mutex);
1832 	cmd->nc_completed = B_TRUE;
1833 	cv_signal(&cmd->nc_cv);
1834 	mutex_exit(&cmd->nc_mutex);
1835 }
1836 
1837 static void
1838 nvme_async_event_task(void *arg)
1839 {
1840 	nvme_cmd_t *cmd = arg;
1841 	nvme_t *nvme = cmd->nc_nvme;
1842 	nvme_error_log_entry_t *error_log = NULL;
1843 	nvme_health_log_t *health_log = NULL;
1844 	nvme_nschange_list_t *nslist = NULL;
1845 	size_t logsize = 0;
1846 	nvme_async_event_t event;
1847 
1848 	/*
1849 	 * Check for errors associated with the async request itself. The only
1850 	 * command-specific error is "async event limit exceeded", which
1851 	 * indicates a programming error in the driver and causes a panic in
1852 	 * nvme_check_cmd_status().
	 *
	 * Other possible errors are various scenarios where the async request
	 * was aborted, or internal errors in the device. Internal errors are
	 * reported to FMA; the command aborts need no special handling here.
	 *
	 * Finally, at least qemu's NVMe emulation does not support async
	 * events and will return NVME_CQE_SC_GEN_INV_OPC with DNR set. If
	 * that happens, we stop posting async event requests.
1861 	 */
1862 
1863 	if (nvme_check_cmd_status(cmd) != 0) {
1864 		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1865 		    "!async event request returned failure, sct = %x, "
1866 		    "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
1867 		    cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
1868 		    cmd->nc_cqe.cqe_sf.sf_m);
1869 
1870 		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1871 		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
1872 			cmd->nc_nvme->n_dead = B_TRUE;
1873 			ddi_fm_service_impact(cmd->nc_nvme->n_dip,
1874 			    DDI_SERVICE_LOST);
1875 		}
1876 
1877 		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1878 		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
1879 		    cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
1880 			nvme->n_async_event_supported = B_FALSE;
1881 		}
1882 
1883 		nvme_free_cmd(cmd);
1884 		return;
1885 	}
1886 
1887 	event.r = cmd->nc_cqe.cqe_dw0;
1888 
1889 	/* Clear CQE and re-submit the async request. */
1890 	bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
1891 	nvme_submit_admin_cmd(nvme->n_adminq, cmd);
1892 
1893 	switch (event.b.ae_type) {
1894 	case NVME_ASYNC_TYPE_ERROR:
1895 		if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
1896 			(void) nvme_get_logpage(nvme, B_FALSE,
1897 			    (void **)&error_log, &logsize, event.b.ae_logpage);
1898 		} else {
1899 			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1900 			    "async event reply: %d", event.b.ae_logpage);
1901 			atomic_inc_32(&nvme->n_wrong_logpage);
1902 		}
1903 
1904 		switch (event.b.ae_info) {
1905 		case NVME_ASYNC_ERROR_INV_SQ:
1906 			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1907 			    "invalid submission queue");
1908 			return;
1909 
1910 		case NVME_ASYNC_ERROR_INV_DBL:
1911 			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1912 			    "invalid doorbell write value");
1913 			return;
1914 
1915 		case NVME_ASYNC_ERROR_DIAGFAIL:
1916 			dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
1917 			ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1918 			nvme->n_dead = B_TRUE;
1919 			atomic_inc_32(&nvme->n_diagfail_event);
1920 			break;
1921 
1922 		case NVME_ASYNC_ERROR_PERSISTENT:
1923 			dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
1924 			    "device error");
1925 			ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1926 			nvme->n_dead = B_TRUE;
1927 			atomic_inc_32(&nvme->n_persistent_event);
1928 			break;
1929 
1930 		case NVME_ASYNC_ERROR_TRANSIENT:
1931 			dev_err(nvme->n_dip, CE_WARN, "!transient internal "
1932 			    "device error");
1933 			/* TODO: send ereport */
1934 			atomic_inc_32(&nvme->n_transient_event);
1935 			break;
1936 
1937 		case NVME_ASYNC_ERROR_FW_LOAD:
1938 			dev_err(nvme->n_dip, CE_WARN,
1939 			    "!firmware image load error");
1940 			atomic_inc_32(&nvme->n_fw_load_event);
1941 			break;
1942 		}
1943 		break;
1944 
1945 	case NVME_ASYNC_TYPE_HEALTH:
1946 		if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
1947 			(void) nvme_get_logpage(nvme, B_FALSE,
1948 			    (void **)&health_log, &logsize, event.b.ae_logpage,
1949 			    -1);
1950 		} else {
1951 			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1952 			    "async event reply: %d", event.b.ae_logpage);
1953 			atomic_inc_32(&nvme->n_wrong_logpage);
1954 		}
1955 
1956 		switch (event.b.ae_info) {
1957 		case NVME_ASYNC_HEALTH_RELIABILITY:
1958 			dev_err(nvme->n_dip, CE_WARN,
1959 			    "!device reliability compromised");
1960 			/* TODO: send ereport */
1961 			atomic_inc_32(&nvme->n_reliability_event);
1962 			break;
1963 
1964 		case NVME_ASYNC_HEALTH_TEMPERATURE:
1965 			dev_err(nvme->n_dip, CE_WARN,
1966 			    "!temperature above threshold");
1967 			/* TODO: send ereport */
1968 			atomic_inc_32(&nvme->n_temperature_event);
1969 			break;
1970 
1971 		case NVME_ASYNC_HEALTH_SPARE:
1972 			dev_err(nvme->n_dip, CE_WARN,
1973 			    "!spare space below threshold");
1974 			/* TODO: send ereport */
1975 			atomic_inc_32(&nvme->n_spare_event);
1976 			break;
1977 		}
1978 		break;
1979 
1980 	case NVME_ASYNC_TYPE_NOTICE:
1981 		switch (event.b.ae_info) {
1982 		case NVME_ASYNC_NOTICE_NS_CHANGE:
1983 			dev_err(nvme->n_dip, CE_NOTE,
1984 			    "namespace attribute change event, "
1985 			    "logpage = %x", event.b.ae_logpage);
1986 			atomic_inc_32(&nvme->n_notice_event);
1987 
1988 			if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE)
1989 				break;
1990 
1991 			if (nvme_get_logpage(nvme, B_FALSE, (void **)&nslist,
1992 			    &logsize, event.b.ae_logpage, -1) != 0) {
1993 				break;
1994 			}
1995 
1996 			if (nslist->nscl_ns[0] == UINT32_MAX) {
1997 				dev_err(nvme->n_dip, CE_CONT,
1998 				    "more than %u namespaces have changed.\n",
1999 				    NVME_NSCHANGE_LIST_SIZE);
2000 				break;
2001 			}
2002 
2003 			mutex_enter(&nvme->n_mgmt_mutex);
2004 			for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
2005 				uint32_t nsid = nslist->nscl_ns[i];
2006 
2007 				if (nsid == 0)	/* end of list */
2008 					break;
2009 
2010 				dev_err(nvme->n_dip, CE_NOTE,
2011 				    "!namespace %u (%s) has changed.", nsid,
2012 				    NVME_NSID2NS(nvme, nsid)->ns_name);
2013 
2014 				if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
2015 					continue;
2016 
2017 				bd_state_change(
2018 				    NVME_NSID2NS(nvme, nsid)->ns_bd_hdl);
2019 			}
2020 			mutex_exit(&nvme->n_mgmt_mutex);
2021 
2022 			break;
2023 
2024 		case NVME_ASYNC_NOTICE_FW_ACTIVATE:
2025 			dev_err(nvme->n_dip, CE_NOTE,
2026 			    "firmware activation starting, "
2027 			    "logpage = %x", event.b.ae_logpage);
2028 			atomic_inc_32(&nvme->n_notice_event);
2029 			break;
2030 
2031 		case NVME_ASYNC_NOTICE_TELEMETRY:
2032 			dev_err(nvme->n_dip, CE_NOTE,
2033 			    "telemetry log changed, "
2034 			    "logpage = %x", event.b.ae_logpage);
2035 			atomic_inc_32(&nvme->n_notice_event);
2036 			break;
2037 
2038 		case NVME_ASYNC_NOTICE_NS_ASYMM:
2039 			dev_err(nvme->n_dip, CE_NOTE,
2040 			    "asymmetric namespace access change, "
2041 			    "logpage = %x", event.b.ae_logpage);
2042 			atomic_inc_32(&nvme->n_notice_event);
2043 			break;
2044 
2045 		case NVME_ASYNC_NOTICE_LATENCYLOG:
2046 			dev_err(nvme->n_dip, CE_NOTE,
2047 			    "predictable latency event aggregate log change, "
2048 			    "logpage = %x", event.b.ae_logpage);
2049 			atomic_inc_32(&nvme->n_notice_event);
2050 			break;
2051 
2052 		case NVME_ASYNC_NOTICE_LBASTATUS:
2053 			dev_err(nvme->n_dip, CE_NOTE,
2054 			    "LBA status information alert, "
2055 			    "logpage = %x", event.b.ae_logpage);
2056 			atomic_inc_32(&nvme->n_notice_event);
2057 			break;
2058 
2059 		case NVME_ASYNC_NOTICE_ENDURANCELOG:
2060 			dev_err(nvme->n_dip, CE_NOTE,
2061 			    "endurance group event aggregate log page change, "
2062 			    "logpage = %x", event.b.ae_logpage);
2063 			atomic_inc_32(&nvme->n_notice_event);
2064 			break;
2065 
2066 		default:
2067 			dev_err(nvme->n_dip, CE_WARN,
2068 			    "!unknown notice async event received, "
2069 			    "info = %x, logpage = %x", event.b.ae_info,
2070 			    event.b.ae_logpage);
2071 			atomic_inc_32(&nvme->n_unknown_event);
2072 			break;
2073 		}
2074 		break;
2075 
2076 	case NVME_ASYNC_TYPE_VENDOR:
2077 		dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
2078 		    "received, info = %x, logpage = %x", event.b.ae_info,
2079 		    event.b.ae_logpage);
2080 		atomic_inc_32(&nvme->n_vendor_event);
2081 		break;
2082 
2083 	default:
2084 		dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
2085 		    "type = %x, info = %x, logpage = %x", event.b.ae_type,
2086 		    event.b.ae_info, event.b.ae_logpage);
2087 		atomic_inc_32(&nvme->n_unknown_event);
2088 		break;
2089 	}
2090 
2091 	if (error_log != NULL)
2092 		kmem_free(error_log, logsize);
2093 
2094 	if (health_log != NULL)
2095 		kmem_free(health_log, logsize);
2096 
2097 	if (nslist != NULL)
2098 		kmem_free(nslist, logsize);
2099 }
2100 
2101 static void
2102 nvme_admin_cmd(nvme_cmd_t *cmd, int sec)
2103 {
2104 	mutex_enter(&cmd->nc_mutex);
2105 	nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd);
2106 	nvme_wait_cmd(cmd, sec);
2107 	mutex_exit(&cmd->nc_mutex);
2108 }
2109 
2110 static void
2111 nvme_async_event(nvme_t *nvme)
2112 {
2113 	nvme_cmd_t *cmd;
2114 
2115 	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2116 	cmd->nc_sqid = 0;
2117 	cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
2118 	cmd->nc_callback = nvme_async_event_task;
2119 	cmd->nc_dontpanic = B_TRUE;
2120 
2121 	nvme_submit_admin_cmd(nvme->n_adminq, cmd);
2122 }
2123 
2124 static int
2125 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf,
2126     boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses)
2127 {
2128 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2129 	nvme_format_nvm_t format_nvm = { 0 };
2130 	int ret;
2131 
2132 	format_nvm.b.fm_lbaf = lbaf & 0xf;
2133 	format_nvm.b.fm_ms = ms ? 1 : 0;
2134 	format_nvm.b.fm_pi = pi & 0x7;
2135 	format_nvm.b.fm_pil = pil ? 1 : 0;
2136 	format_nvm.b.fm_ses = ses & 0x7;
2137 
2138 	cmd->nc_sqid = 0;
2139 	cmd->nc_callback = nvme_wakeup_cmd;
2140 	cmd->nc_sqe.sqe_nsid = nsid;
2141 	cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
2142 	cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
2143 
	/*
	 * Some devices, such as the Samsung SM951, don't allow formatting of
	 * all namespaces in one command. Handle that gracefully.
	 */
2148 	if (nsid == (uint32_t)-1)
2149 		cmd->nc_dontpanic = B_TRUE;
2150 	/*
2151 	 * If this format request was initiated by the user, then don't allow a
2152 	 * programmer error to panic the system.
2153 	 */
2154 	if (user)
2155 		cmd->nc_dontpanic = B_TRUE;
2156 
2157 	nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
2158 
2159 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2160 		dev_err(nvme->n_dip, CE_WARN,
2161 		    "!FORMAT failed with sct = %x, sc = %x",
2162 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2163 	}
2164 
2165 	nvme_free_cmd(cmd);
2166 	return (ret);
2167 }
2168 
2169 /*
2170  * The `bufsize` parameter is usually an output parameter, set by this routine
2171  * when filling in the supported types of logpages from the device. However, for
2172  * vendor-specific pages, it is an input parameter, and must be set
2173  * appropriately by callers.
2174  */
2175 static int
2176 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
2177     uint8_t logpage, ...)
2178 {
2179 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2180 	nvme_getlogpage_t getlogpage = { 0 };
2181 	va_list ap;
2182 	int ret;
2183 
2184 	va_start(ap, logpage);
2185 
2186 	cmd->nc_sqid = 0;
2187 	cmd->nc_callback = nvme_wakeup_cmd;
2188 	cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
2189 
2190 	if (user)
2191 		cmd->nc_dontpanic = B_TRUE;
2192 
2193 	getlogpage.b.lp_lid = logpage;
2194 
2195 	switch (logpage) {
2196 	case NVME_LOGPAGE_ERROR:
2197 		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
2198 		*bufsize = MIN(NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE,
2199 		    nvme->n_error_log_len * sizeof (nvme_error_log_entry_t));
2200 		break;
2201 
2202 	case NVME_LOGPAGE_HEALTH:
2203 		cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
2204 		*bufsize = sizeof (nvme_health_log_t);
2205 		break;
2206 
2207 	case NVME_LOGPAGE_FWSLOT:
2208 		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
2209 		*bufsize = sizeof (nvme_fwslot_log_t);
2210 		break;
2211 
2212 	case NVME_LOGPAGE_NSCHANGE:
2213 		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
2214 		*bufsize = sizeof (nvme_nschange_list_t);
2215 		break;
2216 
2217 	default:
2218 		/*
2219 		 * This intentionally only checks against the minimum valid
2220 		 * log page ID. `logpage` is a uint8_t, and `0xFF` is a valid
2221 		 * page ID, so this one-sided check avoids a compiler error
2222 		 * about a check that's always true.
2223 		 */
2224 		if (logpage < NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) {
2225 			dev_err(nvme->n_dip, CE_WARN,
2226 			    "!unknown log page requested: %d", logpage);
2227 			atomic_inc_32(&nvme->n_unknown_logpage);
2228 			ret = EINVAL;
2229 			goto fail;
2230 		}
2231 		cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
2232 	}
2233 
2234 	va_end(ap);
2235 
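	/*
	 * The NUMD field is a zero-based count of the dwords to transfer,
	 * hence the subtraction of 1.
	 */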
2236 	getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1;
2237 
2238 	cmd->nc_sqe.sqe_cdw10 = getlogpage.r;
2239 
2240 	if (nvme_zalloc_dma(nvme, *bufsize,
2241 	    DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2242 		dev_err(nvme->n_dip, CE_WARN,
2243 		    "!nvme_zalloc_dma failed for GET LOG PAGE");
2244 		ret = ENOMEM;
2245 		goto fail;
2246 	}
2247 
2248 	if ((ret = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0)
2249 		goto fail;
2250 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2251 
2252 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2253 		dev_err(nvme->n_dip, CE_WARN,
2254 		    "!GET LOG PAGE failed with sct = %x, sc = %x",
2255 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2256 		goto fail;
2257 	}
2258 
2259 	*buf = kmem_alloc(*bufsize, KM_SLEEP);
2260 	bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
2261 
2262 fail:
2263 	nvme_free_cmd(cmd);
2264 
2265 	return (ret);
2266 }
2267 
2268 static int
2269 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, void **buf)
2270 {
2271 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2272 	int ret;
2273 
2274 	if (buf == NULL)
2275 		return (EINVAL);
2276 
2277 	cmd->nc_sqid = 0;
2278 	cmd->nc_callback = nvme_wakeup_cmd;
2279 	cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
2280 	cmd->nc_sqe.sqe_nsid = nsid;
2281 	cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;
2282 
2283 	if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
2284 	    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2285 		dev_err(nvme->n_dip, CE_WARN,
2286 		    "!nvme_zalloc_dma failed for IDENTIFY");
2287 		ret = ENOMEM;
2288 		goto fail;
2289 	}
2290 
2291 	if (cmd->nc_dma->nd_ncookie > 2) {
2292 		dev_err(nvme->n_dip, CE_WARN,
2293 		    "!too many DMA cookies for IDENTIFY");
2294 		atomic_inc_32(&nvme->n_too_many_cookies);
2295 		ret = ENOMEM;
2296 		goto fail;
2297 	}
2298 
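	/*
	 * Without a PRP list a command can map at most two pages, one per
	 * PRP entry, which is why more than two DMA cookies were rejected
	 * above. The 4k identify buffer fits in at most two pages.
	 */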
2299 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
2300 	if (cmd->nc_dma->nd_ncookie > 1) {
2301 		ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2302 		    &cmd->nc_dma->nd_cookie);
2303 		cmd->nc_sqe.sqe_dptr.d_prp[1] =
2304 		    cmd->nc_dma->nd_cookie.dmac_laddress;
2305 	}
2306 
2307 	if (user)
2308 		cmd->nc_dontpanic = B_TRUE;
2309 
2310 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2311 
2312 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2313 		dev_err(nvme->n_dip, CE_WARN,
2314 		    "!IDENTIFY failed with sct = %x, sc = %x",
2315 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2316 		goto fail;
2317 	}
2318 
2319 	*buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
2320 	bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
2321 
2322 fail:
2323 	nvme_free_cmd(cmd);
2324 
2325 	return (ret);
2326 }
2327 
2328 static int
2329 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2330     uint32_t val, uint32_t *res)
2331 {
2332 	_NOTE(ARGUNUSED(nsid));
2333 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2334 	int ret = EINVAL;
2335 
2336 	ASSERT(res != NULL);
2337 
2338 	cmd->nc_sqid = 0;
2339 	cmd->nc_callback = nvme_wakeup_cmd;
2340 	cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
2341 	cmd->nc_sqe.sqe_cdw10 = feature;
2342 	cmd->nc_sqe.sqe_cdw11 = val;
2343 
2344 	if (user)
2345 		cmd->nc_dontpanic = B_TRUE;
2346 
2347 	switch (feature) {
2348 	case NVME_FEAT_WRITE_CACHE:
2349 		if (!nvme->n_write_cache_present)
2350 			goto fail;
2351 		break;
2352 
2353 	case NVME_FEAT_NQUEUES:
2354 		break;
2355 
2356 	default:
2357 		goto fail;
2358 	}
2359 
2360 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2361 
2362 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2363 		dev_err(nvme->n_dip, CE_WARN,
2364 		    "!SET FEATURES %d failed with sct = %x, sc = %x",
2365 		    feature, cmd->nc_cqe.cqe_sf.sf_sct,
2366 		    cmd->nc_cqe.cqe_sf.sf_sc);
2367 		goto fail;
2368 	}
2369 
2370 	*res = cmd->nc_cqe.cqe_dw0;
2371 
2372 fail:
2373 	nvme_free_cmd(cmd);
2374 	return (ret);
2375 }
2376 
2377 static int
2378 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2379     uint32_t *res, void **buf, size_t *bufsize)
2380 {
2381 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2382 	int ret = EINVAL;
2383 
2384 	ASSERT(res != NULL);
2385 
2386 	if (bufsize != NULL)
2387 		*bufsize = 0;
2388 
2389 	cmd->nc_sqid = 0;
2390 	cmd->nc_callback = nvme_wakeup_cmd;
2391 	cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES;
2392 	cmd->nc_sqe.sqe_cdw10 = feature;
2393 	cmd->nc_sqe.sqe_cdw11 = *res;
2394 
	/*
	 * For some of the optional features there doesn't seem to be a method
	 * of detecting whether they are supported other than using them.
	 * Doing so will cause an "Invalid Field in Command" error, which is
	 * normally considered a programming error.  Set the nc_dontpanic flag
	 * to override the panic in nvme_check_generic_cmd_status().
	 */
2402 	switch (feature) {
2403 	case NVME_FEAT_ARBITRATION:
2404 	case NVME_FEAT_POWER_MGMT:
2405 	case NVME_FEAT_TEMPERATURE:
2406 	case NVME_FEAT_ERROR:
2407 	case NVME_FEAT_NQUEUES:
2408 	case NVME_FEAT_INTR_COAL:
2409 	case NVME_FEAT_INTR_VECT:
2410 	case NVME_FEAT_WRITE_ATOM:
2411 	case NVME_FEAT_ASYNC_EVENT:
2412 		break;
2413 
2414 	case NVME_FEAT_WRITE_CACHE:
2415 		if (!nvme->n_write_cache_present)
2416 			goto fail;
2417 		break;
2418 
2419 	case NVME_FEAT_LBA_RANGE:
2420 		if (!nvme->n_lba_range_supported)
2421 			goto fail;
2422 
2423 		cmd->nc_dontpanic = B_TRUE;
2424 		cmd->nc_sqe.sqe_nsid = nsid;
2425 		ASSERT(bufsize != NULL);
2426 		*bufsize = NVME_LBA_RANGE_BUFSIZE;
2427 		break;
2428 
2429 	case NVME_FEAT_AUTO_PST:
2430 		if (!nvme->n_auto_pst_supported)
2431 			goto fail;
2432 
2433 		ASSERT(bufsize != NULL);
2434 		*bufsize = NVME_AUTO_PST_BUFSIZE;
2435 		break;
2436 
2437 	case NVME_FEAT_PROGRESS:
2438 		if (!nvme->n_progress_supported)
2439 			goto fail;
2440 
2441 		cmd->nc_dontpanic = B_TRUE;
2442 		break;
2443 
2444 	default:
2445 		goto fail;
2446 	}
2447 
2448 	if (user)
2449 		cmd->nc_dontpanic = B_TRUE;
2450 
2451 	if (bufsize != NULL && *bufsize != 0) {
2452 		if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ,
2453 		    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2454 			dev_err(nvme->n_dip, CE_WARN,
2455 			    "!nvme_zalloc_dma failed for GET FEATURES");
2456 			ret = ENOMEM;
2457 			goto fail;
2458 		}
2459 
2460 		if (cmd->nc_dma->nd_ncookie > 2) {
2461 			dev_err(nvme->n_dip, CE_WARN,
2462 			    "!too many DMA cookies for GET FEATURES");
2463 			atomic_inc_32(&nvme->n_too_many_cookies);
2464 			ret = ENOMEM;
2465 			goto fail;
2466 		}
2467 
2468 		cmd->nc_sqe.sqe_dptr.d_prp[0] =
2469 		    cmd->nc_dma->nd_cookie.dmac_laddress;
2470 		if (cmd->nc_dma->nd_ncookie > 1) {
2471 			ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2472 			    &cmd->nc_dma->nd_cookie);
2473 			cmd->nc_sqe.sqe_dptr.d_prp[1] =
2474 			    cmd->nc_dma->nd_cookie.dmac_laddress;
2475 		}
2476 	}
2477 
2478 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2479 
2480 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2481 		boolean_t known = B_TRUE;
2482 
		/* Check if this is an unsupported optional feature. */
2484 		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2485 		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) {
2486 			switch (feature) {
2487 			case NVME_FEAT_LBA_RANGE:
2488 				nvme->n_lba_range_supported = B_FALSE;
2489 				break;
2490 			case NVME_FEAT_PROGRESS:
2491 				nvme->n_progress_supported = B_FALSE;
2492 				break;
2493 			default:
2494 				known = B_FALSE;
2495 				break;
2496 			}
2497 		} else {
2498 			known = B_FALSE;
2499 		}
2500 
2501 		/* Report the error otherwise */
2502 		if (!known) {
2503 			dev_err(nvme->n_dip, CE_WARN,
2504 			    "!GET FEATURES %d failed with sct = %x, sc = %x",
2505 			    feature, cmd->nc_cqe.cqe_sf.sf_sct,
2506 			    cmd->nc_cqe.cqe_sf.sf_sc);
2507 		}
2508 
2509 		goto fail;
2510 	}
2511 
2512 	if (bufsize != NULL && *bufsize != 0) {
2513 		ASSERT(buf != NULL);
2514 		*buf = kmem_alloc(*bufsize, KM_SLEEP);
2515 		bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
2516 	}
2517 
2518 	*res = cmd->nc_cqe.cqe_dw0;
2519 
2520 fail:
2521 	nvme_free_cmd(cmd);
2522 	return (ret);
2523 }
2524 
2525 static int
2526 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
2527 {
2528 	nvme_write_cache_t nwc = { 0 };
2529 
2530 	if (enable)
2531 		nwc.b.wc_wce = 1;
2532 
2533 	return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE,
2534 	    nwc.r, &nwc.r));
2535 }
2536 
2537 static int
2538 nvme_set_nqueues(nvme_t *nvme)
2539 {
2540 	nvme_nqueues_t nq = { 0 };
2541 	int ret;
2542 
2543 	/*
2544 	 * The default is to allocate one completion queue per vector.
2545 	 */
2546 	if (nvme->n_completion_queues == -1)
2547 		nvme->n_completion_queues = nvme->n_intr_cnt;
2548 
2549 	/*
2550 	 * There is no point in having more completion queues than
2551 	 * interrupt vectors.
2552 	 */
2553 	nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2554 	    nvme->n_intr_cnt);
2555 
2556 	/*
2557 	 * The default is to use one submission queue per completion queue.
2558 	 */
2559 	if (nvme->n_submission_queues == -1)
2560 		nvme->n_submission_queues = nvme->n_completion_queues;
2561 
	/*
	 * There is no point in having more completion queues than
	 * submission queues.
	 */
2566 	nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2567 	    nvme->n_submission_queues);
2568 
2569 	ASSERT(nvme->n_submission_queues > 0);
2570 	ASSERT(nvme->n_completion_queues > 0);
2571 
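	/*
	 * The Number of Queues feature uses zero-based values: requesting
	 * n - 1 asks for n queues, and the controller replies in kind.
	 */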
2572 	nq.b.nq_nsq = nvme->n_submission_queues - 1;
2573 	nq.b.nq_ncq = nvme->n_completion_queues - 1;
2574 
2575 	ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
2576 	    &nq.r);
2577 
2578 	if (ret == 0) {
2579 		/*
2580 		 * Never use more than the requested number of queues.
2581 		 */
2582 		nvme->n_submission_queues = MIN(nvme->n_submission_queues,
2583 		    nq.b.nq_nsq + 1);
2584 		nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2585 		    nq.b.nq_ncq + 1);
2586 	}
2587 
2588 	return (ret);
2589 }
2590 
2591 static int
2592 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
2593 {
2594 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2595 	nvme_create_queue_dw10_t dw10 = { 0 };
2596 	nvme_create_cq_dw11_t c_dw11 = { 0 };
2597 	int ret;
2598 
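	/*
	 * The queue size field is zero-based: a value of n creates a queue
	 * with n + 1 entries.
	 */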
2599 	dw10.b.q_qid = cq->ncq_id;
2600 	dw10.b.q_qsize = cq->ncq_nentry - 1;
2601 
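	/*
	 * Use a physically contiguous queue, enable its interrupt, and
	 * spread the completion queues round-robin across the available
	 * interrupt vectors.
	 */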
2602 	c_dw11.b.cq_pc = 1;
2603 	c_dw11.b.cq_ien = 1;
2604 	c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
2605 
2606 	cmd->nc_sqid = 0;
2607 	cmd->nc_callback = nvme_wakeup_cmd;
2608 	cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
2609 	cmd->nc_sqe.sqe_cdw10 = dw10.r;
2610 	cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
2611 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
2612 
2613 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2614 
2615 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2616 		dev_err(nvme->n_dip, CE_WARN,
2617 		    "!CREATE CQUEUE failed with sct = %x, sc = %x",
2618 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2619 	}
2620 
2621 	nvme_free_cmd(cmd);
2622 
2623 	return (ret);
2624 }
2625 
2626 static int
2627 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
2628 {
2629 	nvme_cq_t *cq = qp->nq_cq;
2630 	nvme_cmd_t *cmd;
2631 	nvme_create_queue_dw10_t dw10 = { 0 };
2632 	nvme_create_sq_dw11_t s_dw11 = { 0 };
2633 	int ret;
2634 
	/*
	 * It is possible to have more qpairs than completion queues, and
	 * when idx > ncq_id, that completion queue is shared and has
	 * already been created.
	 */
2640 	if (idx <= cq->ncq_id &&
2641 	    nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
2642 		return (DDI_FAILURE);
2643 
2644 	dw10.b.q_qid = idx;
2645 	dw10.b.q_qsize = qp->nq_nentry - 1;
2646 
2647 	s_dw11.b.sq_pc = 1;
2648 	s_dw11.b.sq_cqid = cq->ncq_id;
2649 
2650 	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2651 	cmd->nc_sqid = 0;
2652 	cmd->nc_callback = nvme_wakeup_cmd;
2653 	cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
2654 	cmd->nc_sqe.sqe_cdw10 = dw10.r;
2655 	cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
2656 	cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
2657 
2658 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2659 
2660 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2661 		dev_err(nvme->n_dip, CE_WARN,
2662 		    "!CREATE SQUEUE failed with sct = %x, sc = %x",
2663 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2664 	}
2665 
2666 	nvme_free_cmd(cmd);
2667 
2668 	return (ret);
2669 }
2670 
2671 static boolean_t
2672 nvme_reset(nvme_t *nvme, boolean_t quiesce)
2673 {
2674 	nvme_reg_csts_t csts;
2675 	int i;
2676 
2677 	nvme_put32(nvme, NVME_REG_CC, 0);
2678 
2679 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2680 	if (csts.b.csts_rdy == 1) {
2681 		nvme_put32(nvme, NVME_REG_CC, 0);
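		/*
		 * CAP.TO is expressed in 500ms units; polling every 50ms for
		 * n_timeout * 10 iterations waits for exactly that long.
		 */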
2682 		for (i = 0; i != nvme->n_timeout * 10; i++) {
2683 			csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2684 			if (csts.b.csts_rdy == 0)
2685 				break;
2686 
2687 			if (quiesce)
2688 				drv_usecwait(50000);
2689 			else
2690 				delay(drv_usectohz(50000));
2691 		}
2692 	}
2693 
2694 	nvme_put32(nvme, NVME_REG_AQA, 0);
2695 	nvme_put32(nvme, NVME_REG_ASQ, 0);
2696 	nvme_put32(nvme, NVME_REG_ACQ, 0);
2697 
2698 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2699 	return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
2700 }
2701 
2702 static void
2703 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
2704 {
2705 	nvme_reg_cc_t cc;
2706 	nvme_reg_csts_t csts;
2707 	int i;
2708 
2709 	ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);
2710 
2711 	cc.r = nvme_get32(nvme, NVME_REG_CC);
2712 	cc.b.cc_shn = mode & 0x3;
2713 	nvme_put32(nvme, NVME_REG_CC, cc.r);
2714 
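	/*
	 * Wait up to one second (10 iterations of 100ms each) for the
	 * shutdown processing to complete.
	 */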
2715 	for (i = 0; i != 10; i++) {
2716 		csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2717 		if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
2718 			break;
2719 
2720 		if (quiesce)
2721 			drv_usecwait(100000);
2722 		else
2723 			delay(drv_usectohz(100000));
2724 	}
2725 }
2726 
2727 /*
2728  * Return length of string without trailing spaces.
2729  */
2730 static int
2731 nvme_strlen(const char *str, int len)
2732 {
2733 	if (len <= 0)
2734 		return (0);
2735 
2736 	while (str[--len] == ' ')
2737 		;
2738 
2739 	return (++len);
2740 }
2741 
2742 static void
2743 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
2744 {
2745 	ulong_t bsize = 0;
2746 	char *msg = "";
2747 
2748 	if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
2749 		goto err;
2750 
2751 	if (!ISP2(bsize)) {
2752 		msg = ": not a power of 2";
2753 		goto err;
2754 	}
2755 
2756 	if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
2757 		msg = ": too low";
2758 		goto err;
2759 	}
2760 
2761 	nvme->n_min_block_size = bsize;
2762 	return;
2763 
2764 err:
2765 	dev_err(nvme->n_dip, CE_WARN,
2766 	    "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
2767 	    "for model '%s'%s", val, model, msg);
2768 
2769 	nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
2770 }
2771 
2772 static void
2773 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
2774     boolean_t *b)
2775 {
2776 	if (strcmp(val, "on") == 0 ||
2777 	    strcmp(val, "true") == 0)
2778 		*b = B_TRUE;
2779 	else if (strcmp(val, "off") == 0 ||
2780 	    strcmp(val, "false") == 0)
2781 		*b = B_FALSE;
2782 	else
2783 		dev_err(nvme->n_dip, CE_WARN,
2784 		    "!nvme-config-list: invalid value for %s '%s'"
2785 		    " for model '%s', ignoring", name, val, model);
2786 }
2787 
2788 static void
2789 nvme_config_list(nvme_t *nvme)
2790 {
2791 	char	**config_list;
2792 	uint_t	nelem;
2793 	int	rv, i;
2794 
	/*
	 * We're following the pattern of 'sd-config-list' here, but extending
	 * it. Instead of two, we have three separate strings for "model",
	 * "fwrev", and "name-value-list".
	 */
2800 	rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
2801 	    DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
2802 
2803 	if (rv != DDI_PROP_SUCCESS) {
2804 		if (rv == DDI_PROP_CANNOT_DECODE) {
2805 			dev_err(nvme->n_dip, CE_WARN,
2806 			    "!nvme-config-list: cannot be decoded");
2807 		}
2808 
2809 		return;
2810 	}
2811 
2812 	if ((nelem % 3) != 0) {
2813 		dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
		    "triplets of <model>/<fwrev>/<name-value-list> strings");
2815 		goto out;
2816 	}
2817 
2818 	for (i = 0; i < nelem; i += 3) {
2819 		char	*model = config_list[i];
2820 		char	*fwrev = config_list[i + 1];
2821 		char	*nvp, *save_nv;
2822 		int	id_model_len, id_fwrev_len;
2823 
2824 		id_model_len = nvme_strlen(nvme->n_idctl->id_model,
2825 		    sizeof (nvme->n_idctl->id_model));
2826 
2827 		if (strlen(model) != id_model_len)
2828 			continue;
2829 
2830 		if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
2831 			continue;
2832 
2833 		id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
2834 		    sizeof (nvme->n_idctl->id_fwrev));
2835 
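		/*
		 * An empty fwrev matches all firmware revisions; otherwise it
		 * is treated as a comma-separated list of revisions, any one
		 * of which may match.
		 */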
2836 		if (strlen(fwrev) != 0) {
2837 			boolean_t match = B_FALSE;
2838 			char *fwr, *last_fw;
2839 
2840 			for (fwr = strtok_r(fwrev, ",", &last_fw);
2841 			    fwr != NULL;
2842 			    fwr = strtok_r(NULL, ",", &last_fw)) {
2843 				if (strlen(fwr) != id_fwrev_len)
2844 					continue;
2845 
2846 				if (strncmp(fwr, nvme->n_idctl->id_fwrev,
2847 				    id_fwrev_len) == 0)
2848 					match = B_TRUE;
2849 			}
2850 
2851 			if (!match)
2852 				continue;
2853 		}
2854 
2855 		/*
2856 		 * We should now have a comma-separated list of name:value
2857 		 * pairs.
2858 		 */
2859 		for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
2860 		    nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
2861 			char	*name = nvp;
2862 			char	*val = strchr(nvp, ':');
2863 
2864 			if (val == NULL || name == val) {
2865 				dev_err(nvme->n_dip, CE_WARN,
2866 				    "!nvme-config-list: <name-value-list> "
2867 				    "for model '%s' is malformed", model);
2868 				goto out;
2869 			}
2870 
2871 			/*
2872 			 * Null-terminate 'name', move 'val' past ':' sep.
2873 			 */
2874 			*val++ = '\0';
2875 
2876 			/*
2877 			 * Process the name:val pairs that we know about.
2878 			 */
2879 			if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
2880 				nvme_config_boolean(nvme, model, name, val,
2881 				    &nvme->n_ignore_unknown_vendor_status);
2882 			} else if (strcmp(name, "min-phys-block-size") == 0) {
2883 				nvme_config_min_block_size(nvme, model, val);
2884 			} else if (strcmp(name, "volatile-write-cache") == 0) {
2885 				nvme_config_boolean(nvme, model, name, val,
2886 				    &nvme->n_write_cache_enabled);
2887 			} else {
2888 				/*
2889 				 * Unknown 'name'.
2890 				 */
2891 				dev_err(nvme->n_dip, CE_WARN,
2892 				    "!nvme-config-list: unknown config '%s' "
2893 				    "for model '%s', ignoring", name, model);
2894 			}
2895 		}
2896 	}
2897 
2898 out:
2899 	ddi_prop_free(config_list);
2900 }
2901 
2902 static void
2903 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
2904 {
2905 	/*
2906 	 * Section 7.7 of the spec describes how to get a unique ID for
2907 	 * the controller: the vendor ID, the model name and the serial
2908 	 * number shall be unique when combined.
2909 	 *
2910 	 * If a namespace has no EUI64 we use the above and add the hex
2911 	 * namespace ID to get a unique ID for the namespace.
2912 	 */
2913 	char model[sizeof (nvme->n_idctl->id_model) + 1];
2914 	char serial[sizeof (nvme->n_idctl->id_serial) + 1];
2915 
2916 	bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2917 	bcopy(nvme->n_idctl->id_serial, serial,
2918 	    sizeof (nvme->n_idctl->id_serial));
2919 
2920 	model[sizeof (nvme->n_idctl->id_model)] = '\0';
2921 	serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
2922 
2923 	NVME_NSID2NS(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
2924 	    nvme->n_idctl->id_vid, model, serial, nsid);
2925 }
2926 
2927 static boolean_t
2928 nvme_allocated_ns(nvme_namespace_t *ns)
2929 {
2930 	nvme_t *nvme = ns->ns_nvme;
2931 
2932 	ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex));
2933 
2934 	/*
2935 	 * Since we don't know any better, we assume all namespaces to be
2936 	 * allocated.
2937 	 */
2938 	return (B_TRUE);
2939 }
2940 
2941 static boolean_t
2942 nvme_active_ns(nvme_namespace_t *ns)
2943 {
2944 	nvme_t *nvme = ns->ns_nvme;
2945 	boolean_t ret = B_FALSE;
2946 	uint64_t *ptr;
2947 
2948 	ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex));
2949 
2950 	/*
2951 	 * Check whether the IDENTIFY NAMESPACE data is zero-filled.
2952 	 */
2953 	for (ptr = (uint64_t *)ns->ns_idns;
2954 	    ptr != (uint64_t *)(ns->ns_idns + 1);
2955 	    ptr++) {
2956 		if (*ptr != 0) {
2957 			ret = B_TRUE;
2958 			break;
2959 		}
2960 	}
2961 
2962 	return (ret);
2963 }
2964 
2965 static int
2966 nvme_init_ns(nvme_t *nvme, int nsid)
2967 {
2968 	nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid);
2969 	nvme_identify_nsid_t *idns;
2970 	boolean_t was_ignored;
2971 	int last_rp;
2972 
2973 	ns->ns_nvme = nvme;
2974 
2975 	ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex));
2976 
2977 	if (nvme_identify(nvme, B_FALSE, nsid, (void **)&idns) != 0) {
2978 		dev_err(nvme->n_dip, CE_WARN,
2979 		    "!failed to identify namespace %d", nsid);
2980 		return (DDI_FAILURE);
2981 	}
2982 
2983 	if (ns->ns_idns != NULL)
2984 		kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
2985 
2986 	ns->ns_idns = idns;
2987 	ns->ns_id = nsid;
2988 
2989 	was_ignored = ns->ns_ignore;
2990 
2991 	ns->ns_allocated = nvme_allocated_ns(ns);
2992 	ns->ns_active = nvme_active_ns(ns);
2993 
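	/*
	 * LBADS is the log2 of the LBA data size, hence the shift below.
	 */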
2994 	ns->ns_block_count = idns->id_nsize;
2995 	ns->ns_block_size =
2996 	    1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
2997 	ns->ns_best_block_size = ns->ns_block_size;
2998 
2999 	/*
3000 	 * Get the EUI64 if present.
3001 	 */
3002 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
3003 		bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
3004 
3005 	/*
3006 	 * Get the NGUID if present.
3007 	 */
3008 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
3009 		bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
3010 
3011 	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
3012 	if (*(uint64_t *)ns->ns_eui64 != 0) {
3013 		uint8_t *eui64 = ns->ns_eui64;
3014 
3015 		(void) snprintf(ns->ns_name, sizeof (ns->ns_name),
3016 		    "%02x%02x%02x%02x%02x%02x%02x%02x",
3017 		    eui64[0], eui64[1], eui64[2], eui64[3],
3018 		    eui64[4], eui64[5], eui64[6], eui64[7]);
3019 	} else {
3020 		(void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d",
3021 		    ns->ns_id);
3022 
3023 		nvme_prepare_devid(nvme, ns->ns_id);
3024 	}
3025 
3026 	/*
3027 	 * Find the LBA format with no metadata and the best relative
3028 	 * performance. A value of 3 means "degraded", 0 is best.
3029 	 */
3030 	last_rp = 3;
3031 	for (int j = 0; j <= idns->id_nlbaf; j++) {
3032 		if (idns->id_lbaf[j].lbaf_lbads == 0)
3033 			break;
3034 		if (idns->id_lbaf[j].lbaf_ms != 0)
3035 			continue;
3036 		if (idns->id_lbaf[j].lbaf_rp >= last_rp)
3037 			continue;
3038 		last_rp = idns->id_lbaf[j].lbaf_rp;
3039 		ns->ns_best_block_size =
3040 		    1 << idns->id_lbaf[j].lbaf_lbads;
3041 	}
3042 
3043 	if (ns->ns_best_block_size < nvme->n_min_block_size)
3044 		ns->ns_best_block_size = nvme->n_min_block_size;
3045 
3048 	/*
3049 	 * We currently don't support namespaces that use either:
3050 	 * - protection information
3051 	 * - illegal block size (< 512)
3052 	 */
3053 	if (idns->id_dps.dp_pinfo) {
3054 		dev_err(nvme->n_dip, CE_WARN,
3055 		    "!ignoring namespace %d, unsupported feature: "
3056 		    "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
3057 		ns->ns_ignore = B_TRUE;
3058 	} else if (ns->ns_block_size < 512) {
3059 		dev_err(nvme->n_dip, CE_WARN,
3060 		    "!ignoring namespace %d, unsupported block size %"PRIu64,
3061 		    nsid, (uint64_t)ns->ns_block_size);
3062 		ns->ns_ignore = B_TRUE;
3063 	} else {
3064 		ns->ns_ignore = B_FALSE;
3065 	}
3066 
3067 	/*
3068 	 * Keep a count of namespaces which are attachable.
3069 	 * See comments in nvme_bd_driveinfo() to understand its effect.
3070 	 */
3071 	if (was_ignored) {
3072 		/*
3073 		 * Previously ignored, but now not. Count it.
3074 		 */
3075 		if (!ns->ns_ignore)
3076 			nvme->n_namespaces_attachable++;
3077 	} else {
3078 		/*
3079 		 * Wasn't ignored previously, but now needs to be.
3080 		 * Discount it.
3081 		 */
3082 		if (ns->ns_ignore)
3083 			nvme->n_namespaces_attachable--;
3084 	}
3085 
3086 	return (DDI_SUCCESS);
3087 }
3088 
3089 static int
3090 nvme_attach_ns(nvme_t *nvme, int nsid)
3091 {
3092 	nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid);
3093 
3094 	ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex));
3095 
3096 	if (ns->ns_ignore)
3097 		return (ENOTSUP);
3098 
3099 	if (ns->ns_bd_hdl == NULL) {
3100 		bd_ops_t ops = nvme_bd_ops;
3101 
3102 		if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
3103 			ops.o_free_space = NULL;
3104 
3105 		ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
3106 		    KM_SLEEP);
3107 
3108 		if (ns->ns_bd_hdl == NULL) {
3109 			dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
3110 			    "handle for namespace id %d", nsid);
3111 			return (EINVAL);
3112 		}
3113 	}
3114 
3115 	if (bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl) != DDI_SUCCESS)
3116 		return (EBUSY);
3117 
3118 	ns->ns_attached = B_TRUE;
3119 
3120 	return (0);
3121 }
3122 
3123 static int
3124 nvme_detach_ns(nvme_t *nvme, int nsid)
3125 {
3126 	nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid);
3127 	int rv;
3128 
3129 	ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex));
3130 
3131 	if (ns->ns_ignore || !ns->ns_attached)
3132 		return (0);
3133 
3134 	ASSERT(ns->ns_bd_hdl != NULL);
3135 	rv = bd_detach_handle(ns->ns_bd_hdl);
3136 	if (rv != DDI_SUCCESS)
3137 		return (EBUSY);
3138 	else
3139 		ns->ns_attached = B_FALSE;
3140 
3141 	return (0);
3142 }
3143 
3144 static int
3145 nvme_init(nvme_t *nvme)
3146 {
3147 	nvme_reg_cc_t cc = { 0 };
3148 	nvme_reg_aqa_t aqa = { 0 };
3149 	nvme_reg_asq_t asq = { 0 };
3150 	nvme_reg_acq_t acq = { 0 };
3151 	nvme_reg_cap_t cap;
3152 	nvme_reg_vs_t vs;
3153 	nvme_reg_csts_t csts;
3154 	int i = 0;
3155 	uint16_t nqueues;
3156 	uint_t tq_threads;
3157 	char model[sizeof (nvme->n_idctl->id_model) + 1];
3158 	char *vendor, *product;
3159 
3160 	/* Check controller version */
3161 	vs.r = nvme_get32(nvme, NVME_REG_VS);
3162 	nvme->n_version.v_major = vs.b.vs_mjr;
3163 	nvme->n_version.v_minor = vs.b.vs_mnr;
3164 	dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
3165 	    nvme->n_version.v_major, nvme->n_version.v_minor);
3166 
3167 	if (nvme->n_version.v_major > nvme_version_major) {
3168 		dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
3169 		    nvme_version_major);
3170 		if (nvme->n_strict_version)
3171 			goto fail;
3172 	}
3173 
3174 	/* retrieve controller configuration */
3175 	cap.r = nvme_get64(nvme, NVME_REG_CAP);
3176 
3177 	if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
3178 		dev_err(nvme->n_dip, CE_WARN,
3179 		    "!NVM command set not supported by hardware");
3180 		goto fail;
3181 	}
3182 
3183 	nvme->n_nssr_supported = cap.b.cap_nssrs;
3184 	nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
3185 	nvme->n_timeout = cap.b.cap_to;
3186 	nvme->n_arbitration_mechanisms = cap.b.cap_ams;
3187 	nvme->n_cont_queues_reqd = cap.b.cap_cqr;
3188 	nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
3189 
3190 	/*
3191 	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
3192 	 * the base page size of 4k (1<<12), so add 12 here to get the real
3193 	 * page size value.
3194 	 */
3195 	nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
3196 	    cap.b.cap_mpsmax + 12);
3197 	nvme->n_pagesize = 1UL << (nvme->n_pageshift);
3198 
3199 	/*
3200 	 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
3201 	 */
3202 	nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
3203 	nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
3204 
	/*
	 * Set up PRP DMA to transfer 1 page-aligned page at a time.
	 * Maxxfer may be increased once we have identified the controller
	 * limits.
	 */
3209 	nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
3210 	nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
3211 	nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
3212 	nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
3213 
3214 	/*
3215 	 * Reset controller if it's still in ready state.
3216 	 */
3217 	if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
3218 		dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
3219 		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
3220 		nvme->n_dead = B_TRUE;
3221 		goto fail;
3222 	}
3223 
3224 	/*
3225 	 * Create the cq array with one completion queue to be assigned
3226 	 * to the admin queue pair and a limited number of taskqs (4).
3227 	 */
3228 	if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
3229 	    DDI_SUCCESS) {
3230 		dev_err(nvme->n_dip, CE_WARN,
3231 		    "!failed to pre-allocate admin completion queue");
3232 		goto fail;
3233 	}
3234 	/*
3235 	 * Create the admin queue pair.
3236 	 */
3237 	if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
3238 	    != DDI_SUCCESS) {
3239 		dev_err(nvme->n_dip, CE_WARN,
3240 		    "!unable to allocate admin qpair");
3241 		goto fail;
3242 	}
3243 	nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
3244 	nvme->n_ioq[0] = nvme->n_adminq;
3245 
3246 	nvme->n_progress |= NVME_ADMIN_QUEUE;
3247 
3248 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
3249 	    "admin-queue-len", nvme->n_admin_queue_len);
3250 
3251 	aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
3252 	asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
3253 	acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
3254 
3255 	ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
3256 	ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
3257 
3258 	nvme_put32(nvme, NVME_REG_AQA, aqa.r);
3259 	nvme_put64(nvme, NVME_REG_ASQ, asq);
3260 	nvme_put64(nvme, NVME_REG_ACQ, acq);
3261 
3262 	cc.b.cc_ams = 0;	/* use Round-Robin arbitration */
3263 	cc.b.cc_css = 0;	/* use NVM command set */
3264 	cc.b.cc_mps = nvme->n_pageshift - 12;
3265 	cc.b.cc_shn = 0;	/* no shutdown in progress */
3266 	cc.b.cc_en = 1;		/* enable controller */
3267 	cc.b.cc_iosqes = 6;	/* submission queue entry is 2^6 bytes long */
3268 	cc.b.cc_iocqes = 4;	/* completion queue entry is 2^4 bytes long */
3269 
3270 	nvme_put32(nvme, NVME_REG_CC, cc.r);
3271 
3272 	/*
3273 	 * Wait for the controller to become ready.
3274 	 */
3275 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3276 	if (csts.b.csts_rdy == 0) {
3277 		for (i = 0; i != nvme->n_timeout * 10; i++) {
3278 			delay(drv_usectohz(50000));
3279 			csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3280 
3281 			if (csts.b.csts_cfs == 1) {
3282 				dev_err(nvme->n_dip, CE_WARN,
3283 				    "!controller fatal status at init");
3284 				ddi_fm_service_impact(nvme->n_dip,
3285 				    DDI_SERVICE_LOST);
3286 				nvme->n_dead = B_TRUE;
3287 				goto fail;
3288 			}
3289 
3290 			if (csts.b.csts_rdy == 1)
3291 				break;
3292 		}
3293 	}
3294 
3295 	if (csts.b.csts_rdy == 0) {
3296 		dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
3297 		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
3298 		nvme->n_dead = B_TRUE;
3299 		goto fail;
3300 	}
3301 
3302 	/*
3303 	 * Assume an abort command limit of 1. We'll destroy and re-init
3304 	 * that later when we know the true abort command limit.
3305 	 */
3306 	sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
3307 
3308 	/*
3309 	 * Set up initial interrupt for admin queue.
3310 	 */
3311 	if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
3312 	    != DDI_SUCCESS) &&
3313 	    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
3314 	    != DDI_SUCCESS) &&
3315 	    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
3316 	    != DDI_SUCCESS)) {
3317 		dev_err(nvme->n_dip, CE_WARN,
3318 		    "!failed to setup initial interrupt");
3319 		goto fail;
3320 	}
3321 
	/*
	 * Post an asynchronous event command to catch errors.
	 * We assume asynchronous events are supported as required by the
	 * specification (Figure 40 in section 5 of NVMe 1.2).
	 * However, since at least qemu does not follow the specification,
	 * we need a mechanism to protect ourselves.
	 */
3329 	nvme->n_async_event_supported = B_TRUE;
3330 	nvme_async_event(nvme);
3331 
3332 	/*
3333 	 * Identify Controller
3334 	 */
3335 	if (nvme_identify(nvme, B_FALSE, 0, (void **)&nvme->n_idctl) != 0) {
3336 		dev_err(nvme->n_dip, CE_WARN,
3337 		    "!failed to identify controller");
3338 		goto fail;
3339 	}
3340 
3341 	/*
3342 	 * Process nvme-config-list (if present) in nvme.conf.
3343 	 */
3344 	nvme_config_list(nvme);
3345 
3346 	/*
3347 	 * Get Vendor & Product ID
3348 	 */
3349 	bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
3350 	model[sizeof (nvme->n_idctl->id_model)] = '\0';
3351 	sata_split_model(model, &vendor, &product);
3352 
3353 	if (vendor == NULL)
3354 		nvme->n_vendor = strdup("NVMe");
3355 	else
3356 		nvme->n_vendor = strdup(vendor);
3357 
3358 	nvme->n_product = strdup(product);
3359 
3360 	/*
3361 	 * Get controller limits.
3362 	 */
3363 	nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
3364 	    MIN(nvme->n_admin_queue_len / 10,
3365 	    MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
3366 
3367 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
3368 	    "async-event-limit", nvme->n_async_event_limit);
3369 
3370 	nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
3371 
3372 	/*
3373 	 * Reinitialize the semaphore with the true abort command limit
3374 	 * supported by the hardware. It's not necessary to disable interrupts
3375 	 * as only command aborts use the semaphore, and no commands are
3376 	 * executed or aborted while we're here.
3377 	 */
3378 	sema_destroy(&nvme->n_abort_sema);
3379 	sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
3380 	    SEMA_DRIVER, NULL);
3381 
3382 	nvme->n_progress |= NVME_CTRL_LIMITS;
3383 
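	/*
	 * MDTS is a power of two in units of the minimum memory page size;
	 * a value of 0 means the controller reports no transfer size limit,
	 * in which case we pick a large default.
	 */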
3384 	if (nvme->n_idctl->id_mdts == 0)
3385 		nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
3386 	else
3387 		nvme->n_max_data_transfer_size =
3388 		    1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
3389 
3390 	nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
3391 
3392 	/*
3393 	 * Limit n_max_data_transfer_size to what we can handle in one PRP.
3394 	 * Chained PRPs are currently unsupported.
3395 	 *
3396 	 * This is a no-op on hardware which doesn't support a transfer size
3397 	 * big enough to require chained PRPs.
3398 	 */
3399 	nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
3400 	    (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
3401 
3402 	nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
3403 
	/*
	 * Make sure the minimum required queue entry size is no larger, and
	 * the maximum supported entry size no smaller, than the sizes we
	 * use by default.
	 */
3408 
3409 	if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
3410 	    ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
3411 	    ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
3412 	    ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
3413 		goto fail;
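
	/*
	 * For reference: sizeof (nvme_sqe_t) is 64 and sizeof (nvme_cqe_t)
	 * is 16, the entry sizes used by NVMe 1.x, so this rejects any
	 * device whose reported SQES/CQES ranges exclude 2^6 and 2^4.
	 */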
3414 
3415 	/*
3416 	 * Check for the presence of a Volatile Write Cache. If present,
3417 	 * enable or disable based on the value of the property
3418 	 * volatile-write-cache-enable (default is enabled).
3419 	 */
3420 	nvme->n_write_cache_present =
3421 	    nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
3422 
3423 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
3424 	    "volatile-write-cache-present",
3425 	    nvme->n_write_cache_present ? 1 : 0);
3426 
3427 	if (!nvme->n_write_cache_present) {
3428 		nvme->n_write_cache_enabled = B_FALSE;
3429 	} else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
3430 	    != 0) {
3431 		dev_err(nvme->n_dip, CE_WARN,
3432 		    "!failed to %sable volatile write cache",
3433 		    nvme->n_write_cache_enabled ? "en" : "dis");
3434 		/*
3435 		 * Assume the cache is (still) enabled.
3436 		 */
3437 		nvme->n_write_cache_enabled = B_TRUE;
3438 	}
3439 
3440 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
3441 	    "volatile-write-cache-enable",
3442 	    nvme->n_write_cache_enabled ? 1 : 0);
3443 
	/*
	 * Assume the LBA Range Type feature is supported. If it isn't,
	 * this will be set to B_FALSE by nvme_get_features().
	 */
3448 	nvme->n_lba_range_supported = B_TRUE;
3449 
3450 	/*
3451 	 * Check support for Autonomous Power State Transition.
3452 	 */
3453 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
3454 		nvme->n_auto_pst_supported =
3455 		    nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE;
3456 
	/*
	 * Assume the Software Progress Marker feature is supported. If it
	 * isn't, this will be set to B_FALSE by nvme_get_features().
	 */
3461 	nvme->n_progress_supported = B_TRUE;
3462 
3463 	/*
3464 	 * Get number of supported namespaces and allocate namespace array.
3465 	 */
3466 	nvme->n_namespace_count = nvme->n_idctl->id_nn;
3467 
3468 	if (nvme->n_namespace_count == 0) {
3469 		dev_err(nvme->n_dip, CE_WARN,
3470 		    "!controllers without namespaces are not supported");
3471 		goto fail;
3472 	}
3473 
3474 	if (nvme->n_namespace_count > NVME_MINOR_MAX) {
3475 		dev_err(nvme->n_dip, CE_WARN,
3476 		    "!too many namespaces: %d, limiting to %d\n",
3477 		    nvme->n_namespace_count, NVME_MINOR_MAX);
3478 		nvme->n_namespace_count = NVME_MINOR_MAX;
3479 	}
3480 
3481 	nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
3482 	    nvme->n_namespace_count, KM_SLEEP);
3483 
3484 	/*
3485 	 * Try to set up MSI/MSI-X interrupts.
3486 	 */
3487 	if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
3488 	    != 0) {
3489 		nvme_release_interrupts(nvme);
3490 
3491 		nqueues = MIN(UINT16_MAX, ncpus);
3492 
3493 		if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
3494 		    nqueues) != DDI_SUCCESS) &&
3495 		    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
3496 		    nqueues) != DDI_SUCCESS)) {
3497 			dev_err(nvme->n_dip, CE_WARN,
3498 			    "!failed to setup MSI/MSI-X interrupts");
3499 			goto fail;
3500 		}
3501 	}
3502 
3503 	/*
3504 	 * Create I/O queue pairs.
3505 	 */
3506 
3507 	if (nvme_set_nqueues(nvme) != 0) {
3508 		dev_err(nvme->n_dip, CE_WARN,
3509 		    "!failed to set number of I/O queues to %d",
3510 		    nvme->n_intr_cnt);
3511 		goto fail;
3512 	}
3513 
3514 	/*
3515 	 * Reallocate I/O queue array
3516 	 */
3517 	kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
3518 	nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
3519 	    (nvme->n_submission_queues + 1), KM_SLEEP);
3520 	nvme->n_ioq[0] = nvme->n_adminq;
3521 
3522 	/*
3523 	 * There should always be at least as many submission queues
3524 	 * as completion queues.
3525 	 */
3526 	ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
3527 
3528 	nvme->n_ioq_count = nvme->n_submission_queues;
3529 
3530 	nvme->n_io_squeue_len =
3531 	    MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
3532 
3533 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
3534 	    nvme->n_io_squeue_len);
3535 
	/*
	 * Pre-allocate completion queues.
	 * When there are the same number of submission and completion
	 * queues, there is no value in having a completion queue longer
	 * than the submission queue.
	 */
3542 	if (nvme->n_submission_queues == nvme->n_completion_queues)
3543 		nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3544 		    nvme->n_io_squeue_len);
3545 
3546 	nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3547 	    nvme->n_max_queue_entries);
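
	/*
	 * E.g. with the default io-squeue-len of 1024 (an illustrative
	 * value) and the doubled io-cqueue-len default set in
	 * nvme_attach(), equal submission and completion queue counts
	 * clamp the completion queue length back down to 1024.
	 */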
3548 
3549 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
3550 	    nvme->n_io_cqueue_len);
3551 
	/*
	 * Assign an equal number of taskq threads to each completion
	 * queue, capping the total number of threads at the number
	 * of CPUs.
	 */
3557 	tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
3558 
	/*
	 * If the calculation above yields zero, we still need at least
	 * one thread per completion queue.
	 */
3563 	tq_threads = MAX(1, tq_threads);
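
	/*
	 * For example, 32 CPUs and 4 completion queues yield 8 taskq
	 * threads per queue, while 8 CPUs and 16 completion queues would
	 * compute 0 and are bumped up to 1 thread per queue here.
	 */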
3564 
3565 	if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
3566 	    nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
3567 		dev_err(nvme->n_dip, CE_WARN,
3568 		    "!failed to pre-allocate completion queues");
3569 		goto fail;
3570 	}
3571 
	/*
	 * If we use fewer completion queues than interrupt vectors, return
	 * some of the interrupt vectors back to the system.
	 */
3576 	if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
3577 		nvme_release_interrupts(nvme);
3578 
3579 		if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
3580 		    nvme->n_completion_queues + 1) != DDI_SUCCESS) {
3581 			dev_err(nvme->n_dip, CE_WARN,
3582 			    "!failed to reduce number of interrupts");
3583 			goto fail;
3584 		}
3585 	}
3586 
3587 	/*
3588 	 * Alloc & register I/O queue pairs
3589 	 */
3590 
3591 	for (i = 1; i != nvme->n_ioq_count + 1; i++) {
3592 		if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
3593 		    &nvme->n_ioq[i], i) != DDI_SUCCESS) {
3594 			dev_err(nvme->n_dip, CE_WARN,
3595 			    "!unable to allocate I/O qpair %d", i);
3596 			goto fail;
3597 		}
3598 
3599 		if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
3600 			dev_err(nvme->n_dip, CE_WARN,
3601 			    "!unable to create I/O qpair %d", i);
3602 			goto fail;
3603 		}
3604 	}
3605 
	/*
	 * Post more asynchronous event commands to reduce event reporting
	 * latency, as suggested by the spec.
	 */
3610 	if (nvme->n_async_event_supported) {
3611 		for (i = 1; i != nvme->n_async_event_limit; i++)
3612 			nvme_async_event(nvme);
3613 	}
3614 
3615 	return (DDI_SUCCESS);
3616 
3617 fail:
3618 	(void) nvme_reset(nvme, B_FALSE);
3619 	return (DDI_FAILURE);
3620 }
3621 
3622 static uint_t
3623 nvme_intr(caddr_t arg1, caddr_t arg2)
3624 {
3625 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
3626 	nvme_t *nvme = (nvme_t *)arg1;
3627 	int inum = (int)(uintptr_t)arg2;
3628 	int ccnt = 0;
3629 	int qnum;
3630 
3631 	if (inum >= nvme->n_intr_cnt)
3632 		return (DDI_INTR_UNCLAIMED);
3633 
3634 	if (nvme->n_dead)
3635 		return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
3636 		    DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
3637 
3638 	/*
3639 	 * The interrupt vector a queue uses is calculated as queue_idx %
3640 	 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
3641 	 * in steps of n_intr_cnt to process all queues using this vector.
3642 	 */
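	/*
	 * E.g. with n_intr_cnt == 4, vector 1 services queues 1, 5, 9, ...
	 * while vector 0 services the admin completion queue (n_cq[0]) as
	 * well as I/O completion queues 4, 8, ...
	 */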
3643 	for (qnum = inum;
3644 	    qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
3645 	    qnum += nvme->n_intr_cnt) {
3646 		ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
3647 	}
3648 
3649 	return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
3650 }
3651 
3652 static void
3653 nvme_release_interrupts(nvme_t *nvme)
3654 {
3655 	int i;
3656 
3657 	for (i = 0; i < nvme->n_intr_cnt; i++) {
3658 		if (nvme->n_inth[i] == NULL)
3659 			break;
3660 
3661 		if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
3662 			(void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
3663 		else
3664 			(void) ddi_intr_disable(nvme->n_inth[i]);
3665 
3666 		(void) ddi_intr_remove_handler(nvme->n_inth[i]);
3667 		(void) ddi_intr_free(nvme->n_inth[i]);
3668 	}
3669 
3670 	kmem_free(nvme->n_inth, nvme->n_inth_sz);
3671 	nvme->n_inth = NULL;
3672 	nvme->n_inth_sz = 0;
3673 
3674 	nvme->n_progress &= ~NVME_INTERRUPTS;
3675 }
3676 
3677 static int
3678 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
3679 {
3680 	int nintrs, navail, count;
3681 	int ret;
3682 	int i;
3683 
3684 	if (nvme->n_intr_types == 0) {
3685 		ret = ddi_intr_get_supported_types(nvme->n_dip,
3686 		    &nvme->n_intr_types);
3687 		if (ret != DDI_SUCCESS) {
3688 			dev_err(nvme->n_dip, CE_WARN,
3689 			    "!%s: ddi_intr_get_supported types failed",
3690 			    __func__);
3691 			return (ret);
3692 		}
3693 #ifdef __x86
3694 		if (get_hwenv() == HW_VMWARE)
3695 			nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
3696 #endif
3697 	}
3698 
3699 	if ((nvme->n_intr_types & intr_type) == 0)
3700 		return (DDI_FAILURE);
3701 
3702 	ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
3703 	if (ret != DDI_SUCCESS) {
3704 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
3705 		    __func__);
3706 		return (ret);
3707 	}
3708 
3709 	ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
3710 	if (ret != DDI_SUCCESS) {
3711 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
3712 		    __func__);
3713 		return (ret);
3714 	}
3715 
3716 	/* We want at most one interrupt per queue pair. */
3717 	if (navail > nqpairs)
3718 		navail = nqpairs;
3719 
3720 	nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
3721 	nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
3722 
3723 	ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
3724 	    &count, 0);
3725 	if (ret != DDI_SUCCESS) {
3726 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
3727 		    __func__);
3728 		goto fail;
3729 	}
3730 
3731 	nvme->n_intr_cnt = count;
3732 
3733 	ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
3734 	if (ret != DDI_SUCCESS) {
3735 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
3736 		    __func__);
3737 		goto fail;
3738 	}
3739 
3740 	for (i = 0; i < count; i++) {
3741 		ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
3742 		    (void *)nvme, (void *)(uintptr_t)i);
3743 		if (ret != DDI_SUCCESS) {
3744 			dev_err(nvme->n_dip, CE_WARN,
3745 			    "!%s: ddi_intr_add_handler failed", __func__);
3746 			goto fail;
3747 		}
3748 	}
3749 
3750 	(void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
3751 
3752 	for (i = 0; i < count; i++) {
3753 		if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
3754 			ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
3755 		else
3756 			ret = ddi_intr_enable(nvme->n_inth[i]);
3757 
3758 		if (ret != DDI_SUCCESS) {
3759 			dev_err(nvme->n_dip, CE_WARN,
3760 			    "!%s: enabling interrupt %d failed", __func__, i);
3761 			goto fail;
3762 		}
3763 	}
3764 
3765 	nvme->n_intr_type = intr_type;
3766 
3767 	nvme->n_progress |= NVME_INTERRUPTS;
3768 
3769 	return (DDI_SUCCESS);
3770 
3771 fail:
3772 	nvme_release_interrupts(nvme);
3773 
3774 	return (ret);
3775 }
3776 
3777 static int
3778 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
3779 {
3780 	_NOTE(ARGUNUSED(arg));
3781 
3782 	pci_ereport_post(dip, fm_error, NULL);
3783 	return (fm_error->fme_status);
3784 }
3785 
3786 static void
3787 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
3788     void *b)
3789 {
3790 	nvme_t *nvme = a;
3791 
3792 	nvme->n_dead = B_TRUE;
3793 
3794 	/*
3795 	 * Fail all outstanding commands, including those in the admin queue
3796 	 * (queue 0).
3797 	 */
3798 	for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
3799 		nvme_qpair_t *qp = nvme->n_ioq[i];
3800 
3801 		mutex_enter(&qp->nq_mutex);
3802 		for (size_t j = 0; j < qp->nq_nentry; j++) {
3803 			nvme_cmd_t *cmd = qp->nq_cmd[j];
3804 			nvme_cmd_t *u_cmd;
3805 
3806 			if (cmd == NULL) {
3807 				continue;
3808 			}
3809 
3810 			/*
3811 			 * Since we have the queue lock held the entire time we
3812 			 * iterate over it, it's not possible for the queue to
3813 			 * change underneath us. Thus, we don't need to check
3814 			 * that the return value of nvme_unqueue_cmd matches the
3815 			 * requested cmd to unqueue.
3816 			 */
3817 			u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
3818 			taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
3819 			    cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
3820 
3821 			ASSERT3P(u_cmd, ==, cmd);
3822 		}
3823 		mutex_exit(&qp->nq_mutex);
3824 	}
3825 }
3826 
3827 static int
3828 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3829 {
3830 	nvme_t *nvme;
3831 	int instance;
3832 	int nregs;
3833 	off_t regsize;
3834 	int i;
3835 	char name[32];
3836 	boolean_t attached_ns;
3837 
3838 	if (cmd != DDI_ATTACH)
3839 		return (DDI_FAILURE);
3840 
3841 	instance = ddi_get_instance(dip);
3842 
3843 	if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
3844 		return (DDI_FAILURE);
3845 
3846 	nvme = ddi_get_soft_state(nvme_state, instance);
3847 	ddi_set_driver_private(dip, nvme);
3848 	nvme->n_dip = dip;
3849 
3850 	/* Set up event handlers for hot removal. */
3851 	if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
3852 	    &nvme->n_rm_cookie) != DDI_SUCCESS) {
3853 		goto fail;
3854 	}
3855 	if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
3856 	    nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
3857 	    DDI_SUCCESS) {
3858 		goto fail;
3859 	}
3860 
3861 	mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
3862 	nvme->n_progress |= NVME_MUTEX_INIT;
3863 
3864 	nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3865 	    DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
3866 	nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
3867 	    dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
3868 	    B_TRUE : B_FALSE;
3869 	nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3870 	    DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
3871 	nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3872 	    DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
3873 	/*
3874 	 * Double up the default for completion queues in case of
3875 	 * queue sharing.
3876 	 */
3877 	nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3878 	    DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
3879 	nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3880 	    DDI_PROP_DONTPASS, "async-event-limit",
3881 	    NVME_DEFAULT_ASYNC_EVENT_LIMIT);
3882 	nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3883 	    DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
3884 	    B_TRUE : B_FALSE;
3885 	nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3886 	    DDI_PROP_DONTPASS, "min-phys-block-size",
3887 	    NVME_DEFAULT_MIN_BLOCK_SIZE);
3888 	nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3889 	    DDI_PROP_DONTPASS, "max-submission-queues", -1);
3890 	nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3891 	    DDI_PROP_DONTPASS, "max-completion-queues", -1);
3892 
3893 	if (!ISP2(nvme->n_min_block_size) ||
3894 	    (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
3895 		dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
3896 		    "using default %d", ISP2(nvme->n_min_block_size) ?
3897 		    "too low" : "not a power of 2",
3898 		    NVME_DEFAULT_MIN_BLOCK_SIZE);
3899 		nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3900 	}
3901 
3902 	if (nvme->n_submission_queues != -1 &&
3903 	    (nvme->n_submission_queues < 1 ||
3904 	    nvme->n_submission_queues > UINT16_MAX)) {
3905 		dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not "
3906 		    "valid. Must be [1..%d]", nvme->n_submission_queues,
3907 		    UINT16_MAX);
3908 		nvme->n_submission_queues = -1;
3909 	}
3910 
3911 	if (nvme->n_completion_queues != -1 &&
3912 	    (nvme->n_completion_queues < 1 ||
3913 	    nvme->n_completion_queues > UINT16_MAX)) {
3914 		dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not "
3915 		    "valid. Must be [1..%d]", nvme->n_completion_queues,
3916 		    UINT16_MAX);
3917 		nvme->n_completion_queues = -1;
3918 	}
3919 
3920 	if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
3921 		nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
3922 	else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
3923 		nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
3924 
3925 	if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
3926 		nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
3927 	if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
3928 		nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
3929 
3930 	if (nvme->n_async_event_limit < 1)
3931 		nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
3932 
3933 	nvme->n_reg_acc_attr = nvme_reg_acc_attr;
3934 	nvme->n_queue_dma_attr = nvme_queue_dma_attr;
3935 	nvme->n_prp_dma_attr = nvme_prp_dma_attr;
3936 	nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
3937 
3938 	/*
3939 	 * Set up FMA support.
3940 	 */
3941 	nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
3942 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
3943 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
3944 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
3945 
3946 	ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
3947 
3948 	if (nvme->n_fm_cap) {
3949 		if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
3950 			nvme->n_reg_acc_attr.devacc_attr_access =
3951 			    DDI_FLAGERR_ACC;
3952 
3953 		if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
3954 			nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3955 			nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3956 		}
3957 
3958 		if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
3959 		    DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3960 			pci_ereport_setup(dip);
3961 
3962 		if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3963 			ddi_fm_handler_register(dip, nvme_fm_errcb,
3964 			    (void *)nvme);
3965 	}
3966 
3967 	nvme->n_progress |= NVME_FMA_INIT;
3968 
3969 	/*
3970 	 * The spec defines several register sets. Only the controller
3971 	 * registers (set 1) are currently used.
3972 	 */
3973 	if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
3974 	    nregs < 2 ||
3975 	    ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
3976 		goto fail;
3977 
3978 	if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
3979 	    &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
3980 		dev_err(dip, CE_WARN, "!failed to map regset 1");
3981 		goto fail;
3982 	}
3983 
3984 	nvme->n_progress |= NVME_REGS_MAPPED;
3985 
3986 	/*
3987 	 * Create PRP DMA cache
3988 	 */
3989 	(void) snprintf(name, sizeof (name), "%s%d_prp_cache",
3990 	    ddi_driver_name(dip), ddi_get_instance(dip));
3991 	nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
3992 	    0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
3993 	    NULL, (void *)nvme, NULL, 0);
3994 
3995 	if (nvme_init(nvme) != DDI_SUCCESS)
3996 		goto fail;
3997 
3998 	/*
3999 	 * Initialize the driver with the UFM subsystem
4000 	 */
4001 	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
4002 	    &nvme->n_ufmh, nvme) != 0) {
4003 		dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
4004 		goto fail;
4005 	}
4006 	mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
4007 	ddi_ufm_update(nvme->n_ufmh);
4008 	nvme->n_progress |= NVME_UFM_INIT;
4009 
4010 	mutex_init(&nvme->n_mgmt_mutex, NULL, MUTEX_DRIVER, NULL);
4011 	nvme->n_progress |= NVME_MGMT_INIT;
4012 
4013 	/*
4014 	 * Identify namespaces.
4015 	 */
4016 	mutex_enter(&nvme->n_mgmt_mutex);
4017 
4018 	for (i = 1; i <= nvme->n_namespace_count; i++) {
4019 		nvme_namespace_t *ns = NVME_NSID2NS(nvme, i);
4020 
4021 		/*
4022 		 * Namespaces start out ignored. When nvme_init_ns() checks
4023 		 * their properties and finds they can be used, it will set
4024 		 * ns_ignore to B_FALSE. It will also use this state change
4025 		 * to keep an accurate count of attachable namespaces.
4026 		 */
4027 		ns->ns_ignore = B_TRUE;
4028 		if (nvme_init_ns(nvme, i) != 0) {
4029 			mutex_exit(&nvme->n_mgmt_mutex);
4030 			goto fail;
4031 		}
4032 
4033 		if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
4034 		    NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
4035 		    DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
4036 			mutex_exit(&nvme->n_mgmt_mutex);
4037 			dev_err(dip, CE_WARN,
4038 			    "!failed to create minor node for namespace %d", i);
4039 			goto fail;
4040 		}
4041 	}
4042 
4043 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
4044 	    NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0)
4045 	    != DDI_SUCCESS) {
4046 		mutex_exit(&nvme->n_mgmt_mutex);
4047 		dev_err(dip, CE_WARN, "nvme_attach: "
4048 		    "cannot create devctl minor node");
4049 		goto fail;
4050 	}
4051 
4052 	attached_ns = B_FALSE;
4053 	for (i = 1; i <= nvme->n_namespace_count; i++) {
4054 		int rv;
4055 
4056 		rv = nvme_attach_ns(nvme, i);
4057 		if (rv == 0) {
4058 			attached_ns = B_TRUE;
4059 		} else if (rv != ENOTSUP) {
4060 			dev_err(nvme->n_dip, CE_WARN,
4061 			    "!failed to attach namespace %d: %d", i, rv);
			/*
			 * Once we have successfully attached a namespace, we
			 * can no longer fail the driver attach: there is now
			 * a blkdev child node linked to this device even
			 * though our own node is not yet in the attached
			 * state.
			 */
4068 			if (!attached_ns) {
4069 				mutex_exit(&nvme->n_mgmt_mutex);
4070 				goto fail;
4071 			}
4072 		}
4073 	}
4074 
4075 	mutex_exit(&nvme->n_mgmt_mutex);
4076 
4077 	return (DDI_SUCCESS);
4078 
4079 fail:
4080 	/* attach successful anyway so that FMA can retire the device */
4081 	if (nvme->n_dead)
4082 		return (DDI_SUCCESS);
4083 
4084 	(void) nvme_detach(dip, DDI_DETACH);
4085 
4086 	return (DDI_FAILURE);
4087 }
4088 
4089 static int
4090 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4091 {
4092 	int instance, i;
4093 	nvme_t *nvme;
4094 
4095 	if (cmd != DDI_DETACH)
4096 		return (DDI_FAILURE);
4097 
4098 	instance = ddi_get_instance(dip);
4099 
4100 	nvme = ddi_get_soft_state(nvme_state, instance);
4101 
4102 	if (nvme == NULL)
4103 		return (DDI_FAILURE);
4104 
4105 	ddi_remove_minor_node(dip, "devctl");
4106 
4107 	if (nvme->n_ns) {
4108 		for (i = 1; i <= nvme->n_namespace_count; i++) {
4109 			nvme_namespace_t *ns = NVME_NSID2NS(nvme, i);
4110 
4111 			ddi_remove_minor_node(dip, ns->ns_name);
4112 
4113 			if (ns->ns_bd_hdl) {
4114 				(void) bd_detach_handle(ns->ns_bd_hdl);
4115 				bd_free_handle(ns->ns_bd_hdl);
4116 			}
4117 
4118 			if (ns->ns_idns)
4119 				kmem_free(ns->ns_idns,
4120 				    sizeof (nvme_identify_nsid_t));
4121 			if (ns->ns_devid)
4122 				strfree(ns->ns_devid);
4123 		}
4124 
4125 		kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
4126 		    nvme->n_namespace_count);
4127 	}
4128 
4129 	if (nvme->n_progress & NVME_MGMT_INIT) {
4130 		mutex_destroy(&nvme->n_mgmt_mutex);
4131 	}
4132 
4133 	if (nvme->n_progress & NVME_UFM_INIT) {
4134 		ddi_ufm_fini(nvme->n_ufmh);
4135 		mutex_destroy(&nvme->n_fwslot_mutex);
4136 	}
4137 
4138 	if (nvme->n_progress & NVME_INTERRUPTS)
4139 		nvme_release_interrupts(nvme);
4140 
4141 	for (i = 0; i < nvme->n_cq_count; i++) {
4142 		if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
4143 			taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
4144 	}
4145 
4146 	if (nvme->n_progress & NVME_MUTEX_INIT) {
4147 		mutex_destroy(&nvme->n_minor_mutex);
4148 	}
4149 
4150 	if (nvme->n_ioq_count > 0) {
4151 		for (i = 1; i != nvme->n_ioq_count + 1; i++) {
4152 			if (nvme->n_ioq[i] != NULL) {
4153 				/* TODO: send destroy queue commands */
4154 				nvme_free_qpair(nvme->n_ioq[i]);
4155 			}
4156 		}
4157 
4158 		kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
4159 		    (nvme->n_ioq_count + 1));
4160 	}
4161 
4162 	if (nvme->n_prp_cache != NULL) {
4163 		kmem_cache_destroy(nvme->n_prp_cache);
4164 	}
4165 
4166 	if (nvme->n_progress & NVME_REGS_MAPPED) {
4167 		nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
4168 		(void) nvme_reset(nvme, B_FALSE);
4169 	}
4170 
4171 	if (nvme->n_progress & NVME_CTRL_LIMITS)
4172 		sema_destroy(&nvme->n_abort_sema);
4173 
4174 	if (nvme->n_progress & NVME_ADMIN_QUEUE)
4175 		nvme_free_qpair(nvme->n_adminq);
4176 
4177 	if (nvme->n_cq_count > 0) {
4178 		nvme_destroy_cq_array(nvme, 0);
4179 		nvme->n_cq = NULL;
4180 		nvme->n_cq_count = 0;
4181 	}
4182 
4183 	if (nvme->n_idctl)
4184 		kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
4185 
4186 	if (nvme->n_progress & NVME_REGS_MAPPED)
4187 		ddi_regs_map_free(&nvme->n_regh);
4188 
4189 	if (nvme->n_progress & NVME_FMA_INIT) {
4190 		if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4191 			ddi_fm_handler_unregister(nvme->n_dip);
4192 
4193 		if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
4194 		    DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4195 			pci_ereport_teardown(nvme->n_dip);
4196 
4197 		ddi_fm_fini(nvme->n_dip);
4198 	}
4199 
4200 	if (nvme->n_vendor != NULL)
4201 		strfree(nvme->n_vendor);
4202 
4203 	if (nvme->n_product != NULL)
4204 		strfree(nvme->n_product);
4205 
4206 	/* Clean up hot removal event handler. */
4207 	if (nvme->n_ev_rm_cb_id != NULL) {
4208 		(void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
4209 	}
4210 	nvme->n_ev_rm_cb_id = NULL;
4211 
4212 	ddi_soft_state_free(nvme_state, instance);
4213 
4214 	return (DDI_SUCCESS);
4215 }
4216 
4217 static int
4218 nvme_quiesce(dev_info_t *dip)
4219 {
4220 	int instance;
4221 	nvme_t *nvme;
4222 
4223 	instance = ddi_get_instance(dip);
4224 
4225 	nvme = ddi_get_soft_state(nvme_state, instance);
4226 
4227 	if (nvme == NULL)
4228 		return (DDI_FAILURE);
4229 
4230 	nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE);
4231 
4232 	(void) nvme_reset(nvme, B_TRUE);
4233 
	return (DDI_SUCCESS);
4235 }
4236 
4237 static int
4238 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
4239 {
4240 	nvme_t *nvme = cmd->nc_nvme;
4241 	uint_t nprp_per_page, nprp;
4242 	uint64_t *prp;
4243 	const ddi_dma_cookie_t *cookie;
4244 	uint_t idx;
4245 	uint_t ncookies = ddi_dma_ncookies(dma);
4246 
4247 	if (ncookies == 0)
4248 		return (DDI_FAILURE);
4249 
4250 	if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
4251 		return (DDI_FAILURE);
4252 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
4253 
4254 	if (ncookies == 1) {
4255 		cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
4256 		return (DDI_SUCCESS);
4257 	} else if (ncookies == 2) {
4258 		if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
4259 			return (DDI_FAILURE);
4260 		cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
4261 		return (DDI_SUCCESS);
4262 	}
4263 
4264 	/*
4265 	 * At this point, we're always operating on cookies at
4266 	 * index >= 1 and writing the addresses of those cookies
4267 	 * into a new page. The address of that page is stored
4268 	 * as the second PRP entry.
4269 	 */
4270 	nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
4271 	ASSERT(nprp_per_page > 0);
4272 
4273 	/*
4274 	 * We currently don't support chained PRPs and set up our DMA
4275 	 * attributes to reflect that. If we still get an I/O request
4276 	 * that needs a chained PRP something is very wrong. Account
4277 	 * for the first cookie here, which we've placed in d_prp[0].
4278 	 */
4279 	nprp = howmany(ncookies - 1, nprp_per_page);
4280 	VERIFY(nprp == 1);
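
	/*
	 * With 4k pages this means at most 513 cookies per transfer: one
	 * placed in d_prp[0] above and up to 512 more in the single PRP
	 * list page filled in below.
	 */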
4281 
	/*
	 * Allocate a page of pointers, in which we'll write the
	 * addresses of cookies 1 through (ncookies - 1).
	 */
4286 	cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
4287 	bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
4288 	cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
4289 
4290 	prp = (uint64_t *)cmd->nc_prp->nd_memp;
4291 	for (idx = 1; idx < ncookies; idx++) {
4292 		if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
4293 			return (DDI_FAILURE);
4294 		*prp++ = cookie->dmac_laddress;
4295 	}
4296 
4297 	(void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
4298 	    DDI_DMA_SYNC_FORDEV);
4299 	return (DDI_SUCCESS);
4300 }
4301 
4302 /*
4303  * The maximum number of requests supported for a deallocate request is
4304  * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
4305  * unchanged through at least 1.4a). The definition of nvme_range_t is also
4306  * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
4307  * a deallocate request will fit into the smallest supported namespace page
4308  * (4k).
4309  */
4310 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
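/*
 * (nvme_range_t is 16 bytes, so 16 * 256 == 4096; the CTASSERT above
 * verifies this at compile time.)
 */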
4311 
4312 static int
4313 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
4314     int allocflag)
4315 {
4316 	const dkioc_free_list_t *dfl = xfer->x_dfl;
4317 	const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
4318 	nvme_t *nvme = cmd->nc_nvme;
4319 	nvme_range_t *ranges = NULL;
4320 	uint_t i;
4321 
	/*
	 * The number of ranges in the request is zero based (that is,
	 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
	 * word10 == 255 -> 256 ranges). Therefore the allowed values are
	 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
	 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
	 * in blkdev.
	 */
4330 	VERIFY3U(dfl->dfl_num_exts, >, 0);
4331 	VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
4332 	cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
4333 
4334 	cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
4335 
4336 	cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
4337 	if (cmd->nc_prp == NULL)
4338 		return (DDI_FAILURE);
4339 
4340 	bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
4341 	ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
4342 
4343 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
4344 	cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
4345 
4346 	for (i = 0; i < dfl->dfl_num_exts; i++) {
4347 		uint64_t lba, len;
4348 
4349 		lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
4350 		len = exts[i].dfle_length / blocksize;
4351 
4352 		VERIFY3U(len, <=, UINT32_MAX);
4353 
4354 		/* No context attributes for a deallocate request */
4355 		ranges[i].nr_ctxattr = 0;
4356 		ranges[i].nr_len = len;
4357 		ranges[i].nr_lba = lba;
4358 	}
4359 
4360 	(void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
4361 	    DDI_DMA_SYNC_FORDEV);
4362 
4363 	return (DDI_SUCCESS);
4364 }
4365 
4366 static nvme_cmd_t *
4367 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
4368 {
4369 	nvme_t *nvme = ns->ns_nvme;
4370 	nvme_cmd_t *cmd;
4371 	int allocflag;
4372 
4373 	/*
4374 	 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
4375 	 */
4376 	allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
4377 	cmd = nvme_alloc_cmd(nvme, allocflag);
4378 
4379 	if (cmd == NULL)
4380 		return (NULL);
4381 
4382 	cmd->nc_sqe.sqe_opc = opc;
4383 	cmd->nc_callback = nvme_bd_xfer_done;
4384 	cmd->nc_xfer = xfer;
4385 
4386 	switch (opc) {
4387 	case NVME_OPC_NVM_WRITE:
4388 	case NVME_OPC_NVM_READ:
4389 		VERIFY(xfer->x_nblks <= 0x10000);
4390 
4391 		cmd->nc_sqe.sqe_nsid = ns->ns_id;
4392 
4393 		cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
4394 		cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
4395 		cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
4396 
4397 		if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
4398 			goto fail;
4399 		break;
4400 
4401 	case NVME_OPC_NVM_FLUSH:
4402 		cmd->nc_sqe.sqe_nsid = ns->ns_id;
4403 		break;
4404 
4405 	case NVME_OPC_NVM_DSET_MGMT:
4406 		cmd->nc_sqe.sqe_nsid = ns->ns_id;
4407 
4408 		if (nvme_fill_ranges(cmd, xfer,
4409 		    (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
4410 			goto fail;
4411 		break;
4412 
4413 	default:
4414 		goto fail;
4415 	}
4416 
4417 	return (cmd);
4418 
4419 fail:
4420 	nvme_free_cmd(cmd);
4421 	return (NULL);
4422 }
4423 
4424 static void
4425 nvme_bd_xfer_done(void *arg)
4426 {
4427 	nvme_cmd_t *cmd = arg;
4428 	bd_xfer_t *xfer = cmd->nc_xfer;
4429 	int error = 0;
4430 
4431 	error = nvme_check_cmd_status(cmd);
4432 	nvme_free_cmd(cmd);
4433 
4434 	bd_xfer_done(xfer, error);
4435 }
4436 
4437 static void
4438 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
4439 {
4440 	nvme_namespace_t *ns = arg;
4441 	nvme_t *nvme = ns->ns_nvme;
4442 	uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
4443 	boolean_t mutex_exit_needed = B_TRUE;
4444 
4445 	/*
4446 	 * nvme_bd_driveinfo is called by blkdev in two situations:
4447 	 * - during bd_attach_handle(), which we call with the mutex held
4448 	 * - during bd_attach(), which may be called with or without the
4449 	 *   mutex held
4450 	 */
4451 	if (mutex_owned(&nvme->n_mgmt_mutex))
4452 		mutex_exit_needed = B_FALSE;
4453 	else
4454 		mutex_enter(&nvme->n_mgmt_mutex);
4455 
4456 	/*
4457 	 * Set the blkdev qcount to the number of submission queues.
4458 	 * It will then create one waitq/runq pair for each submission
4459 	 * queue and spread I/O requests across the queues.
4460 	 */
4461 	drive->d_qcount = nvme->n_ioq_count;
4462 
	/*
	 * I/O activity to individual namespaces is distributed across
	 * each of the d_qcount blkdev queues (which has been set to
	 * the number of nvme submission queues). d_qsize is the number
	 * of submitted and not completed I/Os within each queue that blkdev
	 * will allow before it starts holding them in the waitq.
	 *
	 * Each namespace will create a child blkdev instance; for each one
	 * we try to set d_qsize so that each namespace gets an equal
	 * portion of the submission queue.
	 *
	 * If, after the nvme drive has been instantiated,
	 * n_namespaces_attachable changes and a namespace is attached, a
	 * different d_qsize may be calculated. The sum of the d_qsizes may
	 * then even exceed the submission queue size. Should that be the
	 * case, and should the I/O rate be such that blkdev attempts to
	 * submit more I/Os than the submission queue can hold, the excess
	 * I/Os will be held behind the semaphore nq_sema.
	 */
4482 	drive->d_qsize = nvme->n_io_squeue_len / ns_count;
4483 
4484 	/*
4485 	 * Don't let the queue size drop below the minimum, though.
4486 	 */
4487 	drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
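
	/*
	 * E.g. with an io-squeue-len of 1024 (an illustrative value) and
	 * 4 attachable namespaces, each child blkdev instance gets
	 * d_qsize == 256.
	 */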
4488 
4489 	/*
4490 	 * d_maxxfer is not set, which means the value is taken from the DMA
4491 	 * attributes specified to bd_alloc_handle.
4492 	 */
4493 
4494 	drive->d_removable = B_FALSE;
4495 	drive->d_hotpluggable = B_FALSE;
4496 
4497 	bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
4498 	drive->d_target = ns->ns_id;
4499 	drive->d_lun = 0;
4500 
4501 	drive->d_model = nvme->n_idctl->id_model;
4502 	drive->d_model_len = sizeof (nvme->n_idctl->id_model);
4503 	drive->d_vendor = nvme->n_vendor;
4504 	drive->d_vendor_len = strlen(nvme->n_vendor);
4505 	drive->d_product = nvme->n_product;
4506 	drive->d_product_len = strlen(nvme->n_product);
4507 	drive->d_serial = nvme->n_idctl->id_serial;
4508 	drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
4509 	drive->d_revision = nvme->n_idctl->id_fwrev;
4510 	drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
4511 
	/*
	 * If we support the dataset management command, the only restriction
	 * on a discard request is the maximum number of ranges (segments)
	 * in a single request.
	 */
4517 	if (nvme->n_idctl->id_oncs.on_dset_mgmt)
4518 		drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
4519 
4520 	if (mutex_exit_needed)
4521 		mutex_exit(&nvme->n_mgmt_mutex);
4522 }
4523 
4524 static int
4525 nvme_bd_mediainfo(void *arg, bd_media_t *media)
4526 {
4527 	nvme_namespace_t *ns = arg;
4528 	nvme_t *nvme = ns->ns_nvme;
4529 	boolean_t mutex_exit_needed = B_TRUE;
4530 
4531 	if (nvme->n_dead) {
4532 		return (EIO);
4533 	}
4534 
	/*
	 * nvme_bd_mediainfo is called by blkdev in various situations,
	 * most of them out of our control. There's one exception though:
	 * when we call bd_state_change() in response to a "namespace
	 * change" notification, the mutex is already held by us.
	 */
4541 	if (mutex_owned(&nvme->n_mgmt_mutex))
4542 		mutex_exit_needed = B_FALSE;
4543 	else
4544 		mutex_enter(&nvme->n_mgmt_mutex);
4545 
4546 	media->m_nblks = ns->ns_block_count;
4547 	media->m_blksize = ns->ns_block_size;
4548 	media->m_readonly = B_FALSE;
4549 	media->m_solidstate = B_TRUE;
4550 
4551 	media->m_pblksize = ns->ns_best_block_size;
4552 
4553 	if (mutex_exit_needed)
4554 		mutex_exit(&nvme->n_mgmt_mutex);
4555 
4556 	return (0);
4557 }
4558 
4559 static int
4560 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
4561 {
4562 	nvme_t *nvme = ns->ns_nvme;
4563 	nvme_cmd_t *cmd;
4564 	nvme_qpair_t *ioq;
4565 	boolean_t poll;
4566 	int ret;
4567 
4568 	if (nvme->n_dead) {
4569 		return (EIO);
4570 	}
4571 
4572 	cmd = nvme_create_nvm_cmd(ns, opc, xfer);
4573 	if (cmd == NULL)
4574 		return (ENOMEM);
4575 
4576 	cmd->nc_sqid = xfer->x_qnum + 1;
4577 	ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
4578 	ioq = nvme->n_ioq[cmd->nc_sqid];
4579 
4580 	/*
4581 	 * Get the polling flag before submitting the command. The command may
4582 	 * complete immediately after it was submitted, which means we must
4583 	 * treat both cmd and xfer as if they have been freed already.
4584 	 */
4585 	poll = (xfer->x_flags & BD_XFER_POLL) != 0;
4586 
4587 	ret = nvme_submit_io_cmd(ioq, cmd);
4588 
4589 	if (ret != 0)
4590 		return (ret);
4591 
4592 	if (!poll)
4593 		return (0);
4594 
4595 	do {
4596 		cmd = nvme_retrieve_cmd(nvme, ioq);
4597 		if (cmd != NULL)
4598 			cmd->nc_callback(cmd);
4599 		else
4600 			drv_usecwait(10);
4601 	} while (ioq->nq_active_cmds != 0);
4602 
4603 	return (0);
4604 }
4605 
4606 static int
4607 nvme_bd_read(void *arg, bd_xfer_t *xfer)
4608 {
4609 	nvme_namespace_t *ns = arg;
4610 
4611 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
4612 }
4613 
4614 static int
4615 nvme_bd_write(void *arg, bd_xfer_t *xfer)
4616 {
4617 	nvme_namespace_t *ns = arg;
4618 
4619 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
4620 }
4621 
4622 static int
4623 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
4624 {
4625 	nvme_namespace_t *ns = arg;
4626 
4627 	if (ns->ns_nvme->n_dead)
4628 		return (EIO);
4629 
	/*
	 * If the volatile write cache is not present or not enabled, the
	 * FLUSH command is effectively a no-op, so we can take a shortcut
	 * here: fail with ENOTSUP if there is no cache at all, and complete
	 * immediately with success if the cache is merely disabled.
	 */
4634 	if (!ns->ns_nvme->n_write_cache_present) {
4635 		bd_xfer_done(xfer, ENOTSUP);
4636 		return (0);
4637 	}
4638 
4639 	if (!ns->ns_nvme->n_write_cache_enabled) {
4640 		bd_xfer_done(xfer, 0);
4641 		return (0);
4642 	}
4643 
4644 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
4645 }
4646 
4647 static int
4648 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
4649 {
4650 	nvme_namespace_t *ns = arg;
4651 	nvme_t *nvme = ns->ns_nvme;
4652 
4653 	if (nvme->n_dead) {
4654 		return (EIO);
4655 	}
4656 
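	/*
	 * Prefer a devid based on the namespace GUID (NGUID), then the
	 * IEEE extended unique identifier (EUI64), and fall back to the
	 * driver-generated ns_devid string if the device reports neither.
	 */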
4657 	if (*(uint64_t *)ns->ns_nguid != 0 ||
4658 	    *(uint64_t *)(ns->ns_nguid + 8) != 0) {
4659 		return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
4660 		    sizeof (ns->ns_nguid), ns->ns_nguid, devid));
4661 	} else if (*(uint64_t *)ns->ns_eui64 != 0) {
4662 		return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
4663 		    sizeof (ns->ns_eui64), ns->ns_eui64, devid));
4664 	} else {
4665 		return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
4666 		    strlen(ns->ns_devid), ns->ns_devid, devid));
4667 	}
4668 }
4669 
4670 static int
4671 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
4672 {
4673 	nvme_namespace_t *ns = arg;
4674 
4675 	if (xfer->x_dfl == NULL)
4676 		return (EINVAL);
4677 
4678 	if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
4679 		return (ENOTSUP);
4680 
4681 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
4682 }
4683 
4684 static int
4685 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
4686 {
4687 #ifndef __lock_lint
4688 	_NOTE(ARGUNUSED(cred_p));
4689 #endif
4690 	minor_t minor = getminor(*devp);
4691 	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
4692 	int nsid = NVME_MINOR_NSID(minor);
4693 	nvme_minor_state_t *nm;
4694 	int rv = 0;
4695 
4696 	if (otyp != OTYP_CHR)
4697 		return (EINVAL);
4698 
4699 	if (nvme == NULL)
4700 		return (ENXIO);
4701 
4702 	if (nsid > nvme->n_namespace_count)
4703 		return (ENXIO);
4704 
4705 	if (nvme->n_dead)
4706 		return (EIO);
4707 
4708 	mutex_enter(&nvme->n_minor_mutex);
4709 
4710 	/*
4711 	 * First check the devctl node and error out if it's been opened
4712 	 * exclusively already by any other thread.
4713 	 */
4714 	if (nvme->n_minor.nm_oexcl != NULL &&
4715 	    nvme->n_minor.nm_oexcl != curthread) {
4716 		rv = EBUSY;
4717 		goto out;
4718 	}
4719 
4720 	nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor);
4721 
4722 	if (flag & FEXCL) {
4723 		if (nm->nm_oexcl != NULL || nm->nm_open) {
4724 			rv = EBUSY;
4725 			goto out;
4726 		}
4727 
4728 		/*
4729 		 * If at least one namespace is already open, fail the
4730 		 * exclusive open of the devctl node.
4731 		 */
4732 		if (nsid == 0) {
4733 			for (int i = 1; i <= nvme->n_namespace_count; i++) {
4734 				if (NVME_NSID2NS(nvme, i)->ns_minor.nm_open) {
4735 					rv = EBUSY;
4736 					goto out;
4737 				}
4738 			}
4739 		}
4740 
4741 		nm->nm_oexcl = curthread;
4742 	}
4743 
4744 	nm->nm_open = B_TRUE;
4745 
4746 out:
4747 	mutex_exit(&nvme->n_minor_mutex);
4748 	return (rv);
}
4751 
4752 static int
4753 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
4754 {
4755 #ifndef __lock_lint
4756 	_NOTE(ARGUNUSED(cred_p));
4757 	_NOTE(ARGUNUSED(flag));
4758 #endif
4759 	minor_t minor = getminor(dev);
4760 	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
4761 	int nsid = NVME_MINOR_NSID(minor);
4762 	nvme_minor_state_t *nm;
4763 
4764 	if (otyp != OTYP_CHR)
4765 		return (ENXIO);
4766 
4767 	if (nvme == NULL)
4768 		return (ENXIO);
4769 
4770 	if (nsid > nvme->n_namespace_count)
4771 		return (ENXIO);
4772 
4773 	nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor);
4774 
4775 	mutex_enter(&nvme->n_minor_mutex);
4776 	if (nm->nm_oexcl != NULL) {
4777 		ASSERT(nm->nm_oexcl == curthread);
4778 		nm->nm_oexcl = NULL;
4779 	}
4780 
4781 	ASSERT(nm->nm_open);
4782 	nm->nm_open = B_FALSE;
4783 	mutex_exit(&nvme->n_minor_mutex);
4784 
4785 	return (0);
4786 }
4787 
4788 static int
4789 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4790     cred_t *cred_p)
4791 {
4792 	_NOTE(ARGUNUSED(cred_p));
4793 	int rv = 0;
4794 	void *idctl;
4795 
4796 	if ((mode & FREAD) == 0)
4797 		return (EPERM);
4798 
4799 	if (nioc->n_len < NVME_IDENTIFY_BUFSIZE)
4800 		return (EINVAL);
4801 
4802 	if ((rv = nvme_identify(nvme, B_TRUE, nsid, (void **)&idctl)) != 0)
4803 		return (rv);
4804 
4805 	if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode)
4806 	    != 0)
4807 		rv = EFAULT;
4808 
4809 	kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
4810 
4811 	return (rv);
4812 }
4813 
4814 /*
4815  * Execute commands on behalf of the various ioctls.
4816  */
4817 static int
4818 nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr,
4819     uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout)
4820 {
4821 	nvme_cmd_t *cmd;
4822 	nvme_qpair_t *ioq;
4823 	int rv = 0;
4824 
4825 	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
4826 	if (is_admin) {
4827 		cmd->nc_sqid = 0;
4828 		ioq = nvme->n_adminq;
4829 	} else {
4830 		cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
4831 		ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
4832 		ioq = nvme->n_ioq[cmd->nc_sqid];
4833 	}
4834 
4835 	/*
4836 	 * This function is used to facilitate requests from
4837 	 * userspace, so don't panic if the command fails. This
4838 	 * is especially true for admin passthru commands, where
4839 	 * the actual command data structure is entirely defined
4840 	 * by userspace.
4841 	 */
4842 	cmd->nc_dontpanic = B_TRUE;
4843 
4844 	cmd->nc_callback = nvme_wakeup_cmd;
4845 	cmd->nc_sqe = *sqe;
4846 
4847 	if ((rwk & (FREAD | FWRITE)) != 0) {
4848 		if (data_addr == NULL) {
4849 			rv = EINVAL;
4850 			goto free_cmd;
4851 		}
4852 
4853 		if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ,
4854 		    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
4855 			dev_err(nvme->n_dip, CE_WARN,
4856 			    "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
4857 
4858 			rv = ENOMEM;
4859 			goto free_cmd;
4860 		}
4861 
4862 		if ((rv = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0)
4863 			goto free_cmd;
4864 
4865 		if ((rwk & FWRITE) != 0) {
4866 			if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp,
4867 			    data_len, rwk & FKIOCTL) != 0) {
4868 				rv = EFAULT;
4869 				goto free_cmd;
4870 			}
4871 		}
4872 	}
4873 
4874 	if (is_admin) {
4875 		nvme_admin_cmd(cmd, timeout);
4876 	} else {
4877 		mutex_enter(&cmd->nc_mutex);
4878 
4879 		rv = nvme_submit_io_cmd(ioq, cmd);
4880 
4881 		if (rv == EAGAIN) {
4882 			mutex_exit(&cmd->nc_mutex);
4883 			dev_err(cmd->nc_nvme->n_dip, CE_WARN,
4884 			    "!nvme_ioc_cmd() failed, I/O Q full");
4885 			goto free_cmd;
4886 		}
4887 
4888 		nvme_wait_cmd(cmd, timeout);
4889 
4890 		mutex_exit(&cmd->nc_mutex);
4891 	}
4892 
4893 	if (cqe != NULL)
4894 		*cqe = cmd->nc_cqe;
4895 
4896 	if ((rv = nvme_check_cmd_status(cmd)) != 0) {
4897 		dev_err(nvme->n_dip, CE_WARN,
4898 		    "!nvme_ioc_cmd() failed with sct = %x, sc = %x",
4899 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
4900 
4901 		goto free_cmd;
4902 	}
4903 
4904 	if ((rwk & FREAD) != 0) {
4905 		if (ddi_copyout(cmd->nc_dma->nd_memp,
4906 		    data_addr, data_len, rwk & FKIOCTL) != 0)
4907 			rv = EFAULT;
4908 	}
4909 
4910 free_cmd:
4911 	nvme_free_cmd(cmd);
4912 
4913 	return (rv);
4914 }
4915 
4916 static int
4917 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4918     int mode, cred_t *cred_p)
4919 {
4920 	_NOTE(ARGUNUSED(nsid, cred_p));
4921 	int rv = 0;
4922 	nvme_reg_cap_t cap = { 0 };
4923 	nvme_capabilities_t nc;
4924 
4925 	if ((mode & FREAD) == 0)
4926 		return (EPERM);
4927 
4928 	if (nioc->n_len < sizeof (nc))
4929 		return (EINVAL);
4930 
4931 	cap.r = nvme_get64(nvme, NVME_REG_CAP);
4932 
4933 	/*
4934 	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
4935 	 * specify the base page size of 4k (1<<12), so add 12 here to
4936 	 * get the real page size value.
4937 	 */
4938 	nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax);
4939 	nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin);
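
	/*
	 * E.g. cap_mpsmin == 0 yields a 4k (1 << 12) minimum page size and
	 * cap_mpsmax == 4 a 64k (1 << 16) maximum.
	 */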
4940 
4941 	if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0)
4942 		rv = EFAULT;
4943 
4944 	return (rv);
4945 }
4946 
4947 static int
4948 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4949     int mode, cred_t *cred_p)
4950 {
4951 	_NOTE(ARGUNUSED(cred_p));
4952 	void *log = NULL;
4953 	size_t bufsize = 0;
4954 	int rv = 0;
4955 
4956 	if ((mode & FREAD) == 0)
4957 		return (EPERM);
4958 
4959 	if (nsid > 0 && !NVME_NSID2NS(nvme, nsid)->ns_active)
4960 		return (EINVAL);
4961 
4962 	switch (nioc->n_arg) {
4963 	case NVME_LOGPAGE_ERROR:
4964 		if (nsid != 0)
4965 			return (EINVAL);
4966 		break;
4967 	case NVME_LOGPAGE_HEALTH:
4968 		if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0)
4969 			return (EINVAL);
4970 
4971 		if (nsid == 0)
4972 			nsid = (uint32_t)-1;
4973 
4974 		break;
4975 	case NVME_LOGPAGE_FWSLOT:
4976 		if (nsid != 0)
4977 			return (EINVAL);
4978 		break;
4979 	default:
4980 		if (!NVME_IS_VENDOR_SPECIFIC_LOGPAGE(nioc->n_arg))
4981 			return (EINVAL);
4982 		if (nioc->n_len > NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE) {
4983 			dev_err(nvme->n_dip, CE_NOTE, "!Vendor-specific log "
4984 			    "page size exceeds device maximum supported size: "
4985 			    "%lu", NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE);
4986 			return (EINVAL);
4987 		}
4988 		if (nioc->n_len == 0)
4989 			return (EINVAL);
4990 		bufsize = nioc->n_len;
4991 		if (nsid == 0)
4992 			nsid = (uint32_t)-1;
4993 	}
4994 
4995 	if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid)
4996 	    != DDI_SUCCESS)
4997 		return (EIO);
4998 
4999 	if (nioc->n_len < bufsize) {
5000 		kmem_free(log, bufsize);
5001 		return (EINVAL);
5002 	}
5003 
5004 	if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0)
5005 		rv = EFAULT;
5006 
5007 	nioc->n_len = bufsize;
5008 	kmem_free(log, bufsize);
5009 
5010 	return (rv);
5011 }
5012 
5013 static int
5014 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
5015     int mode, cred_t *cred_p)
5016 {
5017 	_NOTE(ARGUNUSED(cred_p));
5018 	void *buf = NULL;
5019 	size_t bufsize = 0;
5020 	uint32_t res = 0;
5021 	uint8_t feature;
5022 	int rv = 0;
5023 
5024 	if ((mode & FREAD) == 0)
5025 		return (EPERM);
5026 
5027 	if (nsid > 0 && !NVME_NSID2NS(nvme, nsid)->ns_active)
5028 		return (EINVAL);
5029 
5030 	if ((nioc->n_arg >> 32) > 0xff)
5031 		return (EINVAL);
5032 
5033 	feature = (uint8_t)(nioc->n_arg >> 32);
5034 
5035 	switch (feature) {
5036 	case NVME_FEAT_ARBITRATION:
5037 	case NVME_FEAT_POWER_MGMT:
5038 	case NVME_FEAT_ERROR:
5039 	case NVME_FEAT_NQUEUES:
5040 	case NVME_FEAT_INTR_COAL:
5041 	case NVME_FEAT_WRITE_ATOM:
5042 	case NVME_FEAT_ASYNC_EVENT:
5043 	case NVME_FEAT_PROGRESS:
5044 		if (nsid != 0)
5045 			return (EINVAL);
5046 		break;
5047 
5048 	case NVME_FEAT_TEMPERATURE:
5049 		if (nsid != 0)
5050 			return (EINVAL);
5051 		res = nioc->n_arg & 0xffffffffUL;
5052 		if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) {
5053 			nvme_temp_threshold_t tt;
5054 
5055 			tt.r = res;
5056 			if (tt.b.tt_thsel != NVME_TEMP_THRESH_OVER &&
5057 			    tt.b.tt_thsel != NVME_TEMP_THRESH_UNDER) {
5058 				return (EINVAL);
5059 			}
5060 
5061 			if (tt.b.tt_tmpsel > NVME_TEMP_THRESH_MAX_SENSOR) {
5062 				return (EINVAL);
5063 			}
5064 		} else if (res != 0) {
5065 			return (ENOTSUP);
5066 		}
5067 		break;
5068 
5069 	case NVME_FEAT_INTR_VECT:
5070 		if (nsid != 0)
5071 			return (EINVAL);
5072 
5073 		res = nioc->n_arg & 0xffffffffUL;
5074 		if (res >= nvme->n_intr_cnt)
5075 			return (EINVAL);
5076 		break;
5077 
5078 	case NVME_FEAT_LBA_RANGE:
5079 		if (nvme->n_lba_range_supported == B_FALSE)
5080 			return (EINVAL);
5081 
5082 		if (nsid == 0 ||
5083 		    nsid > nvme->n_namespace_count)
5084 			return (EINVAL);
5085 
5086 		break;
5087 
5088 	case NVME_FEAT_WRITE_CACHE:
5089 		if (nsid != 0)
5090 			return (EINVAL);
5091 
5092 		if (!nvme->n_write_cache_present)
5093 			return (EINVAL);
5094 
5095 		break;
5096 
5097 	case NVME_FEAT_AUTO_PST:
5098 		if (nsid != 0)
5099 			return (EINVAL);
5100 
5101 		if (!nvme->n_auto_pst_supported)
5102 			return (EINVAL);
5103 
5104 		break;
5105 
5106 	default:
5107 		return (EINVAL);
5108 	}
5109 
5110 	rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf,
5111 	    &bufsize);
5112 	if (rv != 0)
5113 		return (rv);
5114 
5115 	if (nioc->n_len < bufsize) {
5116 		kmem_free(buf, bufsize);
5117 		return (EINVAL);
5118 	}
5119 
	if (buf && ddi_copyout(buf, (void *)nioc->n_buf, bufsize, mode) != 0)
5121 		rv = EFAULT;
5122 
5123 	kmem_free(buf, bufsize);
5124 	nioc->n_arg = res;
5125 	nioc->n_len = bufsize;
5126 
5127 	return (rv);
5128 }
5129 
5130 static int
5131 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5132     cred_t *cred_p)
5133 {
5134 	_NOTE(ARGUNUSED(nsid, mode, cred_p));
5135 
5136 	if ((mode & FREAD) == 0)
5137 		return (EPERM);
5138 
5139 	nioc->n_arg = nvme->n_intr_cnt;
5140 	return (0);
5141 }
5142 
5143 static int
5144 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5145     cred_t *cred_p)
5146 {
5147 	_NOTE(ARGUNUSED(nsid, cred_p));
5148 	int rv = 0;
5149 
5150 	if ((mode & FREAD) == 0)
5151 		return (EPERM);
5152 
5153 	if (nioc->n_len < sizeof (nvme->n_version))
5154 		return (ENOMEM);
5155 
5156 	if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf,
5157 	    sizeof (nvme->n_version), mode) != 0)
5158 		rv = EFAULT;
5159 
5160 	return (rv);
5161 }
5162 
5163 static int
5164 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5165     cred_t *cred_p)
5166 {
5167 	_NOTE(ARGUNUSED(mode));
5168 	nvme_format_nvm_t frmt = { 0 };
5169 	int c_nsid = nsid != 0 ? nsid : 1;
5170 	nvme_identify_nsid_t *idns;
5171 	nvme_minor_state_t *nm;
5172 
5173 	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
5174 		return (EPERM);
5175 
5176 	nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor);
5177 	if (nm->nm_oexcl != curthread)
5178 		return (EACCES);
5179 
5180 	if (nsid != 0) {
5181 		if (NVME_NSID2NS(nvme, nsid)->ns_attached)
5182 			return (EBUSY);
5183 		else if (!NVME_NSID2NS(nvme, nsid)->ns_active)
5184 			return (EINVAL);
5185 	}
5186 
5187 	frmt.r = nioc->n_arg & 0xffffffff;
5188 
5189 	/*
5190 	 * Check whether the FORMAT NVM command is supported.
5191 	 */
5192 	if (nvme->n_idctl->id_oacs.oa_format == 0)
5193 		return (ENOTSUP);
5194 
5195 	/*
5196 	 * Don't allow format or secure erase of individual namespace if that
5197 	 * would cause a format or secure erase of all namespaces.
5198 	 */
5199 	if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0)
5200 		return (EINVAL);
5201 
5202 	if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE &&
5203 	    nvme->n_idctl->id_fna.fn_sec_erase != 0)
5204 		return (EINVAL);
5205 
5206 	/*
5207 	 * Don't allow formatting with Protection Information.
5208 	 */
5209 	if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0)
5210 		return (EINVAL);
5211 
5212 	/*
5213 	 * Don't allow formatting using an illegal LBA format, or any LBA format
5214 	 * that uses metadata.
5215 	 */
5216 	idns = NVME_NSID2NS(nvme, c_nsid)->ns_idns;
5217 	if (frmt.b.fm_lbaf > idns->id_nlbaf ||
5218 	    idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0)
5219 		return (EINVAL);
5220 
5221 	/*
5222 	 * Don't allow formatting using an illegal Secure Erase setting.
5223 	 */
5224 	if (frmt.b.fm_ses > NVME_FRMT_MAX_SES ||
5225 	    (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO &&
5226 	    nvme->n_idctl->id_fna.fn_crypt_erase == 0))
5227 		return (EINVAL);
5228 
5229 	if (nsid == 0)
5230 		nsid = (uint32_t)-1;
5231 
5232 	return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0,
5233 	    B_FALSE, frmt.b.fm_ses));
5234 }
5235 
5236 static int
5237 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5238     cred_t *cred_p)
5239 {
5240 	_NOTE(ARGUNUSED(nioc, mode));
5241 	int rv;
5242 
5243 	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
5244 		return (EPERM);
5245 
5246 	if (nsid == 0)
5247 		return (EINVAL);
5248 
5249 	if (NVME_NSID2NS(nvme, nsid)->ns_minor.nm_oexcl != curthread)
5250 		return (EACCES);
5251 
5252 	mutex_enter(&nvme->n_mgmt_mutex);
5253 
5254 	rv = nvme_detach_ns(nvme, nsid);
5255 
5256 	mutex_exit(&nvme->n_mgmt_mutex);
5257 
5258 	return (rv);
5259 }
5260 
5261 static int
5262 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5263     cred_t *cred_p)
5264 {
5265 	_NOTE(ARGUNUSED(nioc, mode));
5266 	int rv;
5267 
5268 	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
5269 		return (EPERM);
5270 
5271 	if (nsid == 0)
5272 		return (EINVAL);
5273 
5274 	if (NVME_NSID2NS(nvme, nsid)->ns_minor.nm_oexcl != curthread)
5275 		return (EACCES);
5276 
5277 	mutex_enter(&nvme->n_mgmt_mutex);
5278 
5279 	if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) {
5280 		mutex_exit(&nvme->n_mgmt_mutex);
5281 		return (EIO);
5282 	}
5283 
5284 	rv = nvme_attach_ns(nvme, nsid);
5285 
5286 	mutex_exit(&nvme->n_mgmt_mutex);
5287 	return (rv);
5288 }
5289 
5290 static void
5291 nvme_ufm_update(nvme_t *nvme)
5292 {
5293 	mutex_enter(&nvme->n_fwslot_mutex);
5294 	ddi_ufm_update(nvme->n_ufmh);
5295 	if (nvme->n_fwslot != NULL) {
5296 		kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
5297 		nvme->n_fwslot = NULL;
5298 	}
5299 	mutex_exit(&nvme->n_fwslot_mutex);
5300 }
5301 
5302 static int
5303 nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
5304     int mode, cred_t *cred_p)
5305 {
5306 	int rv = 0;
5307 	size_t len, copylen;
5308 	offset_t offset;
5309 	uintptr_t buf;
5310 	nvme_cqe_t cqe = { 0 };
5311 	nvme_sqe_t sqe = {
5312 	    .sqe_opc	= NVME_OPC_FW_IMAGE_LOAD
5313 	};
5314 
5315 	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
5316 		return (EPERM);
5317 
5318 	if (nvme->n_idctl->id_oacs.oa_firmware == 0)
5319 		return (ENOTSUP);
5320 
5321 	if (nsid != 0)
5322 		return (EINVAL);
5323 
5324 	/*
5325 	 * The offset is sent to the device as a 32-bit count of DWORDs, so
5326 	 * restrict the length (n_len) to the maximum expressible byte offset.
5327 	 */
5328 	if (nioc->n_len > NVME_FW_OFFSETB_MAX)
5329 		return (EINVAL);
5330 
5331 	/* Confirm that both offset and length are multiples of the DWORD size. */
5332 	if ((nioc->n_len & NVME_DWORD_MASK) != 0 ||
5333 	    (nioc->n_arg & NVME_DWORD_MASK) != 0)
5334 		return (EINVAL);
5335 
5336 	len = nioc->n_len;
5337 	offset = nioc->n_arg;
5338 	buf = (uintptr_t)nioc->n_buf;
5339 
5340 	nioc->n_arg = 0;
5341 
5342 	while (len > 0 && rv == 0) {
5343 		/*
5344 		 * nvme_ioc_cmd() does not use SGLs or PRP lists, so each
5345 		 * command is limited to the two PRP entries in the SQE.
5346 		 * Transfer at most 2 pages of data per command.
5347 		 */
5348 		copylen = MIN(2 * nvme->n_pagesize, len);
5349 
5350 		sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
5351 		sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
5352 
5353 		rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen,
5354 		    FWRITE, &cqe, nvme_admin_cmd_timeout);
5355 
5356 		/*
5357 		 * Regardless of whether the command succeeded, and regardless
5358 		 * of any errno to be returned in rv, we return any
5359 		 * command-specific status code in n_arg.
5360 		 *
5361 		 * As n_arg isn't cleared in all other code paths that return
5362 		 * an error, we return the status code as a negative value so
5363 		 * it can easily be distinguished from whatever value was
5364 		 * passed in n_arg originally. This only works as long as the
5365 		 * arguments passed in n_arg are less than INT64_MAX, which
5366 		 * they currently are.
5367 		 */
5368 		if (cqe.cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
5369 			nioc->n_arg = (uint64_t)-cqe.cqe_sf.sf_sc;
5370 
5371 		buf += copylen;
5372 		offset += copylen;
5373 		len -= copylen;
5374 	}
5375 
5376 	/*
5377 	 * Let the DDI UFM subsystem know that the firmware information for
5378 	 * this device has changed.
5379 	 */
5380 	nvme_ufm_update(nvme);
5381 
5382 	return (rv);
5383 }
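
/*
 * Illustrative sketch (not part of the driver): since the handler above
 * rejects a non-zero nsid, the download is issued against the devctl minor.
 * Assuming the NVME_IOC_FIRMWARE_DOWNLOAD request from <sys/nvme.h>, and with
 * fw_image/fw_len describing the image, a caller may pass the entire image at
 * once (the driver chunks it into 2-page commands itself), provided the
 * length and starting offset are DWORD-aligned:
 *
 *	nvme_ioctl_t nioc = { 0 };
 *
 *	nioc.n_buf = (uintptr_t)fw_image;
 *	nioc.n_len = fw_len;		// multiple of the 4-byte DWORD size
 *	nioc.n_arg = 0;			// byte offset into the image
 *
 *	if (ioctl(fd, NVME_IOC_FIRMWARE_DOWNLOAD, &nioc) != 0 &&
 *	    (int64_t)nioc.n_arg < 0) {
 *		// n_arg carries the negated command-specific status code.
 *		uint64_t sc = -(int64_t)nioc.n_arg;
 *	}
 */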
5384 
5385 static int
5386 nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
5387     int mode, cred_t *cred_p)
5388 {
5389 	nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
5390 	uint32_t slot = nioc->n_arg & 0xffffffff;
5391 	uint32_t action = nioc->n_arg >> 32;
5392 	nvme_cqe_t cqe = { 0 };
5393 	nvme_sqe_t sqe = {
5394 	    .sqe_opc	= NVME_OPC_FW_ACTIVATE
5395 	};
5396 	int timeout;
5397 	int rv;
5398 
5399 	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
5400 		return (EPERM);
5401 
5402 	if (nvme->n_idctl->id_oacs.oa_firmware == 0)
5403 		return (ENOTSUP);
5404 
5405 	if (nsid != 0)
5406 		return (EINVAL);
5407 
5408 	/* Validate slot is in range. */
5409 	if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX)
5410 		return (EINVAL);
5411 
5412 	switch (action) {
5413 	case NVME_FWC_SAVE:
5414 	case NVME_FWC_SAVE_ACTIVATE:
5415 		timeout = nvme_commit_save_cmd_timeout;
5416 		if (slot == 1 && nvme->n_idctl->id_frmw.fw_readonly)
5417 			return (EROFS);
5418 		break;
5419 	case NVME_FWC_ACTIVATE:
5420 	case NVME_FWC_ACTIVATE_IMMED:
5421 		timeout = nvme_admin_cmd_timeout;
5422 		break;
5423 	default:
5424 		return (EINVAL);
5425 	}
5426 
5427 	fc_dw10.b.fc_slot = slot;
5428 	fc_dw10.b.fc_action = action;
5429 	sqe.sqe_cdw10 = fc_dw10.r;
5430 
5431 	nioc->n_arg = 0;
5432 	rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout);
5433 
5434 	/*
5435 	 * Regardless of whether the command succeeded, we return any
5436 	 * command-specific status code in n_arg.  As in
5437 	 * nvme_ioctl_firmware_download() above, the status code is negated
5438 	 * so it can be distinguished from whatever value was passed in
5439 	 * n_arg originally.
5440 	 */
5446 	if (cqe.cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
5447 		nioc->n_arg = (uint64_t)-cqe.cqe_sf.sf_sc;
5448 
5449 	/*
5450 	 * Let the DDI UFM subsystem know that the firmware information for
5451 	 * this device has changed.
5452 	 */
5453 	nvme_ufm_update(nvme);
5454 
5455 	return (rv);
5456 }
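
/*
 * Illustrative sketch (not part of the driver): as decoded above, this ioctl
 * packs the firmware slot into the low 32 bits of n_arg and the commit action
 * into the high 32 bits.  Assuming the NVME_IOC_FIRMWARE_COMMIT request and
 * the NVME_FWC_* action constants from <sys/nvme.h>:
 *
 *	nvme_ioctl_t nioc = { 0 };
 *	uint32_t slot = 2;
 *
 *	nioc.n_arg = ((uint64_t)NVME_FWC_SAVE_ACTIVATE << 32) | slot;
 *	if (ioctl(fd, NVME_IOC_FIRMWARE_COMMIT, &nioc) != 0)
 *		err(1, "firmware commit of slot %u failed", slot);
 */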
5457 
5458 /*
5459  * Helper to copy in a passthru command from userspace, handling
5460  * different data models.
5461  */
5462 static int
5463 nvme_passthru_copy_cmd_in(const void *buf, nvme_passthru_cmd_t *cmd, int mode)
5464 {
5465 #ifdef _MULTI_DATAMODEL
5466 	switch (ddi_model_convert_from(mode & FMODELS)) {
5467 	case DDI_MODEL_ILP32: {
5468 		nvme_passthru_cmd32_t cmd32;
5469 		if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0)
5470 			return (-1);
5471 		cmd->npc_opcode = cmd32.npc_opcode;
5472 		cmd->npc_timeout = cmd32.npc_timeout;
5473 		cmd->npc_flags = cmd32.npc_flags;
5474 		cmd->npc_cdw12 = cmd32.npc_cdw12;
5475 		cmd->npc_cdw13 = cmd32.npc_cdw13;
5476 		cmd->npc_cdw14 = cmd32.npc_cdw14;
5477 		cmd->npc_cdw15 = cmd32.npc_cdw15;
5478 		cmd->npc_buflen = cmd32.npc_buflen;
5479 		cmd->npc_buf = cmd32.npc_buf;
5480 		break;
5481 	}
5482 	case DDI_MODEL_NONE:
5483 #endif
5484 		if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_passthru_cmd_t),
5485 		    mode) != 0)
5486 			return (-1);
5487 #ifdef _MULTI_DATAMODEL
5488 		break;
5489 	}
5490 #endif
5491 	return (0);
5492 }
5493 
5494 /*
5495  * Helper to copy out a passthru command result to userspace, handling
5496  * different data models.
5497  */
5498 static int
5499 nvme_passthru_copy_cmd_out(const nvme_passthru_cmd_t *cmd, void *buf, int mode)
5500 {
5501 #ifdef _MULTI_DATAMODEL
5502 	switch (ddi_model_convert_from(mode & FMODELS)) {
5503 	case DDI_MODEL_ILP32: {
5504 		nvme_passthru_cmd32_t cmd32;
5505 		bzero(&cmd32, sizeof (cmd32));
5506 		cmd32.npc_opcode = cmd->npc_opcode;
5507 		cmd32.npc_status = cmd->npc_status;
5508 		cmd32.npc_err = cmd->npc_err;
5509 		cmd32.npc_timeout = cmd->npc_timeout;
5510 		cmd32.npc_flags = cmd->npc_flags;
5511 		cmd32.npc_cdw0 = cmd->npc_cdw0;
5512 		cmd32.npc_cdw12 = cmd->npc_cdw12;
5513 		cmd32.npc_cdw13 = cmd->npc_cdw13;
5514 		cmd32.npc_cdw14 = cmd->npc_cdw14;
5515 		cmd32.npc_cdw15 = cmd->npc_cdw15;
5516 		cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
5517 		cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
5518 		if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
5519 			return (-1);
5520 		break;
5521 	}
5522 	case DDI_MODEL_NONE:
5523 #endif
5524 		if (ddi_copyout(cmd, buf, sizeof (nvme_passthru_cmd_t),
5525 		    mode) != 0)
5526 			return (-1);
5527 #ifdef _MULTI_DATAMODEL
5528 		break;
5529 	}
5530 #endif
5531 	return (0);
5532 }
5533 
5534 /*
5535  * Run an arbitrary vendor-specific admin command on the device.
5536  */
5537 static int
5538 nvme_ioctl_passthru(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5539     cred_t *cred_p)
5540 {
5541 	int rv = 0;
5542 	uint_t timeout = 0;
5543 	int rwk = 0;
5544 	nvme_passthru_cmd_t cmd;
5545 	size_t expected_passthru_size = 0;
5546 	nvme_sqe_t sqe;
5547 	nvme_cqe_t cqe;
5548 
5549 	bzero(&cmd, sizeof (cmd));
5550 	bzero(&sqe, sizeof (sqe));
5551 	bzero(&cqe, sizeof (cqe));
5552 
5553 	/*
5554 	 * Basic checks: permissions, data model, argument size.
5555 	 */
5556 	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
5557 		return (EPERM);
5558 
5559 	/*
5560 	 * Compute the expected size of the argument buffer
5561 	 */
5562 #ifdef _MULTI_DATAMODEL
5563 	switch (ddi_model_convert_from(mode & FMODELS)) {
5564 	case DDI_MODEL_ILP32:
5565 		expected_passthru_size = sizeof (nvme_passthru_cmd32_t);
5566 		break;
5567 	case DDI_MODEL_NONE:
5568 #endif
5569 		expected_passthru_size = sizeof (nvme_passthru_cmd_t);
5570 #ifdef _MULTI_DATAMODEL
5571 		break;
5572 	}
5573 #endif
5574 
5575 	if (nioc->n_len != expected_passthru_size) {
5576 		cmd.npc_err = NVME_PASSTHRU_ERR_CMD_SIZE;
5577 		rv = EINVAL;
5578 		goto out;
5579 	}
5580 
5581 	/*
5582 	 * Ensure the device supports the standard vendor specific
5583 	 * admin command format.
5584 	 */
5585 	if (!nvme->n_idctl->id_nvscc.nv_spec) {
5586 		cmd.npc_err = NVME_PASSTHRU_ERR_NOT_SUPPORTED;
5587 		rv = ENOTSUP;
5588 		goto out;
5589 	}
5590 
5591 	if (nvme_passthru_copy_cmd_in((const void*)nioc->n_buf, &cmd, mode))
5592 		return (EFAULT);
5593 
5594 	if (!NVME_IS_VENDOR_SPECIFIC_CMD(cmd.npc_opcode)) {
5595 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_OPCODE;
5596 		rv = EINVAL;
5597 		goto out;
5598 	}
5599 
5600 	/*
5601 	 * This restriction is not mandated by the spec, so future work
5602 	 * could relax this if it's necessary to support commands that both
5603 	 * read and write.
5604 	 */
5605 	if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0 &&
5606 	    (cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0) {
5607 		cmd.npc_err = NVME_PASSTHRU_ERR_READ_AND_WRITE;
5608 		rv = EINVAL;
5609 		goto out;
5610 	}
5611 	if (cmd.npc_timeout > nvme_vendor_specific_admin_cmd_max_timeout) {
5612 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_TIMEOUT;
5613 		rv = EINVAL;
5614 		goto out;
5615 	}
5616 	timeout = cmd.npc_timeout;
5617 
5618 	/*
5619 	 * Passed-thru command buffer verification:
5620 	 *  - Size is a multiple of DWORDs.
5621 	 *  - Non-null iff the length is non-zero.
5622 	 *  - Null if neither reading nor writing data.
5623 	 *  - Non-null if reading or writing.
5624 	 *  - No larger than the maximum allowed buffer size.
5625 	 */
5626 	if ((cmd.npc_buflen % sizeof (uint32_t)) != 0) {
5627 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
5628 		rv = EINVAL;
5629 		goto out;
5630 	}
5631 	if (((void*)cmd.npc_buf != NULL && cmd.npc_buflen == 0) ||
5632 	    ((void*)cmd.npc_buf == NULL && cmd.npc_buflen != 0)) {
5633 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
5634 		rv = EINVAL;
5635 		goto out;
5636 	}
5637 	if (cmd.npc_flags == 0 && (void*)cmd.npc_buf != NULL) {
5638 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
5639 		rv = EINVAL;
5640 		goto out;
5641 	}
5642 	if ((cmd.npc_flags != 0) && ((void*)cmd.npc_buf == NULL)) {
5643 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
5644 		rv = EINVAL;
5645 		goto out;
5646 	}
5647 	if (cmd.npc_buflen > nvme_vendor_specific_admin_cmd_size) {
5648 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
5649 		rv = EINVAL;
5650 		goto out;
5651 	}
5652 	if ((cmd.npc_buflen >> NVME_DWORD_SHIFT) > UINT32_MAX) {
5653 		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
5654 		rv = EINVAL;
5655 		goto out;
5656 	}
5657 
5658 	sqe.sqe_opc = cmd.npc_opcode;
5659 	sqe.sqe_nsid = nsid;
5660 	sqe.sqe_cdw10 = (uint32_t)(cmd.npc_buflen >> NVME_DWORD_SHIFT);
5661 	sqe.sqe_cdw12 = cmd.npc_cdw12;
5662 	sqe.sqe_cdw13 = cmd.npc_cdw13;
5663 	sqe.sqe_cdw14 = cmd.npc_cdw14;
5664 	sqe.sqe_cdw15 = cmd.npc_cdw15;
5665 	if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0)
5666 		rwk = FREAD;
5667 	else if ((cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0)
5668 		rwk = FWRITE;
5669 
5670 	rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void*)cmd.npc_buf,
5671 	    cmd.npc_buflen, rwk, &cqe, timeout);
5672 	cmd.npc_status = cqe.cqe_sf.sf_sc;
5673 	cmd.npc_cdw0 = cqe.cqe_dw0;
5674 
5675 out:
5676 	if (nvme_passthru_copy_cmd_out(&cmd, (void*)nioc->n_buf, mode))
5677 		rv = EFAULT;
5678 	return (rv);
5679 }
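
/*
 * Illustrative sketch (not part of the driver): a caller issuing a
 * vendor-specific admin command fills in an nvme_passthru_cmd_t subject to
 * the checks above.  Assuming the NVME_IOC_PASSTHRU request from
 * <sys/nvme.h> and a suitably sized data_buf:
 *
 *	nvme_passthru_cmd_t pc = { 0 };
 *	nvme_ioctl_t nioc = { 0 };
 *
 *	pc.npc_opcode = 0xc0;			// vendor-specific opcode range
 *	pc.npc_timeout = 15;			// seconds
 *	pc.npc_flags = NVME_PASSTHRU_READ;	// device-to-host transfer
 *	pc.npc_buflen = 512;			// multiple of the DWORD size
 *	pc.npc_buf = (uintptr_t)data_buf;
 *
 *	nioc.n_buf = (uintptr_t)&pc;
 *	nioc.n_len = sizeof (pc);
 *
 *	if (ioctl(fd, NVME_IOC_PASSTHRU, &nioc) != 0)
 *		errx(1, "passthru failed: npc_err %u, npc_status %u",
 *		    pc.npc_err, pc.npc_status);
 */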
5680 
5681 static int
5682 nvme_ioctl_ns_state(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
5683     cred_t *cred_p)
5684 {
5685 	_NOTE(ARGUNUSED(cred_p));
5686 	nvme_namespace_t *ns;
5687 
5688 	if ((mode & FREAD) == 0)
5689 		return (EPERM);
5690 
5691 	if (nsid == 0)
5692 		return (EINVAL);
5693 	ns = NVME_NSID2NS(nvme, nsid);
5694 	nioc->n_arg = 0;
5695 
5696 	mutex_enter(&nvme->n_mgmt_mutex);
5697 
5698 	if (ns->ns_allocated)
5699 		nioc->n_arg |= NVME_NS_STATE_ALLOCATED;
5700 
5701 	if (ns->ns_active)
5702 		nioc->n_arg |= NVME_NS_STATE_ACTIVE;
5703 
5704 	if (ns->ns_attached)
5705 		nioc->n_arg |= NVME_NS_STATE_ATTACHED;
5706 
5707 	if (ns->ns_ignore)
5708 		nioc->n_arg |= NVME_NS_STATE_IGNORED;
5709 
5710 	mutex_exit(&nvme->n_mgmt_mutex);
5711 
5712 	return (0);
5713 }
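
/*
 * Illustrative sketch (not part of the driver): the namespace state comes
 * back as a bitmask in n_arg.  Assuming the NVME_IOC_NS_STATE request and the
 * NVME_NS_STATE_* flags from <sys/nvme.h>, with fd being a namespace minor:
 *
 *	nvme_ioctl_t nioc = { 0 };
 *
 *	if (ioctl(fd, NVME_IOC_NS_STATE, &nioc) == 0 &&
 *	    (nioc.n_arg & NVME_NS_STATE_ATTACHED) != 0)
 *		printf("namespace is attached to blkdev\n");
 */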
5714 
5715 static int
5716 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
5717     int *rval_p)
5718 {
5719 #ifndef __lock_lint
5720 	_NOTE(ARGUNUSED(rval_p));
5721 #endif
5722 	minor_t minor = getminor(dev);
5723 	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
5724 	int nsid = NVME_MINOR_NSID(minor);
5725 	int rv = 0;
5726 	nvme_ioctl_t nioc;
5727 
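	/*
	 * The handlers below are indexed by NVME_IOC_CMD(cmd), so the order
	 * of the entries must match the numbering of the NVME_IOC_* requests.
	 * Entry 0 is unused because the request numbers start at 1.
	 */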
5728 	int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
5729 		NULL,
5730 		nvme_ioctl_identify,
5731 		nvme_ioctl_identify,
5732 		nvme_ioctl_capabilities,
5733 		nvme_ioctl_get_logpage,
5734 		nvme_ioctl_get_features,
5735 		nvme_ioctl_intr_cnt,
5736 		nvme_ioctl_version,
5737 		nvme_ioctl_format,
5738 		nvme_ioctl_detach,
5739 		nvme_ioctl_attach,
5740 		nvme_ioctl_firmware_download,
5741 		nvme_ioctl_firmware_commit,
5742 		nvme_ioctl_passthru,
5743 		nvme_ioctl_ns_state
5744 	};
5745 
5746 	if (nvme == NULL)
5747 		return (ENXIO);
5748 
5749 	if (nsid > nvme->n_namespace_count)
5750 		return (ENXIO);
5751 
5752 	if (IS_DEVCTL(cmd))
5753 		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
5754 
5755 #ifdef _MULTI_DATAMODEL
5756 	switch (ddi_model_convert_from(mode & FMODELS)) {
5757 	case DDI_MODEL_ILP32: {
5758 		nvme_ioctl32_t nioc32;
5759 		if (ddi_copyin((void*)arg, &nioc32, sizeof (nvme_ioctl32_t),
5760 		    mode) != 0)
5761 			return (EFAULT);
5762 		nioc.n_len = nioc32.n_len;
5763 		nioc.n_buf = nioc32.n_buf;
5764 		nioc.n_arg = nioc32.n_arg;
5765 		break;
5766 	}
5767 	case DDI_MODEL_NONE:
5768 #endif
5769 		if (ddi_copyin((void*)arg, &nioc, sizeof (nvme_ioctl_t), mode)
5770 		    != 0)
5771 			return (EFAULT);
5772 #ifdef _MULTI_DATAMODEL
5773 		break;
5774 	}
5775 #endif
5776 
5777 	if (nvme->n_dead && cmd != NVME_IOC_DETACH)
5778 		return (EIO);
5779 
5781 	if (cmd == NVME_IOC_IDENTIFY_CTRL) {
5782 		/*
5783 		 * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl and
5784 		 * attachment point nodes.
5785 		 */
5786 		nsid = 0;
5787 	} else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) {
5788 		/*
5789 		 * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node; it
5790 		 * will always return identify data for namespace 1.
5791 		 */
5792 		nsid = 1;
5793 	}
5794 
5795 	if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
5796 		rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
5797 		    cred_p);
5798 	else
5799 		rv = EINVAL;
5800 
5801 #ifdef _MULTI_DATAMODEL
5802 	switch (ddi_model_convert_from(mode & FMODELS)) {
5803 	case DDI_MODEL_ILP32: {
5804 		nvme_ioctl32_t nioc32;
5805 
5806 		nioc32.n_len = (size32_t)nioc.n_len;
5807 		nioc32.n_buf = (uintptr32_t)nioc.n_buf;
5808 		nioc32.n_arg = nioc.n_arg;
5809 
5810 		if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
5811 		    mode) != 0)
5812 			return (EFAULT);
5813 		break;
5814 	}
5815 	case DDI_MODEL_NONE:
5816 #endif
5817 		if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode)
5818 		    != 0)
5819 			return (EFAULT);
5820 #ifdef _MULTI_DATAMODEL
5821 		break;
5822 	}
5823 #endif
5824 
5825 	return (rv);
5826 }
5827 
5828 /*
5829  * DDI UFM Callbacks
5830  */
5831 static int
5832 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
5833     ddi_ufm_image_t *img)
5834 {
5835 	nvme_t *nvme = arg;
5836 
5837 	if (imgno != 0)
5838 		return (EINVAL);
5839 
5840 	ddi_ufm_image_set_desc(img, "Firmware");
5841 	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
5842 
5843 	return (0);
5844 }
5845 
5846 /*
5847  * Fill out firmware slot information for the requested slot.  The firmware
5848  * slot information is gathered by requesting the Firmware Slot Information log
5849  * page.  The format of the page is described in section 5.10.1.3 of the
5850  * NVMe specification.
5851  *
5852  * We lazily cache the log page on the first call and invalidate it after a
5853  * successful firmware download or commit.  A mutex protects the cached
5854  * data, as the state can change asynchronously to this callback.
5855  */
5856 static int
5857 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
5858     uint_t slotno, ddi_ufm_slot_t *slot)
5859 {
5860 	nvme_t *nvme = arg;
5861 	void *log = NULL;
5862 	size_t bufsize;
5863 	ddi_ufm_attr_t attr = 0;
5864 	char fw_ver[NVME_FWVER_SZ + 1];
5865 	int ret;
5866 
5867 	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
5868 		return (EINVAL);
5869 
5870 	mutex_enter(&nvme->n_fwslot_mutex);
5871 	if (nvme->n_fwslot == NULL) {
5872 		ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize,
5873 		    NVME_LOGPAGE_FWSLOT, 0);
5874 		if (ret != DDI_SUCCESS ||
5875 		    bufsize != sizeof (nvme_fwslot_log_t)) {
5876 			if (log != NULL)
5877 				kmem_free(log, bufsize);
5878 			mutex_exit(&nvme->n_fwslot_mutex);
5879 			return (EIO);
5880 		}
5881 		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
5882 	}
5883 
5884 	/*
5885 	 * NVMe numbers firmware slots starting at 1.
5886 	 */
5887 	if (slotno == (nvme->n_fwslot->fw_afi - 1))
5888 		attr |= DDI_UFM_ATTR_ACTIVE;
5889 
5890 	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
5891 		attr |= DDI_UFM_ATTR_WRITEABLE;
5892 
5893 	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
5894 		attr |= DDI_UFM_ATTR_EMPTY;
5895 	} else {
5896 		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
5897 		    NVME_FWVER_SZ);
5898 		fw_ver[NVME_FWVER_SZ] = '\0';
5899 		ddi_ufm_slot_set_version(slot, fw_ver);
5900 	}
5901 	mutex_exit(&nvme->n_fwslot_mutex);
5902 
5903 	ddi_ufm_slot_set_attrs(slot, attr);
5904 
5905 	return (0);
5906 }
5907 
5908 static int
5909 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
5910 {
5911 	*caps = DDI_UFM_CAP_REPORT;
5912 	return (0);
5913 }
5914