1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
14 * Copyright 2019 Unix Software Ltd.
15 * Copyright 2020 Joyent, Inc.
16 * Copyright 2020 Racktop Systems.
17 * Copyright 2025 Oxide Computer Company.
18 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
19 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
20 */
21
22 /*
23 * blkdev driver for NVMe compliant storage devices
24 *
25 * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
26 * devices. Features are added to the driver as we encounter devices that
27 * require them or as our own needs dictate, so some commands or log pages may not
28 * take advantage of newer features that devices support at this time. When you
29 * encounter such a case, it is generally fine to add that support to the driver
30 * as long as you take care to ensure that the requisite device version is met
31 * before using it.
32 *
33 * The driver has only been tested on x86 systems and will not work on big-
34 * endian systems without changes to the code accessing registers and data
35 * structures used by the hardware.
36 *
37 *
38 * Interrupt Usage:
39 *
40 * The driver will use a single interrupt while configuring the device as the
41 * specification requires, but contrary to the specification it will try to use
42 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
43 * will switch to multiple-message MSI(-X) if supported. The driver wants to
44 * have one interrupt vector per CPU, but it will work correctly if fewer are
45 * available. Interrupts can be shared by queues; the interrupt handler will
46 * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
47 * the admin queue will share an interrupt with one I/O queue. The interrupt
48 * handler will retrieve completed commands from all queues sharing an interrupt
49 * vector and will post them to a taskq for completion processing.
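 *
 * For example (illustrative only): with n_intr_cnt == 4 and four I/O queues,
 * vectors 1 through 3 each service a single I/O queue, while vector 0 services
 * the admin queue and the remaining I/O queue, per the stepping through the
 * queue array described above.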
50 *
51 *
52 * Command Processing:
53 *
54 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
55 * to 65536 I/O commands. The driver will configure one I/O queue pair per
56 * available interrupt vector, with the queue length usually much smaller than
57 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
58 * interrupt vectors will be used.
59 *
60 * Additionally the hardware provides a single special admin queue pair that can
61 * hold up to 4096 admin commands.
62 *
63 * From the hardware perspective both queues of a queue pair are independent,
64 * but they share some driver state: the command array (holding pointers to
65 * commands currently being processed by the hardware) and the active command
66 * counter. Access to a submission queue and the shared state is protected by
67 * nq_mutex; the completion queue is protected by ncq_mutex.
68 *
69 * When a command is submitted to a queue pair the active command counter is
70 * incremented and a pointer to the command is stored in the command array. The
71 * array index is used as command identifier (CID) in the submission queue
72 * entry. Some commands may take a very long time to complete, and if the queue
73 * wraps around in that time a submission may find the next array slot to still
74 * be used by a long-running command. In this case the array is sequentially
75 * searched for the next free slot. The length of the command array is the same
76 * as the configured queue length. Queue overrun is prevented by the nq_sema
77 * semaphore, so a command submission may block if the queue is full.
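 *
 * As a rough sketch of the submission path described above (this is not the
 * literal implementation; "slot" simply stands for the array index currently
 * being tried):
 *
 *	sema_p(&qp->nq_sema);			wait for a free queue slot
 *	mutex_enter(&qp->nq_mutex);
 *	while (qp->nq_cmd[slot] != NULL)	find an unused CID
 *		slot = (slot + 1) % qp->nq_nentry;
 *	qp->nq_cmd[slot] = cmd;
 *	qp->nq_active_cmds++;
 *	... fill in the SQ entry with CID == slot and ring the doorbell ...
 *	mutex_exit(&qp->nq_mutex);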
78 *
79 *
80 * Polled I/O Support:
81 *
82 * For kernel core dump support the driver can do polled I/O. As interrupts are
83 * turned off while dumping, the driver will just submit a command in the regular
84 * way, and then repeatedly attempt a command retrieval until it gets the
85 * command back.
86 *
87 *
88 * Namespace Support:
89 *
90 * NVMe devices can have multiple namespaces, each being an independent data
91 * store. The driver supports multiple namespaces and creates a blkdev interface
92 * for each namespace found. Namespaces can have various attributes to support
93 * protection information. This driver does not support any of this and ignores
94 * namespaces that have these attributes.
95 *
96 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
97 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
98 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
99 * if present to generate the devid, and passes the EUI64 to blkdev to use it
100 * in the device node names.
101 *
102 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
103 * single controller. This is an artificial limit imposed by the driver to be
104 * able to address a reasonable number of controllers and namespaces while
105 * fitting within the constraints of MAXMIN32, aka a 32-bit device number which
106 * only has 18-bits for the minor number. See the minor node section for more
107 * information.
108 *
109 *
110 * Minor nodes:
111 *
112 * For each NVMe device the driver exposes one minor node for the controller and
113 * one minor node for each namespace. The only operations supported by those
114 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
115 * primary control interface for the devices. The character device is a private
116 * interface and we attempt stability through libnvme and more so nvmeadm.
117 *
118 * The controller minor node is much more flexible than the namespace minor node
119 * and should be preferred. The controller node allows one to target any
120 * namespace that the device has, while the namespace minor is limited to its
121 * own namespace. While the namespace minor exists, it should not be relied upon
122 * and is not used by libnvme.
123 *
124 * The minor number space is split in two. We use the lower part to support the
125 * controller and namespaces as described above in the 'Namespace Support'
126 * section. The second set is used for cloning opens. We set aside one million
127 * minors for this purpose. We utilize a cloning open so that we can have
128 * per-file_t state, which is how we implement and track locking state and
129 * related data.
130 *
131 * When we have this cloned open, then we allocate a new nvme_minor_t which gets
132 * its minor number from the nvme_open_minors id_space_t and is stored in the
133 * nvme_open_minors_avl. Although open(9E) is called on a controller or namespace
134 * minor, everything that follows occurs in the context of one of these ephemeral
135 * minors.
136 *
137 *
138 * ioctls, Errors, and Exclusive Access:
139 *
140 * All of the logical commands that one can issue are driven through the
141 * ioctl(9E) interface. All of our ioctls have a similar shape where they
142 * all include the 'nvme_ioctl_common_t' as their first member.
143 *
144 * This common ioctl structure is used to communicate the namespace that should
145 * be targeted. When the namespace is left as 0, then that indicates that it
146 * should target whatever the default is of the minor node. For a namespace
147 * minor, that will be transparently rewritten to the namespace's namespace id.
148 *
149 * In addition, the nvme_ioctl_common_t structure also has a standard error
150 * return. Our goal in our ioctl path is to ensure that we have useful semantic
151 * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead as
152 * long as we can copy in our structure, then we will set a semantic error. If
153 * we have an error from the controller, then that will be included there.
154 *
155 * Each command has a specific policy that controls whether or not it is allowed
156 * on the namespace or controller minor, whether the broadcast namespace is
157 * allowed, various settings around what kind of exclusive access is allowed,
158 * and more. Each of these is wrapped up in a bit of policy described by the
159 * 'nvme_ioctl_check_t' structure.
160 *
161 * The driver provides exclusion in the form of both a
162 * controller-level and namespace-level read and write lock. Most operations do
163 * not require a lock (e.g. get log page, identify, etc.), but a few do (e.g.
164 * format nvm, firmware related activity, etc.). A read lock guarantees that you
165 * can complete your operation without interference, but read locks are not
166 * required. If you don't take a read lock and someone comes in with a write
167 * lock, then subsequent operations will fail with a semantic error indicating
168 * that you were blocked due to this.
169 *
170 * Here are some of the rules that govern our locks:
171 *
172 * 1. Writers starve readers. Any readers are allowed to finish when there is a
173 * pending writer; however, all subsequent readers will be blocked upon that
174 * writer.
175 * 2. A controller write lock takes priority over all other locks. Put
176 * differently, a controller writer not only starves subsequent controller
177 * readers, but also all namespace read and write locks.
178 * 3. Each namespace lock is independent.
179 * 4. At most a single namespace lock may be owned.
180 * 5. If you own a namespace lock, you may not take a controller lock (to help
181 * with lock ordering).
182 * 6. In a similar spirit, if you own a controller write lock, you may not take
183 * any namespace lock. Someone with the controller write lock can perform any
184 * operations that they need to. However, if you have a controller read lock
185 * you may take any namespace lock.
186 * 7. There is no ability to upgrade a read lock to a write lock.
187 * 8. There is no recursive locking.
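 *
 * For example, under these rules a thread holding a controller read lock may
 * go on to take a namespace write lock (rule 6), while a thread holding any
 * namespace lock may not then take a controller lock (rule 5) and may not take
 * a lock on a second namespace (rule 4).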
188 *
189 * While there's a lot there to keep track of, the goals of these are to
190 * constrain things so as to avoid deadlock. This is more complex than the
191 * original implementation in the driver which only allowed for an exclusive
192 * open that was tied to the thread. The first issue with tying this to the
193 * thread was that it didn't work well for software that utilized thread
194 * pools, like complex daemons. The second issue is that we want the ability for
195 * daemons, such as a FRU monitor, to be able to retain a file descriptor to the
196 * device without blocking others from taking action except during critical
197 * periods.
198 *
199 * In particular to enable something like libnvme, we didn't want someone to
200 * have to open and close the file descriptor to change what kind of exclusive
201 * access they desired.
202 *
203 * There are two different sets of data structures that we employ for tracking
204 * locking information:
205 *
206 * 1) The nvme_lock_t structure is contained in both the nvme_t and the
207 * nvme_namespace_t and tracks the current writer, readers, and pending writers
208 * and readers. Each of these lists, as well as the writer pointer, refers to
209 * our second data structure.
210 *
211 * When a lock is owned by a single writer, then the nl_writer field is set to a
212 * specific minor's lock data structure. If instead readers are present, then
213 * the nl_readers list_t is not empty. An invariant of the system is that if
214 * nl_writer is non-NULL, nl_readers must be empty and conversely, if nl_readers
215 * is not empty, nl_writer must be NULL.
216 *
217 * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one
218 * information structure which represents the minor's controller lock and a
219 * second one that represents the minor's namespace lock. The members of this
220 * are broken into tracking what the current lock is and what it targets. It
221 * also has several members that are intended for debugging (nli_last_change,
222 * nli_acq_kthread, etc.).
223 *
224 * While the minor has two different lock information structures, our rules
225 * ensure that only one of the two can be pending and that they shouldn't result
226 * in a deadlock. When a lock is pending, the caller is sleeping on the minor's
227 * nm_cv member.
228 *
229 * These relationships are represented in the following image which shows a
230 * controller write lock being held with pending readers on the controller
231 * lock and pending writers on one of the controller's namespaces.
232 *
233 * +---------+
234 * | nvme_t |
235 * | |
236 * | n_lock -|-------+
237 * | n_ns -+ | | +-----------------------------+
238 * +-------|-+ +-----------------+ | nvme_minor_t |
239 * | | nvme_lock_t | | |
240 * | | | | +------------------------+ |
241 * | | writer --|-------------->| nvme_minor_lock_info_t | |
242 * | | reader list | | | nm_ctrl_lock | |
243 * | | pending writers | | +------------------------+ |
244 * | | pending readers |------+ | +------------------------+ |
245 * | +-----------------+ | | | nvme_minor_lock_info_t | |
246 * | | | | nm_ns_lock | |
247 * | | | +------------------------+ |
248 * | | +-----------------------------+
249 * +------------------+ | +-----------------+
250 * | nvme_namespace_t | | | nvme_minor_t |
251 * | | | | |
252 * | ns_lock ---+ | | | +-------------+ |
253 * +------------|-----+ +-----------------|>|nm_ctrl_lock | |
254 * | | +-------------+ |
255 * v +-----------------+
256 * +------------------+ ...
257 * | nvme_lock_t | +-----------------+
258 * | | | nvme_minor_t |
259 * | writer | | |
260 * | reader list | | +-------------+ |
261 * | pending writers -|-----------------+ | |nm_ctrl_lock | |
262 * | pending readers | | | +-------------+ |
263 * +------------------+ | +-----------------+
264 * +-----------------------------+ | +-----------------------------+
265 * | nvme_minor_t | | | nvme_minor_t |
266 * | | | | |
267 * | +------------------------+ | | | +------------------------+ |
268 * | | nvme_minor_lock_info_t | | | | | nvme_minor_lock_info_t | |
269 * | | nm_ctrl_lock | | | | | nm_ctrl_lock | |
270 * | +------------------------+ | | | +------------------------+ |
271 * | +------------------------+ | v | +------------------------+ |
272 * | | nvme_minor_lock_info_t |-|-----|->| nvme_minor_lock_info_t | |
273 * | | nm_ns_lock | | | | nm_ns_lock | |
274 * | +------------------------+ | | +------------------------+ |
275 * +-----------------------------+ +-----------------------------+
276 *
277 * Blkdev Interface:
278 *
279 * This driver uses blkdev to do all the heavy lifting involved with presenting
280 * a disk device to the system. As a result, the processing of I/O requests is
281 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
282 * setup, and splitting of transfers into manageable chunks.
283 *
284 * I/O requests coming in from blkdev are turned into NVM commands and posted to
285 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
286 * queues. There is currently no timeout handling of I/O commands.
287 *
288 * Blkdev also supports querying device/media information and generating a
289 * devid. The driver reports the best block size as determined by the namespace
290 * format back to blkdev as physical block size to support partition and block
291 * alignment. The devid is either based on the namespace GUID or EUI64, if
292 * present, or composed using the device vendor ID, model number, serial number,
293 * and the namespace ID.
294 *
295 *
296 * Error Handling:
297 *
298 * Error handling is currently limited to detecting fatal hardware errors,
299 * either by asynchronous events, or synchronously through command status or
300 * admin command timeouts. In case of severe errors the device is fenced off and
301 * all further requests will return EIO. FMA is then called to fault the device.
302 *
303 * The hardware has a limit for outstanding asynchronous event requests. Before
304 * this limit is known the driver assumes it is at least 1 and posts a single
305 * asynchronous request. Later when the limit is known more asynchronous event
306 * requests are posted to allow quicker reception of error information. When an
307 * asynchronous event is posted by the hardware the driver will parse the error
308 * status fields and log information or fault the device, depending on the
309 * severity of the asynchronous event. The asynchronous event request is then
310 * reused and posted to the admin queue again.
311 *
312 * On command completion the command status is checked for errors. In case of
313 * errors indicating a driver bug the driver panics. Almost all other error
314 * status values just cause EIO to be returned.
315 *
316 * Command timeouts are currently detected for all admin commands except
317 * asynchronous event requests. If a command times out and the hardware appears
318 * to be healthy the driver attempts to abort the command. The abort command
319 * timeout is a separate tunable but the original command timeout will be used
320 * if it is greater. If the abort times out too, the driver assumes the device
321 * to be dead, fences it off, and calls FMA to retire it. In all other cases
322 * the aborted command should return immediately with a status indicating it
323 * was aborted, and the driver will wait indefinitely for that to happen. No
324 * timeout handling of normal I/O commands is presently done.
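 *
 * As a concrete example of the timeout interplay: with the default tunables
 * below, an abort issued for a timed-out FORMAT NVM command is given the full
 * nvme_format_cmd_timeout (600s) rather than nvme_abort_cmd_timeout (60s),
 * because the original command's timeout is the greater of the two.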
325 *
326 * Any command that times out due to the controller dropping dead will be put on
327 * the nvme_lost_cmds list if it references DMA memory. This will prevent the DMA
328 * memory from being reused by the system and later being written to by a "dead"
329 * NVMe controller.
330 *
331 *
332 * Locking:
333 *
334 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
335 * when accessing shared state and submission queue registers; ncq_mutex
336 * is held when accessing completion queue state and registers.
337 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
338 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
339 * mutexes themselves.
340 *
341 * Each command also has its own nc_mutex, which is associated with the
342 * condition variable nc_cv. It is only used on admin commands which are run
343 * synchronously. In that case it must be held across calls to
344 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
345 * nvme_admin_cmd(). It must also be held whenever the completion state of the
346 * command is changed or while an admin command timeout is handled.
347 *
348 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
349 * More than one nc_mutex may only be held when aborting commands. In this case,
350 * the nc_mutex of the command to be aborted must be held across the call to
351 * nvme_abort_cmd() to prevent the command from completing while the abort is in
352 * progress.
353 *
354 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
355 * acquired first. More than one nq_mutex is never held by a single thread.
356 * The ncq_mutex is only held by nvme_retrieve_cmd() and
357 * nvme_process_iocq(). nvme_process_iocq() is only called from the
358 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
359 * mutex is non-contentious but is required for implementation completeness
360 * and safety.
361 *
362 * Each nvme_t has an n_admin_stat_mutex that protects the admin command
363 * statistics structure. If this is taken in conjunction with any other locks,
364 * then it must be taken last.
365 *
366 * There is one mutex n_minor_mutex which protects all open flags nm_open and
367 * exclusive-open thread pointers nm_oexcl of each minor node associated with a
368 * controller and its namespaces.
369 *
370 * In addition, there is a logical namespace management mutex which protects the
371 * data about namespaces. When interrogating the metadata of any namespace, this
372 * lock must be held. This gets tricky as we need to call into blkdev, which may
373 * issue callbacks into us that want this lock, and it is illegal to hold locks
374 * across those blkdev calls as otherwise they might lead to deadlock (blkdev
375 * leverages ndi_devi_enter()).
376 *
377 * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev
378 * read-only. The idea is that most callers will use the NVME level which says
379 * this is a full traditional mutex operation. The BDRO level is used by blkdev
380 * callback functions and is a promise to only read the data. When a blkdev
381 * operation starts, the lock holder will use nvme_mgmt_bd_start(). This
382 * strictly speaking drops the mutex, but records that the lock is logically
383 * held by the thread that did the start() operation.
384 *
385 * During this time, other threads (or even the same one) may end up calling
386 * into nvme_mgmt_lock(). Only one thread may still hold the lock at any time;
387 * however, the BDRO level will be allowed to proceed during this time. This
388 * allows us to make consistent progress and honor the blkdev lock ordering
389 * requirements, though it is not as straightforward as a simple mutex.
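 *
 * A sketch of the expected usage pattern when a caller must call into blkdev
 * (the operations between the calls are placeholders):
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	... examine or update namespace metadata ...
 *	nvme_mgmt_bd_start(nvme);
 *	... call into blkdev ...
 *	nvme_mgmt_bd_end(nvme);
 *	nvme_mgmt_unlock(nvme);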
390 *
391 * Quiesce / Fast Reboot:
392 *
393 * The driver currently does not support fast reboot. A quiesce(9E) entry point
394 * is still provided which is used to send a shutdown notification to the
395 * device.
396 *
397 *
398 * NVMe Hotplug:
399 *
400 * The driver supports hot removal. The driver uses the NDI event framework
401 * to register a callback, nvme_remove_callback, to clean up when a disk is
402 * removed. In particular, the driver will unqueue outstanding I/O commands and
403 * set n_dead on the softstate to true so that other operations, such as ioctls
404 * and command submissions, fail as well.
405 *
406 * While the callback registration relies on the NDI event framework, the
407 * removal event itself is kicked off in the PCIe hotplug framework, when the
408 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
409 * device was removed from the slot.
410 *
411 * The NVMe driver instance itself will remain until the final close of the
412 * device.
413 *
414 *
415 * DDI UFM Support
416 *
417 * The driver supports the DDI UFM framework for reporting information about
418 * the device's firmware image and slot configuration. This data can be
419 * queried by userland software via ioctls to the ufm driver. For more
420 * information, see ddi_ufm(9E).
421 *
422 *
423 * Driver Configuration:
424 *
425 * The following driver properties can be changed to control some aspects of the
426 * driver's operation:
427 * - strict-version: can be set to 0 to allow devices conforming to newer
428 * major versions to be used
429 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
430 * specific command status as a fatal error leading to device faulting
431 * - admin-queue-len: the maximum length of the admin queue (16-4096)
432 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
433 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
434 * - async-event-limit: the maximum number of asynchronous event requests to be
435 * posted by the driver
436 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
437 * cache
438 * - min-phys-block-size: the minimum physical block size to report to blkdev,
439 * which is among other things the basis for ZFS vdev ashift
440 * - max-submission-queues: the maximum number of I/O submission queues.
441 * - max-completion-queues: the maximum number of I/O completion queues,
442 * can be less than max-submission-queues, in which case the completion
443 * queues are shared.
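 *
 * For example, a hypothetical nvme.conf fragment using a few of these
 * properties (values chosen purely for illustration) might look like:
 *
 *	volatile-write-cache-enable=0;
 *	min-phys-block-size=4096;
 *	max-submission-queues=16;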
444 *
445 * In addition to the above properties, some device-specific tunables can be
446 * configured using the nvme-config-list global property. The value of this
447 * property is a list of triplets. The formal syntax is:
448 *
449 * nvme-config-list ::= <triplet> [, <triplet>]* ;
450 * <triplet> ::= "<model>" , "<rev-list>" , "<tuple-list>"
451 * <rev-list> ::= [ <fwrev> [, <fwrev>]*]
452 * <tuple-list> ::= <tunable> [, <tunable>]*
453 * <tunable> ::= <name> : <value>
454 *
455 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
456 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
457 * contains one or more tunables to apply to all controllers that match the
458 * specified model number and optionally firmware revision. Each <tunable> is a
459 * <name> : <value> pair. Supported tunables are:
460 *
461 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
462 * specific command status as a fatal error leading to device faulting
463 *
464 * - min-phys-block-size: the minimum physical block size to report to blkdev,
465 * which is among other things the basis for ZFS vdev ashift
466 *
467 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
468 * volatile write cache, if present
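 *
 * For example, the following hypothetical entry (model and firmware revision
 * strings are made up) applies two tunables to matching controllers:
 *
 *	nvme-config-list = "ACME FooDrive 1000", "FW1.23",
 *	    "min-phys-block-size:4096,volatile-write-cache:off";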
469 *
470 *
471 * TODO:
472 * - figure out sane default for I/O queue depth reported to blkdev
473 * - FMA handling of media errors
474 * - support for devices supporting very large I/O requests using chained PRPs
475 * - support for configuring hardware parameters like interrupt coalescing
476 * - support for media formatting and hard partitioning into namespaces
477 * - support for big-endian systems
478 * - support for fast reboot
479 * - support for NVMe Subsystem Reset (1.1)
480 * - support for Scatter/Gather lists (1.1)
481 * - support for Reservations (1.1)
482 * - support for power management
483 */
484
485 #include <sys/byteorder.h>
486 #ifdef _BIG_ENDIAN
487 #error nvme driver needs porting for big-endian platforms
488 #endif
489
490 #include <sys/modctl.h>
491 #include <sys/conf.h>
492 #include <sys/devops.h>
493 #include <sys/ddi.h>
494 #include <sys/ddi_ufm.h>
495 #include <sys/sunddi.h>
496 #include <sys/sunndi.h>
497 #include <sys/bitmap.h>
498 #include <sys/sysmacros.h>
499 #include <sys/param.h>
500 #include <sys/varargs.h>
501 #include <sys/cpuvar.h>
502 #include <sys/disp.h>
503 #include <sys/blkdev.h>
504 #include <sys/atomic.h>
505 #include <sys/archsystm.h>
506 #include <sys/sata/sata_hba.h>
507 #include <sys/stat.h>
508 #include <sys/policy.h>
509 #include <sys/list.h>
510 #include <sys/dkio.h>
511 #include <sys/pci.h>
512 #include <sys/mkdev.h>
513
514 #include <sys/nvme.h>
515
516 #ifdef __x86
517 #include <sys/x86_archext.h>
518 #endif
519
520 #include "nvme_reg.h"
521 #include "nvme_var.h"
522
523 /*
524 * Assertions to make sure that we've properly captured various aspects of the
525 * packed structures and haven't broken them during updates.
526 */
527 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
528 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
529 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
530 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
531 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
532 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
533 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
534 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
535
536 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
537 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
538 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
539 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
540 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
541 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
542
543 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
544 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);
545
546 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
547 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
548 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
549
550 CTASSERT(sizeof (nvme_nschange_list_t) == 4096);
551
552 /* NVMe spec version supported */
553 static const int nvme_version_major = 2;
554
555 /* Tunable for FORMAT NVM command timeout in seconds, default is 600s */
556 uint32_t nvme_format_cmd_timeout = 600;
557
558 /* Tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
559 uint32_t nvme_commit_save_cmd_timeout = 15;
560
561 /*
562 * Tunable for the admin command timeout used for commands other than those
563 * with their own timeouts defined above; in seconds. While most commands are
564 * expected to complete very quickly (sub-second), experience has shown that
565 * some controllers can occasionally be a bit slower, and not always consistent
566 * in the time taken - times of up to around 4.2s have been observed. Setting
567 * this to 15s by default provides headroom.
568 */
569 uint32_t nvme_admin_cmd_timeout = 15;
570
571 /*
572 * Tunable for abort command timeout in seconds, default is 60s. This timeout
573 * is used when issuing an abort command, currently only in response to a
574 * different admin command timing out. Aborts always complete after the command
575 * that they are attempting to abort so we need to allow enough time for the
576 * controller to process the long running command that we are attempting to
577 * abort. The abort timeout here is only used if it is greater than the timeout
578 * for the command that is being aborted.
579 */
580 uint32_t nvme_abort_cmd_timeout = 60;
581
582 /*
583 * Tunable for the size of arbitrary vendor specific admin commands,
584 * default is 16MiB.
585 */
586 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;
587
588 /*
589 * Tunable for the max timeout of arbitrary vendor specific admin commands,
590 * default is 60s.
591 */
592 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
593
594 /*
595 * This ID space, AVL, and lock are used for keeping track of minor state across
596 * opens between different devices.
597 */
598 static id_space_t *nvme_open_minors;
599 static avl_tree_t nvme_open_minors_avl;
600 kmutex_t nvme_open_minors_mutex;
601
602 /*
603 * Removal taskq used for n_dead callback processing.
604 */
605 taskq_t *nvme_dead_taskq;
606
607 /*
608 * This enumeration is used in tandem with nvme_mgmt_lock() to describe which
609 * form of the lock is being taken. See the theory statement for more context.
610 */
611 typedef enum {
612 /*
613 * This is the primary form of taking the management lock and indicates
614 * that the user intends to do a read/write of it. This should always be
615 * used for any ioctl paths or truly anything other than a blkdev
616 * information operation.
617 */
618 NVME_MGMT_LOCK_NVME,
619 /*
620 * This is a subordinate form of the lock whereby the user is in blkdev
621 * callback context and will only intend to read the namespace data.
622 */
623 NVME_MGMT_LOCK_BDRO
624 } nvme_mgmt_lock_level_t;
625
626 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
627 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
628 static int nvme_quiesce(dev_info_t *);
629 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
630 static int nvme_setup_interrupts(nvme_t *, int, int);
631 static void nvme_release_interrupts(nvme_t *);
632 static uint_t nvme_intr(caddr_t, caddr_t);
633
634 static void nvme_shutdown(nvme_t *, boolean_t);
635 static boolean_t nvme_reset(nvme_t *, boolean_t);
636 static int nvme_init(nvme_t *);
637 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
638 static void nvme_free_cmd(nvme_cmd_t *);
639 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
640 bd_xfer_t *);
641 static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
642 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
643 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
644 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
645 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
646 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
647 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
648 static void nvme_wakeup_cmd(void *);
649 static void nvme_async_event_task(void *);
650
651 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
652 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
653 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
654 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
655 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
656 static inline int nvme_check_cmd_status(nvme_cmd_t *);
657 static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
658 nvme_ioctl_common_t *);
659
660 static int nvme_abort_cmd(nvme_cmd_t *, const uint32_t);
661 static void nvme_async_event(nvme_t *);
662 static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
663 static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
664 uint8_t);
665 static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
666 void **);
667 static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
668 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
669 uint32_t *);
670 static int nvme_write_cache_set(nvme_t *, boolean_t);
671 static int nvme_set_nqueues(nvme_t *);
672
673 static void nvme_free_dma(nvme_dma_t *);
674 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
675 nvme_dma_t **);
676 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
677 nvme_dma_t **);
678 static void nvme_free_qpair(nvme_qpair_t *);
679 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
680 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
681
682 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
683 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
684 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
685 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
686
687 static boolean_t nvme_check_regs_hdl(nvme_t *);
688 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
689
690 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);
691
692 static void nvme_bd_xfer_done(void *);
693 static void nvme_bd_driveinfo(void *, bd_drive_t *);
694 static int nvme_bd_mediainfo(void *, bd_media_t *);
695 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
696 static int nvme_bd_read(void *, bd_xfer_t *);
697 static int nvme_bd_write(void *, bd_xfer_t *);
698 static int nvme_bd_sync(void *, bd_xfer_t *);
699 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
700 static int nvme_bd_free_space(void *, bd_xfer_t *);
701
702 static int nvme_prp_dma_constructor(void *, void *, int);
703 static void nvme_prp_dma_destructor(void *, void *);
704
705 static void nvme_prepare_devid(nvme_t *, uint32_t);
706
707 /* DDI UFM callbacks */
708 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
709 ddi_ufm_image_t *);
710 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
711 ddi_ufm_slot_t *);
712 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
713
714 static int nvme_open(dev_t *, int, int, cred_t *);
715 static int nvme_close(dev_t, int, int, cred_t *);
716 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
717
718 static int nvme_init_ns(nvme_t *, uint32_t);
719 static boolean_t nvme_attach_ns(nvme_t *, nvme_ioctl_common_t *);
720 static boolean_t nvme_detach_ns(nvme_t *, nvme_ioctl_common_t *);
721
722 static int nvme_minor_comparator(const void *, const void *);
723
724 static ddi_ufm_ops_t nvme_ufm_ops = {
725 NULL,
726 nvme_ufm_fill_image,
727 nvme_ufm_fill_slot,
728 nvme_ufm_getcaps
729 };
730
731 /*
732 * Minor numbers are split amongst those used for controllers and for device
733 * opens. The number of controller minors is limited based on MAXMIN32 per
734 * the theory statement. We allocate 1 million minors as a total guess at a
735 * number that'll probably be enough. The starting point of the open minors can
736 * be shifted to accommodate future expansion of the NVMe device minors.
737 */
738 #define NVME_MINOR_INST_SHIFT 9
739 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
740 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT)
741 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
742 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2)
743
744 #define NVME_OPEN_NMINORS (1024 * 1024)
745 #define NVME_OPEN_MINOR_MIN (MAXMIN32 + 1)
746 #define NVME_OPEN_MINOR_MAX_EXCL (NVME_OPEN_MINOR_MIN + \
747 NVME_OPEN_NMINORS)
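
/*
 * For illustration: with NVME_MINOR_INST_SHIFT == 9, NVME_MINOR(3, 5) is
 * (3 << 9) | 5 == 1541, and NVME_MINOR_MAX works out to NVME_MINOR(1, 0) - 2
 * == 510. Cloning opens draw from the one million minors starting at
 * NVME_OPEN_MINOR_MIN, just above MAXMIN32.
 */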
748
749 #define NVME_BUMP_STAT(nvme, stat) \
750 atomic_inc_64(&nvme->n_device_stat.nds_ ## stat.value.ui64)
751
752 static void *nvme_state;
753 static kmem_cache_t *nvme_cmd_cache;
754
755 /*
756 * DMA attributes for queue DMA memory
757 *
758 * Queue DMA memory must be page aligned. The maximum length of a queue is
759 * 65536 entries, and an entry can be 64 bytes long.
760 */
761 static const ddi_dma_attr_t nvme_queue_dma_attr = {
762 .dma_attr_version = DMA_ATTR_V0,
763 .dma_attr_addr_lo = 0,
764 .dma_attr_addr_hi = 0xffffffffffffffffULL,
765 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
766 .dma_attr_align = 0x1000,
767 .dma_attr_burstsizes = 0x7ff,
768 .dma_attr_minxfer = 0x1000,
769 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
770 .dma_attr_seg = 0xffffffffffffffffULL,
771 .dma_attr_sgllen = 1,
772 .dma_attr_granular = 1,
773 .dma_attr_flags = 0,
774 };
775
776 /*
777 * DMA attributes for transfers using Physical Region Page (PRP) entries
778 *
779 * A PRP entry describes one page of DMA memory using the page size specified
780 * in the controller configuration's memory page size register (CC.MPS). It uses
781 * a 64bit base address aligned to this page size. There is no limitation on
782 * chaining PRPs together for arbitrarily large DMA transfers. These DMA
783 * attributes will be copied into the nvme_t during nvme_attach() and the
784 * dma_attr_maxxfer will be updated.
785 */
786 static const ddi_dma_attr_t nvme_prp_dma_attr = {
787 .dma_attr_version = DMA_ATTR_V0,
788 .dma_attr_addr_lo = 0,
789 .dma_attr_addr_hi = 0xffffffffffffffffULL,
790 .dma_attr_count_max = 0xfff,
791 .dma_attr_align = 0x1000,
792 .dma_attr_burstsizes = 0x7ff,
793 .dma_attr_minxfer = 0x1000,
794 .dma_attr_maxxfer = 0x1000,
795 .dma_attr_seg = 0xfff,
796 .dma_attr_sgllen = -1,
797 .dma_attr_granular = 1,
798 .dma_attr_flags = 0,
799 };
800
801 /*
802 * DMA attributes for transfers using scatter/gather lists
803 *
804 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
805 * 32bit length field. SGL Segment and SGL Last Segment entries require the
806 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied
807 * into the nvme_t, they are not currently used for any I/O.
808 */
809 static const ddi_dma_attr_t nvme_sgl_dma_attr = {
810 .dma_attr_version = DMA_ATTR_V0,
811 .dma_attr_addr_lo = 0,
812 .dma_attr_addr_hi = 0xffffffffffffffffULL,
813 .dma_attr_count_max = 0xffffffffUL,
814 .dma_attr_align = 1,
815 .dma_attr_burstsizes = 0x7ff,
816 .dma_attr_minxfer = 0x10,
817 .dma_attr_maxxfer = 0xfffffffffULL,
818 .dma_attr_seg = 0xffffffffffffffffULL,
819 .dma_attr_sgllen = -1,
820 .dma_attr_granular = 0x10,
821 .dma_attr_flags = 0
822 };
823
824 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
825 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
826 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
827 .devacc_attr_dataorder = DDI_STRICTORDER_ACC
828 };
829
830 /*
831 * ioctl validation policies. These are policies that determine which namespaces
832 * are allowed or disallowed for various operations. Note, all policy items
833 * should be explicitly listed here to help make it clear what our intent is.
834 * That is also why some of these are identical or repeated when they cover
835 * different ioctls.
836 */
837
838 /*
839 * The controller information ioctl generally contains read-only information
840 * about the controller that is sourced from multiple different pieces of
841 * information. This does not operate on a namespace and none are accepted.
842 */
843 static const nvme_ioctl_check_t nvme_check_ctrl_info = {
844 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
845 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
846 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
847 };
848
849 /*
850 * The kernel namespace information requires a namespace ID to be specified. It
851 * does not allow for the broadcast ID to be specified.
852 */
853 static const nvme_ioctl_check_t nvme_check_ns_info = {
854 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
855 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
856 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
857 };
858
859 /*
860 * Identify commands are allowed to operate on a namespace minor. Unfortunately,
861 * the namespace field in identify commands is a bit weird. In particular, some
862 * commands need a valid namespace, while others are namespace listing
863 * operations, which means illegal namespaces like zero are allowed.
864 */
865 static const nvme_ioctl_check_t nvme_check_identify = {
866 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
867 .nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
868 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
869 };
870
871 /*
872 * The get log page command requires the ability to specify namespaces. When
873 * targeting the controller, one must use the broadcast NSID.
874 */
875 static const nvme_ioctl_check_t nvme_check_get_logpage = {
876 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
877 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
878 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
879 };
880
881 /*
882 * When getting a feature, we do not want rewriting behavior as most features do
883 * not require a namespace to be specified. Specific instances are checked in
884 * nvme_validate_get_feature().
885 */
886 static const nvme_ioctl_check_t nvme_check_get_feature = {
887 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
888 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
889 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
890 };
891
892 /*
893 * Format commands must target a namespace. The broadcast namespace must be used
894 * when referring to the controller.
895 */
896 static const nvme_ioctl_check_t nvme_check_format = {
897 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
898 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
899 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
900 };
901
902 /*
903 * Attach and detach must always target a namespace. However, the broadcast
904 * namespace is not allowed. We still perform rewriting so that specifying
905 * the controller node with 0 will be caught.
906 */
907 static const nvme_ioctl_check_t nvme_check_attach_detach = {
908 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
909 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
910 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
911 };
912
913 /*
914 * Firmware operations must not target a namespace and are only allowed from the
915 * controller.
916 */
917 static const nvme_ioctl_check_t nvme_check_firmware = {
918 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
919 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
920 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
921 };
922
923 /*
924 * Passthru commands are an odd set. We only allow them from the primary
925 * controller; however, we allow a namespace to be specified in them and allow
926 * the broadcast namespace. We do not perform rewriting because we don't know
927 * what the semantics are. We explicitly exempt passthru commands from needing
928 * an exclusive lock and leave it up to them to tell us the impact of the
929 * command and semantics. As this is a privileged interface and the semantics
930 * are arbitrary, there's not much we can do without some assistance from the
931 * consumer.
932 */
933 static const nvme_ioctl_check_t nvme_check_passthru = {
934 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
935 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
936 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
937 };
938
939 /*
940 * Lock operations are allowed to target a namespace, but must not be rewritten.
941 * There is no support for the broadcast namespace. This is the only ioctl that
942 * should skip exclusive checking as it's used to grant it.
943 */
944 static const nvme_ioctl_check_t nvme_check_locking = {
945 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
946 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
947 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
948 };
949
950 static struct cb_ops nvme_cb_ops = {
951 .cb_open = nvme_open,
952 .cb_close = nvme_close,
953 .cb_strategy = nodev,
954 .cb_print = nodev,
955 .cb_dump = nodev,
956 .cb_read = nodev,
957 .cb_write = nodev,
958 .cb_ioctl = nvme_ioctl,
959 .cb_devmap = nodev,
960 .cb_mmap = nodev,
961 .cb_segmap = nodev,
962 .cb_chpoll = nochpoll,
963 .cb_prop_op = ddi_prop_op,
964 .cb_str = 0,
965 .cb_flag = D_NEW | D_MP,
966 .cb_rev = CB_REV,
967 .cb_aread = nodev,
968 .cb_awrite = nodev
969 };
970
971 static struct dev_ops nvme_dev_ops = {
972 .devo_rev = DEVO_REV,
973 .devo_refcnt = 0,
974 .devo_getinfo = ddi_no_info,
975 .devo_identify = nulldev,
976 .devo_probe = nulldev,
977 .devo_attach = nvme_attach,
978 .devo_detach = nvme_detach,
979 .devo_reset = nodev,
980 .devo_cb_ops = &nvme_cb_ops,
981 .devo_bus_ops = NULL,
982 .devo_power = NULL,
983 .devo_quiesce = nvme_quiesce,
984 };
985
986 static struct modldrv nvme_modldrv = {
987 .drv_modops = &mod_driverops,
988 .drv_linkinfo = "NVMe driver",
989 .drv_dev_ops = &nvme_dev_ops
990 };
991
992 static struct modlinkage nvme_modlinkage = {
993 .ml_rev = MODREV_1,
994 .ml_linkage = { &nvme_modldrv, NULL }
995 };
996
997 static bd_ops_t nvme_bd_ops = {
998 .o_version = BD_OPS_CURRENT_VERSION,
999 .o_drive_info = nvme_bd_driveinfo,
1000 .o_media_info = nvme_bd_mediainfo,
1001 .o_devid_init = nvme_bd_devid,
1002 .o_sync_cache = nvme_bd_sync,
1003 .o_read = nvme_bd_read,
1004 .o_write = nvme_bd_write,
1005 .o_free_space = nvme_bd_free_space,
1006 };
1007
1008 /*
1009 * This list will hold commands that have timed out and couldn't be aborted.
1010 * As we don't know what the hardware may still do with the DMA memory we can't
1011 * free them, so we'll keep them forever on this list where we can easily look
1012 * at them with mdb.
1013 */
1014 static struct list nvme_lost_cmds;
1015 static kmutex_t nvme_lc_mutex;
1016
1017 int
1018 _init(void)
1019 {
1020 int error;
1021
1022 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
1023 if (error != DDI_SUCCESS)
1024 return (error);
1025
1026 if ((nvme_open_minors = id_space_create("nvme_open_minors",
1027 NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
1028 ddi_soft_state_fini(&nvme_state);
1029 return (ENOMEM);
1030 }
1031
1032 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
1033 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1034
1035 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
1036 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
1037 offsetof(nvme_cmd_t, nc_list));
1038
1039 mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
1040 avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
1041 sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));
1042
1043 nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
1044 TASKQ_PREPOPULATE);
1045
1046 bd_mod_init(&nvme_dev_ops);
1047
1048 error = mod_install(&nvme_modlinkage);
1049 if (error != DDI_SUCCESS) {
1050 ddi_soft_state_fini(&nvme_state);
1051 id_space_destroy(nvme_open_minors);
kmem_cache_destroy(nvme_cmd_cache);
1052 mutex_destroy(&nvme_lc_mutex);
1053 list_destroy(&nvme_lost_cmds);
1054 bd_mod_fini(&nvme_dev_ops);
1055 mutex_destroy(&nvme_open_minors_mutex);
1056 avl_destroy(&nvme_open_minors_avl);
1057 taskq_destroy(nvme_dead_taskq);
1058 }
1059
1060 return (error);
1061 }
1062
1063 int
1064 _fini(void)
1065 {
1066 int error;
1067
1068 if (!list_is_empty(&nvme_lost_cmds))
1069 return (DDI_FAILURE);
1070
1071 error = mod_remove(&nvme_modlinkage);
1072 if (error == DDI_SUCCESS) {
1073 ddi_soft_state_fini(&nvme_state);
1074 id_space_destroy(nvme_open_minors);
1075 kmem_cache_destroy(nvme_cmd_cache);
1076 mutex_destroy(&nvme_lc_mutex);
1077 list_destroy(&nvme_lost_cmds);
1078 bd_mod_fini(&nvme_dev_ops);
1079 mutex_destroy(&nvme_open_minors_mutex);
1080 avl_destroy(&nvme_open_minors_avl);
1081 taskq_destroy(nvme_dead_taskq);
1082 }
1083
1084 return (error);
1085 }
1086
1087 int
1088 _info(struct modinfo *modinfop)
1089 {
1090 return (mod_info(&nvme_modlinkage, modinfop));
1091 }
1092
1093 static inline void
1094 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
1095 {
1096 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1097
1098 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1099 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
1100 }
1101
1102 static inline void
1103 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
1104 {
1105 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1106
1107 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1108 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
1109 }
1110
1111 static inline uint64_t
1112 nvme_get64(nvme_t *nvme, uintptr_t reg)
1113 {
1114 uint64_t val;
1115
1116 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1117
1118 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1119 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
1120
1121 return (val);
1122 }
1123
1124 static inline uint32_t
1125 nvme_get32(nvme_t *nvme, uintptr_t reg)
1126 {
1127 uint32_t val;
1128
1129 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1130
1131 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1132 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
1133
1134 return (val);
1135 }
1136
1137 static void
1138 nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
1139 {
1140 ASSERT3U(lock->nml_bd_own, ==, 0);
1141 mutex_destroy(&lock->nml_lock);
1142 cv_destroy(&lock->nml_cv);
1143 }
1144
1145 static void
1146 nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
1147 {
1148 mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
1149 cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
1150 lock->nml_bd_own = 0;
1151 }
1152
1153 static void
1154 nvme_mgmt_unlock(nvme_t *nvme)
1155 {
1156 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1157
1158 cv_broadcast(&lock->nml_cv);
1159 mutex_exit(&lock->nml_lock);
1160 }
1161
1162 #ifdef DEBUG
1163 static boolean_t
1164 nvme_mgmt_lock_held(nvme_t *nvme)
1165 {
1166 return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
1167 }
1168 #endif /* DEBUG */
1169
1170 static void
1171 nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
1172 {
1173 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1174 mutex_enter(&lock->nml_lock);
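	/*
	 * If a thread has logically retained the lock via
	 * nvme_mgmt_bd_start(), NVME level callers must wait for the matching
	 * nvme_mgmt_bd_end(); BDRO level callers (blkdev callback context) are
	 * allowed to proceed.
	 */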
1175 while (lock->nml_bd_own != 0) {
1176 if (level == NVME_MGMT_LOCK_BDRO)
1177 break;
1178 cv_wait(&lock->nml_cv, &lock->nml_lock);
1179 }
1180 }
1181
1182 /*
1183 * This and nvme_mgmt_bd_end() are used to indicate that the driver is going to
1184 * be calling into a re-entrant blkdev related function. We cannot hold the lock
1185 * across such an operation and therefore must indicate that this is logically
1186 * held, while allowing other operations to proceed. nvme_mgmt_bd_end() may
1187 * only be called by a thread that already holds the nvme_mgmt_lock().
1188 */
1189 static void
1190 nvme_mgmt_bd_start(nvme_t *nvme)
1191 {
1192 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1193
1194 VERIFY(MUTEX_HELD(&lock->nml_lock));
1195 VERIFY3U(lock->nml_bd_own, ==, 0);
1196 lock->nml_bd_own = (uintptr_t)curthread;
1197 mutex_exit(&lock->nml_lock);
1198 }
1199
1200 static void
1201 nvme_mgmt_bd_end(nvme_t *nvme)
1202 {
1203 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1204
1205 mutex_enter(&lock->nml_lock);
1206 VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
1207 lock->nml_bd_own = 0;
1208 }
1209
1210 /*
1211 * This is a central clearing house for marking an NVMe controller dead and/or
1212 * removed. This takes care of setting the flag, taking care of outstanding
1213 * blocked locks, and sending a DDI FMA impact. This is called from a precarious
1214 * place where locking is suspect. The only guarantee we have is that the nvme_t
1215 * is valid and won't disappear until we return.
1216 *
1217 * This should only be used after attach has been called.
1218 */
1219 static void
1220 nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
1221 {
1222 boolean_t was_dead;
1223
1224 /*
1225 * See if we win the race to set things up here. If someone beat us to
1226 * it, we do not do anything.
1227 */
1228 was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
1229 B_TRUE);
1230
1231 /*
1232 * If we were removed, note this in our death status, regardless of
1233 * whether or not we were already dead. We need to know this so that we
1234 * can decide if it is safe to try and interact with the device in e.g.
1235 * reset and shutdown.
1236 */
1237 if (removed) {
1238 nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
1239 }
1240
1241 if (was_dead) {
1242 return;
1243 }
1244
1245 /*
1246 * If this was removed, there is no reason to change the service impact.
1247 * Otherwise, we need to change our default return code to indicate that
1248 * the device is truly dead, and not simply gone.
1249 */
1250 if (!removed) {
1251 ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
1252 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1253 }
1254
1255 taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
1256 TQ_NOSLEEP, &nvme->n_dead_tqent);
1257 }
1258
1259 static boolean_t
1260 nvme_ctrl_is_gone(const nvme_t *nvme)
1261 {
1262 if (nvme->n_dead && nvme->n_dead_status == NVME_IOCTL_E_CTRL_GONE)
1263 return (B_TRUE);
1264
1265 return (B_FALSE);
1266 }
1267
1268 static boolean_t
1269 nvme_check_regs_hdl(nvme_t *nvme)
1270 {
1271 ddi_fm_error_t error;
1272
1273 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
1274
1275 if (error.fme_status != DDI_FM_OK)
1276 return (B_TRUE);
1277
1278 return (B_FALSE);
1279 }
1280
1281 static boolean_t
1282 nvme_check_dma_hdl(nvme_dma_t *dma)
1283 {
1284 ddi_fm_error_t error;
1285
1286 if (dma == NULL)
1287 return (B_FALSE);
1288
1289 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
1290
1291 if (error.fme_status != DDI_FM_OK)
1292 return (B_TRUE);
1293
1294 return (B_FALSE);
1295 }
1296
1297 static void
1298 nvme_free_dma_common(nvme_dma_t *dma)
1299 {
1300 if (dma->nd_dmah != NULL)
1301 (void) ddi_dma_unbind_handle(dma->nd_dmah);
1302 if (dma->nd_acch != NULL)
1303 ddi_dma_mem_free(&dma->nd_acch);
1304 if (dma->nd_dmah != NULL)
1305 ddi_dma_free_handle(&dma->nd_dmah);
1306 }
1307
1308 static void
1309 nvme_free_dma(nvme_dma_t *dma)
1310 {
1311 nvme_free_dma_common(dma);
1312 kmem_free(dma, sizeof (*dma));
1313 }
1314
1315 static void
1316 nvme_prp_dma_destructor(void *buf, void *private __unused)
1317 {
1318 nvme_dma_t *dma = (nvme_dma_t *)buf;
1319
1320 nvme_free_dma_common(dma);
1321 }
1322
1323 static int
1324 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
1325 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
1326 {
1327 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
1328 &dma->nd_dmah) != DDI_SUCCESS) {
1329 /*
1330 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
1331 * the only other possible error is DDI_DMA_BADATTR which
1332 * indicates a driver bug which should cause a panic.
1333 */
1334 dev_err(nvme->n_dip, CE_PANIC,
1335 "!failed to get DMA handle, check DMA attributes");
1336 return (DDI_FAILURE);
1337 }
1338
1339 /*
1340 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
1341 * or the flags are conflicting, which isn't the case here.
1342 */
1343 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
1344 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
1345 &dma->nd_len, &dma->nd_acch);
1346
1347 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
1348 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1349 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
1350 dev_err(nvme->n_dip, CE_WARN,
1351 "!failed to bind DMA memory");
1352 NVME_BUMP_STAT(nvme, dma_bind_err);
1353 nvme_free_dma_common(dma);
1354 return (DDI_FAILURE);
1355 }
1356
1357 return (DDI_SUCCESS);
1358 }
1359
1360 static int
1361 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
1362 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
1363 {
1364 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
1365
1366 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
1367 DDI_SUCCESS) {
1368 *ret = NULL;
1369 kmem_free(dma, sizeof (nvme_dma_t));
1370 return (DDI_FAILURE);
1371 }
1372
1373 bzero(dma->nd_memp, dma->nd_len);
1374
1375 *ret = dma;
1376 return (DDI_SUCCESS);
1377 }
1378
1379 static int
1380 nvme_prp_dma_constructor(void *buf, void *private, int flags __unused)
1381 {
1382 nvme_dma_t *dma = (nvme_dma_t *)buf;
1383 nvme_t *nvme = (nvme_t *)private;
1384
1385 dma->nd_dmah = NULL;
1386 dma->nd_acch = NULL;
1387
1388 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
1389 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
1390 return (-1);
1391 }
1392
1393 ASSERT(dma->nd_ncookie == 1);
1394
1395 dma->nd_cached = B_TRUE;
1396
1397 return (0);
1398 }
1399
1400 static int
1401 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
1402 uint_t flags, nvme_dma_t **dma)
1403 {
1404 uint32_t len = nentry * qe_len;
1405 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
1406
1407 len = roundup(len, nvme->n_pagesize);
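/*
 * For example, assuming the typical 4 KiB controller page size: a
 * 1024-entry completion queue of 16-byte CQEs (16 KiB) is already page
 * aligned, while a 63-entry submission queue of 64-byte SQEs (4032
 * bytes) is rounded up to one 4 KiB page.
 */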
1408
1409 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
1410 != DDI_SUCCESS) {
1411 dev_err(nvme->n_dip, CE_WARN,
1412 "!failed to get DMA memory for queue");
1413 goto fail;
1414 }
1415
1416 if ((*dma)->nd_ncookie != 1) {
1417 dev_err(nvme->n_dip, CE_WARN,
1418 "!got too many cookies for queue DMA");
1419 goto fail;
1420 }
1421
1422 return (DDI_SUCCESS);
1423
1424 fail:
1425 if (*dma) {
1426 nvme_free_dma(*dma);
1427 *dma = NULL;
1428 }
1429
1430 return (DDI_FAILURE);
1431 }
1432
1433 static void
1434 nvme_free_cq(nvme_cq_t *cq)
1435 {
1436 mutex_destroy(&cq->ncq_mutex);
1437
1438 if (cq->ncq_cmd_taskq != NULL)
1439 taskq_destroy(cq->ncq_cmd_taskq);
1440
1441 if (cq->ncq_dma != NULL)
1442 nvme_free_dma(cq->ncq_dma);
1443
1444 kmem_free(cq, sizeof (*cq));
1445 }
1446
1447 static void
1448 nvme_free_qpair(nvme_qpair_t *qp)
1449 {
1450 int i;
1451
1452 mutex_destroy(&qp->nq_mutex);
1453 sema_destroy(&qp->nq_sema);
1454
1455 if (qp->nq_sqdma != NULL)
1456 nvme_free_dma(qp->nq_sqdma);
1457
1458 if (qp->nq_active_cmds > 0)
1459 for (i = 0; i != qp->nq_nentry; i++)
1460 if (qp->nq_cmd[i] != NULL)
1461 nvme_free_cmd(qp->nq_cmd[i]);
1462
1463 if (qp->nq_cmd != NULL)
1464 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
1465
1466 kmem_free(qp, sizeof (nvme_qpair_t));
1467 }
1468
1469 /*
1470 * Destroy the pre-allocated cq array, but only free individual completion
1471 * queues from the given starting index.
1472 */
1473 static void
1474 nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
1475 {
1476 uint_t i;
1477
1478 for (i = start; i < nvme->n_cq_count; i++)
1479 if (nvme->n_cq[i] != NULL)
1480 nvme_free_cq(nvme->n_cq[i]);
1481
1482 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
1483 }
1484
1485 static int
1486 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
1487 uint_t nthr)
1488 {
1489 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
1490 char name[64]; /* large enough for the taskq name */
1491
1492 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
1493 DDI_INTR_PRI(nvme->n_intr_pri));
1494
1495 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
1496 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
1497 goto fail;
1498
1499 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
1500 cq->ncq_nentry = nentry;
1501 cq->ncq_id = idx;
1502 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
1503
1504 /*
1505 * Each completion queue has its own command taskq.
1506 */
1507 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
1508 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
1509
1510 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
1511 TASKQ_PREPOPULATE);
1512
1513 if (cq->ncq_cmd_taskq == NULL) {
1514 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
1515 "taskq for cq %u", idx);
1516 goto fail;
1517 }
1518
1519 *cqp = cq;
1520 return (DDI_SUCCESS);
1521
1522 fail:
1523 nvme_free_cq(cq);
1524 *cqp = NULL;
1525
1526 return (DDI_FAILURE);
1527 }
1528
1529 /*
1530 * Create the n_cq array big enough to hold "ncq" completion queues.
1531 * If the array already exists it will be re-sized (but only larger).
1532 * The admin queue is included in this array, which boosts the
1533 * max number of entries to UINT16_MAX + 1.
1534 */
1535 static int
1536 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
1537 {
1538 nvme_cq_t **cq;
1539 uint_t i, cq_count;
1540
1541 ASSERT3U(ncq, >, nvme->n_cq_count);
1542
1543 cq = nvme->n_cq;
1544 cq_count = nvme->n_cq_count;
1545
1546 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
1547 nvme->n_cq_count = ncq;
1548
1549 for (i = 0; i < cq_count; i++)
1550 nvme->n_cq[i] = cq[i];
1551
1552 for (; i < nvme->n_cq_count; i++)
1553 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
1554 DDI_SUCCESS)
1555 goto fail;
1556
1557 if (cq != NULL)
1558 kmem_free(cq, sizeof (*cq) * cq_count);
1559
1560 return (DDI_SUCCESS);
1561
1562 fail:
1563 nvme_destroy_cq_array(nvme, cq_count);
1564 /*
1565 * Restore the original array
1566 */
1567 nvme->n_cq_count = cq_count;
1568 nvme->n_cq = cq;
1569
1570 return (DDI_FAILURE);
1571 }
1572
1573 static int
1574 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
1575 uint_t idx)
1576 {
1577 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
1578 uint_t cq_idx;
1579
1580 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
1581 DDI_INTR_PRI(nvme->n_intr_pri));
1582
1583 /*
1584 * The NVMe spec defines that a full queue has one empty (unused) slot;
1585 * initialize the semaphore accordingly.
1586 */
1587 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
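/* E.g. a 1024-entry submission queue admits at most 1023 outstanding commands. */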
1588
1589 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
1590 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
1591 goto fail;
1592
1593 /*
1594 * idx == 0 is adminq, those above 0 are shared io completion queues.
1595 */
1596 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
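/*
 * For example, with n_cq_count == 5 (the admin CQ plus four I/O CQs),
 * I/O submission queues 1-8 map to completion queues 1, 2, 3, 4, 1, 2,
 * 3, 4.
 */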
1597 qp->nq_cq = nvme->n_cq[cq_idx];
1598 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
1599 qp->nq_nentry = nentry;
1600
1601 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
1602
1603 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
1604 qp->nq_next_cmd = 0;
1605
1606 *nqp = qp;
1607 return (DDI_SUCCESS);
1608
1609 fail:
1610 nvme_free_qpair(qp);
1611 *nqp = NULL;
1612
1613 return (DDI_FAILURE);
1614 }
1615
1616 /*
1617 * One might reasonably consider that the nvme_cmd_cache should have a cache
1618 * constructor and destructor that takes care of the mutex/cv init/destroy, and
1619 * that nvme_free_cmd should reset more fields such that allocation becomes
1620 * simpler. This is not currently implemented as:
1621 * - nvme_cmd_cache is a global cache, shared across nvme instances and
1622 * therefore there is no easy access to the corresponding nvme_t in the
1623 * constructor to determine the required interrupt priority.
1624 * - Most fields in nvme_cmd_t would need to be zeroed in nvme_free_cmd while
1625 * preserving the mutex/cv. It is easier to zero the entire
1626 * structure and then init the mutex/cv only in the unlikely event that we
1627 * want an admin command.
1628 */
1629 static nvme_cmd_t *
1630 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
1631 {
1632 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
1633
1634 if (cmd != NULL) {
1635 bzero(cmd, sizeof (nvme_cmd_t));
1636 cmd->nc_nvme = nvme;
1637 }
1638
1639 return (cmd);
1640 }
1641
1642 static nvme_cmd_t *
1643 nvme_alloc_admin_cmd(nvme_t *nvme, int kmflag)
1644 {
1645 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, kmflag);
1646
1647 if (cmd != NULL) {
1648 cmd->nc_flags |= NVME_CMD_F_USELOCK;
1649 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
1650 DDI_INTR_PRI(nvme->n_intr_pri));
1651 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
1652 }
1653
1654 return (cmd);
1655 }
1656
1657 static void
1658 nvme_free_cmd(nvme_cmd_t *cmd)
1659 {
1660 /* Don't free commands on the lost commands list. */
1661 if (list_link_active(&cmd->nc_list))
1662 return;
1663
1664 if (cmd->nc_dma) {
1665 nvme_free_dma(cmd->nc_dma);
1666 cmd->nc_dma = NULL;
1667 }
1668
1669 if (cmd->nc_prp) {
1670 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
1671 cmd->nc_prp = NULL;
1672 }
1673
1674 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
1675 cv_destroy(&cmd->nc_cv);
1676 mutex_destroy(&cmd->nc_mutex);
1677 }
1678
1679 kmem_cache_free(nvme_cmd_cache, cmd);
1680 }
1681
1682 static void
1683 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1684 {
1685 sema_p(&qp->nq_sema);
1686 nvme_submit_cmd_common(qp, cmd, qtimeoutp);
1687 }
1688
1689 static int
1690 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1691 {
1692 if (cmd->nc_nvme->n_dead) {
1693 return (EIO);
1694 }
1695
1696 if (sema_tryp(&qp->nq_sema) == 0)
1697 return (EAGAIN);
1698
1699 nvme_submit_cmd_common(qp, cmd, NULL);
1700 return (0);
1701 }
1702
1703 /*
1704 * Common command submission routine. If `qtimeoutp` is not NULL then it will
1705 * be set to the sum of the timeouts of any active commands ahead of the one
1706 * being submitted.
1707 */
1708 static void
1709 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1710 {
1711 nvme_reg_sqtdbl_t tail = { 0 };
1712
1713 /*
1714 * We don't need to take a lock on cmd since it is not yet enqueued.
1715 */
1716 cmd->nc_submit_ts = gethrtime();
1717 cmd->nc_state = NVME_CMD_SUBMITTED;
1718
1719 mutex_enter(&qp->nq_mutex);
1720
1721 /*
1722 * Now that we hold the queue pair lock, we must check whether or not
1723 * the controller has been listed as dead (e.g. was removed due to
1724 * hotplug). This is necessary as otherwise we could race with
1725 * nvme_remove_callback(). Because this has not been enqueued, we don't
1726 * call nvme_unqueue_cmd(), which is why we must manually decrement the
1727 * semaphore.
1728 */
1729 if (cmd->nc_nvme->n_dead) {
1730 cmd->nc_queue_ts = gethrtime();
1731 cmd->nc_state = NVME_CMD_QUEUED;
1732 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
1733 cmd, TQ_NOSLEEP, &cmd->nc_tqent);
1734 sema_v(&qp->nq_sema);
1735 mutex_exit(&qp->nq_mutex);
1736 return;
1737 }
1738
1739 /*
1740 * Try to insert the cmd into the active cmd array at the nq_next_cmd
1741 * slot. If the slot is already occupied advance to the next slot and
1742 * try again. This can happen for long running commands like async event
1743 * requests.
1744 */
1745 while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
1746 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1747 qp->nq_cmd[qp->nq_next_cmd] = cmd;
1748
1749 /*
1750 * We keep track of the number of active commands in this queue, and
1751 * the sum of the timeouts for those active commands.
1752 */
1753 qp->nq_active_cmds++;
1754 if (qtimeoutp != NULL)
1755 *qtimeoutp = qp->nq_active_timeout;
1756 qp->nq_active_timeout += cmd->nc_timeout;
1757
1758 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
1759 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
1760 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
1761 sizeof (nvme_sqe_t) * qp->nq_sqtail,
1762 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
1763 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1764
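/*
 * Advance the submission queue tail and write it to the SQ tail
 * doorbell register to notify the controller of the new command.
 */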
1765 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
1766 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
1767
1768 mutex_exit(&qp->nq_mutex);
1769 }
1770
1771 static nvme_cmd_t *
1772 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
1773 {
1774 nvme_cmd_t *cmd;
1775
1776 ASSERT(mutex_owned(&qp->nq_mutex));
1777 ASSERT3S(cid, <, qp->nq_nentry);
1778
1779 cmd = qp->nq_cmd[cid];
1780 /*
1781 * Some controllers will erroneously add things to the completion queue
1782 * for which there is no matching outstanding command. If this happens,
1783 * it is almost certainly a controller firmware bug since nq_mutex
1784 * is held across command submission and ringing the queue doorbell,
1785 * and is also held in this function.
1786 *
1787 * If we see such an unexpected command, there is not much we can do.
1788 * These will be logged and counted in nvme_get_completed(), but
1789 * otherwise ignored.
1790 */
1791 if (cmd == NULL)
1792 return (NULL);
1793 qp->nq_cmd[cid] = NULL;
1794 ASSERT3U(qp->nq_active_cmds, >, 0);
1795 qp->nq_active_cmds--;
1796 ASSERT3U(qp->nq_active_timeout, >=, cmd->nc_timeout);
1797 qp->nq_active_timeout -= cmd->nc_timeout;
1798 sema_v(&qp->nq_sema);
1799
1800 ASSERT3P(cmd, !=, NULL);
1801 ASSERT3P(cmd->nc_nvme, ==, nvme);
1802 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
1803
1804 return (cmd);
1805 }
1806
1807 /*
1808 * This is called when an admin abort has failed to complete, once for the
1809 * original command and once for the abort itself. At this point the controller
1810 * has been marked dead. The commands are considered lost, de-queued if
1811 * possible, and placed on a global lost commands list so that they cannot be
1812 * freed and so that any DMA memory they have is not re-used.
1813 */
1814 static void
1815 nvme_lost_cmd(nvme_t *nvme, nvme_cmd_t *cmd)
1816 {
1817 ASSERT(mutex_owned(&cmd->nc_mutex));
1818
1819 switch (cmd->nc_state) {
1820 case NVME_CMD_SUBMITTED: {
1821 nvme_qpair_t *qp = nvme->n_ioq[cmd->nc_sqid];
1822
1823 /*
1824 * The command is still in the submitted state, meaning that we
1825 * have not processed a completion queue entry for it. De-queue
1826 * should be successful and if the hardware does later report
1827 * completion we'll skip it as a command for which we aren't
1828 * expecting a response (see nvme_unqueue_cmd()).
1829 */
1830 mutex_enter(&qp->nq_mutex);
1831 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
1832 mutex_exit(&qp->nq_mutex);
1833 }
1834 case NVME_CMD_ALLOCATED:
1835 case NVME_CMD_COMPLETED:
1836 /*
1837 * If the command has not been submitted, or has completed,
1838 * there is nothing to do here. In the event of an abort
1839 * command timeout, we can end up here in the process of
1840 * "losing" the original command. It's possible that command
1841 * has actually completed (or been queued on the taskq) in the
1842 * interim.
1843 */
1844 break;
1845 case NVME_CMD_QUEUED:
1846 /*
1847 * The command is on the taskq, awaiting callback. This should
1848 * be fairly rapid so wait for completion.
1849 */
1850 while (cmd->nc_state != NVME_CMD_COMPLETED)
1851 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
1852 break;
1853 case NVME_CMD_LOST:
1854 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1855 "%s: command %p already lost", __func__, (void *)cmd);
1856 break;
1857 }
1858
1859 cmd->nc_state = NVME_CMD_LOST;
1860
1861 mutex_enter(&nvme_lc_mutex);
1862 list_insert_head(&nvme_lost_cmds, cmd);
1863 mutex_exit(&nvme_lc_mutex);
1864 }
1865
1866 /*
1867 * Get the command tied to the next completed cqe and bump along completion
1868 * queue head counter.
1869 */
1870 static nvme_cmd_t *
1871 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
1872 {
1873 nvme_qpair_t *qp;
1874 nvme_cqe_t *cqe;
1875 nvme_cmd_t *cmd;
1876
1877 ASSERT(mutex_owned(&cq->ncq_mutex));
1878
1879 retry:
1880 cqe = &cq->ncq_cq[cq->ncq_head];
1881
1882 /* Check phase tag of CQE. Hardware inverts it for new entries. */
1883 if (cqe->cqe_sf.sf_p == cq->ncq_phase)
1884 return (NULL);
1885
1886 qp = nvme->n_ioq[cqe->cqe_sqid];
1887
1888 mutex_enter(&qp->nq_mutex);
1889 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
1890 mutex_exit(&qp->nq_mutex);
1891
1892 qp->nq_sqhead = cqe->cqe_sqhd;
1893 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
1894
1895 /* Toggle phase on wrap-around. */
1896 if (cq->ncq_head == 0)
1897 cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1;
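/*
 * ncq_phase holds the phase value of entries we have already consumed;
 * the controller writes fresh entries with the opposite value. Flipping
 * ncq_phase when the head wraps preserves that invariant for the next
 * pass over the ring.
 */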
1898
1899 if (cmd == NULL) {
1900 dev_err(nvme->n_dip, CE_WARN,
1901 "!received completion for unknown cid 0x%x", cqe->cqe_cid);
1902 NVME_BUMP_STAT(nvme, unknown_cid);
1903 /*
1904 * We want to ignore this unexpected completion entry as it
1905 * is most likely a result of a bug in the controller firmware.
1906 * However, if we return NULL, then callers will assume there
1907 * are no more pending commands for this wakeup. Retry to keep
1908 * enumerating commands until the phase tag indicates there are
1909 * no more and we are really done.
1910 */
1911 goto retry;
1912 }
1913
1914 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid);
1915 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
1916
1917 return (cmd);
1918 }
1919
1920 /*
1921 * Process all completed commands on the io completion queue.
1922 */
1923 static uint_t
1924 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
1925 {
1926 nvme_reg_cqhdbl_t head = { 0 };
1927 nvme_cmd_t *cmd;
1928 uint_t completed = 0;
1929
1930 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1931 DDI_SUCCESS)
1932 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1933 __func__);
1934
1935 mutex_enter(&cq->ncq_mutex);
1936
1937 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1938 /*
1939 * NVME_CMD_F_USELOCK is applied to all commands which are
1940 * going to be waited for by another thread in nvme_wait_cmd
1941 * and indicates that the lock should be taken before modifying
1942 * protected fields, and that the mutex has been initialised.
1943 * Commands which do not require the mutex to be held have not
1944 * initialised it (to reduce overhead).
1945 */
1946 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
1947 mutex_enter(&cmd->nc_mutex);
1948 /*
1949 * The command could have been de-queued as lost while
1950 * we waited on the lock, in which case we drop it.
1951 */
1952 if (cmd->nc_state == NVME_CMD_LOST) {
1953 mutex_exit(&cmd->nc_mutex);
1954 completed++;
1955 continue;
1956 }
1957 }
1958 cmd->nc_queue_ts = gethrtime();
1959 cmd->nc_state = NVME_CMD_QUEUED;
1960 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0)
1961 mutex_exit(&cmd->nc_mutex);
1962 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
1963 TQ_NOSLEEP, &cmd->nc_tqent);
1964
1965 completed++;
1966 }
1967
1968 if (completed > 0) {
1969 /*
1970 * Update the completion queue head doorbell.
1971 */
1972 head.b.cqhdbl_cqh = cq->ncq_head;
1973 nvme_put32(nvme, cq->ncq_hdbl, head.r);
1974 }
1975
1976 mutex_exit(&cq->ncq_mutex);
1977
1978 return (completed);
1979 }
1980
1981 static nvme_cmd_t *
1982 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
1983 {
1984 nvme_cq_t *cq = qp->nq_cq;
1985 nvme_reg_cqhdbl_t head = { 0 };
1986 nvme_cmd_t *cmd;
1987
1988 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1989 DDI_SUCCESS)
1990 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1991 __func__);
1992
1993 mutex_enter(&cq->ncq_mutex);
1994
1995 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1996 head.b.cqhdbl_cqh = cq->ncq_head;
1997 nvme_put32(nvme, cq->ncq_hdbl, head.r);
1998 }
1999
2000 mutex_exit(&cq->ncq_mutex);
2001
2002 return (cmd);
2003 }
2004
2005 static int
2006 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
2007 {
2008 nvme_cqe_t *cqe = &cmd->nc_cqe;
2009
2010 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2011 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2012 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2013 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2014 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2015
2016 if (cmd->nc_xfer != NULL)
2017 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2018
2019 if (cmd->nc_nvme->n_strict_version) {
2020 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2021 }
2022
2023 return (EIO);
2024 }
2025
2026 static int
2027 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
2028 {
2029 nvme_cqe_t *cqe = &cmd->nc_cqe;
2030
2031 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2032 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2033 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2034 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2035 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2036 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
2037 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2038 }
2039
2040 return (EIO);
2041 }
2042
2043 static int
2044 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
2045 {
2046 nvme_cqe_t *cqe = &cmd->nc_cqe;
2047
2048 switch (cqe->cqe_sf.sf_sc) {
2049 case NVME_CQE_SC_INT_NVM_WRITE:
2050 /* write fail */
2051 /* TODO: post ereport */
2052 if (cmd->nc_xfer != NULL)
2053 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2054 return (EIO);
2055
2056 case NVME_CQE_SC_INT_NVM_READ:
2057 /* read fail */
2058 /* TODO: post ereport */
2059 if (cmd->nc_xfer != NULL)
2060 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2061 return (EIO);
2062
2063 default:
2064 return (nvme_check_unknown_cmd_status(cmd));
2065 }
2066 }
2067
2068 static int
2069 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
2070 {
2071 nvme_cqe_t *cqe = &cmd->nc_cqe;
2072
2073 switch (cqe->cqe_sf.sf_sc) {
2074 case NVME_CQE_SC_GEN_SUCCESS:
2075 return (0);
2076
2077 /*
2078 * Errors indicating a bug in the driver should cause a panic.
2079 */
2080 case NVME_CQE_SC_GEN_INV_OPC:
2081 /* Invalid Command Opcode */
2082 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmd_err);
2083 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2084 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2085 "programming error: invalid opcode in cmd %p",
2086 (void *)cmd);
2087 }
2088 return (EINVAL);
2089
2090 case NVME_CQE_SC_GEN_INV_FLD:
2091 /* Invalid Field in Command */
2092 NVME_BUMP_STAT(cmd->nc_nvme, inv_field_err);
2093 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2094 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2095 "programming error: invalid field in cmd %p",
2096 (void *)cmd);
2097 }
2098 return (EIO);
2099
2100 case NVME_CQE_SC_GEN_ID_CNFL:
2101 /* Command ID Conflict */
2102 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2103 "cmd ID conflict in cmd %p", (void *)cmd);
2104 return (0);
2105
2106 case NVME_CQE_SC_GEN_INV_NS:
2107 /* Invalid Namespace or Format */
2108 NVME_BUMP_STAT(cmd->nc_nvme, inv_nsfmt_err);
2109 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2110 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2111 "programming error: invalid NS/format in cmd %p",
2112 (void *)cmd);
2113 }
2114 return (EINVAL);
2115
2116 case NVME_CQE_SC_GEN_CMD_SEQ_ERR:
2117 /*
2118 * Command Sequence Error
2119 *
2120 * This can be generated normally by user log page requests that
2121 * come out of order (e.g. getting the persistent event log
2122 * without establishing the context). If the kernel manages this
2123 * on its own then that's problematic.
2124 */
2125 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmdseq_err);
2126 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2127 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2128 "programming error: command sequencing error %p",
2129 (void *)cmd);
2130 }
2131 return (EINVAL);
2132
2133 case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
2134 /* LBA Out Of Range */
2135 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2136 "LBA out of range in cmd %p", (void *)cmd);
2137 return (0);
2138
2139 /*
2140 * Non-fatal errors, handle gracefully.
2141 */
2142 case NVME_CQE_SC_GEN_DATA_XFR_ERR:
2143 /* Data Transfer Error (DMA) */
2144 /* TODO: post ereport */
2145 NVME_BUMP_STAT(cmd->nc_nvme, data_xfr_err);
2146 if (cmd->nc_xfer != NULL)
2147 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2148 return (EIO);
2149
2150 case NVME_CQE_SC_GEN_INTERNAL_ERR:
2151 /*
2152 * Internal Error. The spec (v1.0, section 4.5.1.2) says
2153 * detailed error information is returned as async event,
2154 * so we pretty much ignore the error here and handle it
2155 * in the async event handler.
2156 */
2157 NVME_BUMP_STAT(cmd->nc_nvme, internal_err);
2158 if (cmd->nc_xfer != NULL)
2159 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2160 return (EIO);
2161
2162 case NVME_CQE_SC_GEN_ABORT_REQUEST:
2163 /*
2164 * Command Abort Requested. This normally happens only when a
2165 * command times out.
2166 */
2167 /* TODO: post ereport or change blkdev to handle this? */
2168 NVME_BUMP_STAT(cmd->nc_nvme, abort_rq_err);
2169 return (ECANCELED);
2170
2171 case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
2172 /* Command Aborted due to Power Loss Notification */
2173 NVME_BUMP_STAT(cmd->nc_nvme, abort_pwrloss_err);
2174 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2175 return (EIO);
2176
2177 case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
2178 /* Command Aborted due to SQ Deletion */
2179 NVME_BUMP_STAT(cmd->nc_nvme, abort_sq_del);
2180 return (EIO);
2181
2182 case NVME_CQE_SC_GEN_NVM_CAP_EXC:
2183 /* Capacity Exceeded */
2184 NVME_BUMP_STAT(cmd->nc_nvme, nvm_cap_exc);
2185 if (cmd->nc_xfer != NULL)
2186 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2187 return (EIO);
2188
2189 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
2190 /* Namespace Not Ready */
2191 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_notrdy);
2192 if (cmd->nc_xfer != NULL)
2193 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2194 return (EIO);
2195
2196 case NVME_CQE_SC_GEN_NVM_FORMATTING:
2197 /* Format in progress (1.2) */
2198 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2))
2199 return (nvme_check_unknown_cmd_status(cmd));
2200 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_formatting);
2201 if (cmd->nc_xfer != NULL)
2202 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2203 return (EIO);
2204
2205 default:
2206 return (nvme_check_unknown_cmd_status(cmd));
2207 }
2208 }
2209
2210 static int
2211 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
2212 {
2213 nvme_cqe_t *cqe = &cmd->nc_cqe;
2214
2215 switch (cqe->cqe_sf.sf_sc) {
2216 case NVME_CQE_SC_SPC_INV_CQ:
2217 /* Completion Queue Invalid */
2218 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
2219 NVME_BUMP_STAT(cmd->nc_nvme, inv_cq_err);
2220 return (EINVAL);
2221
2222 case NVME_CQE_SC_SPC_INV_QID:
2223 /* Invalid Queue Identifier */
2224 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2225 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
2226 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
2227 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2228 NVME_BUMP_STAT(cmd->nc_nvme, inv_qid_err);
2229 return (EINVAL);
2230
2231 case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
2232 /* Max Queue Size Exceeded */
2233 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2234 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2235 NVME_BUMP_STAT(cmd->nc_nvme, max_qsz_exc);
2236 return (EINVAL);
2237
2238 case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
2239 /* Abort Command Limit Exceeded */
2240 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
2241 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2242 "abort command limit exceeded in cmd %p", (void *)cmd);
2243 return (0);
2244
2245 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
2246 /* Async Event Request Limit Exceeded */
2247 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
2248 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2249 "async event request limit exceeded in cmd %p",
2250 (void *)cmd);
2251 return (0);
2252
2253 case NVME_CQE_SC_SPC_INV_INT_VECT:
2254 /* Invalid Interrupt Vector */
2255 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2256 NVME_BUMP_STAT(cmd->nc_nvme, inv_int_vect);
2257 return (EINVAL);
2258
2259 case NVME_CQE_SC_SPC_INV_LOG_PAGE:
2260 /* Invalid Log Page */
2261 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
2262 NVME_BUMP_STAT(cmd->nc_nvme, inv_log_page);
2263 return (EINVAL);
2264
2265 case NVME_CQE_SC_SPC_INV_FORMAT:
2266 /* Invalid Format */
2267 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
2268 NVME_BUMP_STAT(cmd->nc_nvme, inv_format);
2269 if (cmd->nc_xfer != NULL)
2270 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2271 return (EINVAL);
2272
2273 case NVME_CQE_SC_SPC_INV_Q_DEL:
2274 /* Invalid Queue Deletion */
2275 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2276 NVME_BUMP_STAT(cmd->nc_nvme, inv_q_del);
2277 return (EINVAL);
2278
2279 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
2280 /* Conflicting Attributes */
2281 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
2282 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2283 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2284 NVME_BUMP_STAT(cmd->nc_nvme, cnfl_attr);
2285 if (cmd->nc_xfer != NULL)
2286 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2287 return (EINVAL);
2288
2289 case NVME_CQE_SC_SPC_NVM_INV_PROT:
2290 /* Invalid Protection Information */
2291 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
2292 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2293 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2294 NVME_BUMP_STAT(cmd->nc_nvme, inv_prot);
2295 if (cmd->nc_xfer != NULL)
2296 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2297 return (EINVAL);
2298
2299 case NVME_CQE_SC_SPC_NVM_READONLY:
2300 /* Write to Read Only Range */
2301 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2302 NVME_BUMP_STAT(cmd->nc_nvme, readonly);
2303 if (cmd->nc_xfer != NULL)
2304 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2305 return (EROFS);
2306
2307 case NVME_CQE_SC_SPC_INV_FW_SLOT:
2308 /* Invalid Firmware Slot */
2309 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwslot);
2310 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2311 return (EINVAL);
2312
2313 case NVME_CQE_SC_SPC_INV_FW_IMG:
2314 /* Invalid Firmware Image */
2315 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwimg);
2316 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2317 return (EINVAL);
2318
2319 case NVME_CQE_SC_SPC_FW_RESET:
2320 /* Conventional Reset Required */
2321 NVME_BUMP_STAT(cmd->nc_nvme, fwact_creset);
2322 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2323 return (0);
2324
2325 case NVME_CQE_SC_SPC_FW_NSSR:
2326 /* NVMe Subsystem Reset Required */
2327 NVME_BUMP_STAT(cmd->nc_nvme, fwact_nssr);
2328 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2329 return (0);
2330
2331 case NVME_CQE_SC_SPC_FW_NEXT_RESET:
2332 /* Activation Requires Reset */
2333 NVME_BUMP_STAT(cmd->nc_nvme, fwact_reset);
2334 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2335 return (0);
2336
2337 case NVME_CQE_SC_SPC_FW_MTFA:
2338 /* Activation Requires Maximum Time Violation */
2339 NVME_BUMP_STAT(cmd->nc_nvme, fwact_mtfa);
2340 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2341 return (EAGAIN);
2342
2343 case NVME_CQE_SC_SPC_FW_PROHIBITED:
2344 /* Activation Prohibited */
2345 NVME_BUMP_STAT(cmd->nc_nvme, fwact_prohibited);
2346 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2347 return (EINVAL);
2348
2349 case NVME_CQE_SC_SPC_FW_OVERLAP:
2350 /* Overlapping Firmware Ranges */
2351 NVME_BUMP_STAT(cmd->nc_nvme, fw_overlap);
2352 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD ||
2353 cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2354 return (EINVAL);
2355
2356 default:
2357 return (nvme_check_unknown_cmd_status(cmd));
2358 }
2359 }
2360
2361 static inline int
2362 nvme_check_cmd_status(nvme_cmd_t *cmd)
2363 {
2364 nvme_cqe_t *cqe = &cmd->nc_cqe;
2365
2366 /*
2367 * Take a shortcut if the controller is dead, or if
2368 * command status indicates no error.
2369 */
2370 if (cmd->nc_nvme->n_dead)
2371 return (EIO);
2372
2373 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2374 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2375 return (0);
2376
2377 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
2378 return (nvme_check_generic_cmd_status(cmd));
2379 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
2380 return (nvme_check_specific_cmd_status(cmd));
2381 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
2382 return (nvme_check_integrity_cmd_status(cmd));
2383 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
2384 return (nvme_check_vendor_cmd_status(cmd));
2385
2386 return (nvme_check_unknown_cmd_status(cmd));
2387 }
2388
2389 /*
2390 * Check the command status as used by an ioctl path and do not convert it to an
2391 * errno. We still allow all the command status checking to occur, but otherwise
2392 * will pass back the controller error as is.
2393 */
2394 static boolean_t
2395 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc)
2396 {
2397 nvme_cqe_t *cqe = &cmd->nc_cqe;
2398 nvme_t *nvme = cmd->nc_nvme;
2399
2400 if (nvme->n_dead) {
2401 return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0));
2402 }
2403
2404 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2405 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2406 return (B_TRUE);
2407
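/*
 * Run the usual status checks for their side effects (statistics,
 * marking the controller dead, panics on programming errors), but
 * discard the errno; the raw sct/sc pair is passed back instead.
 */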
2408 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) {
2409 (void) nvme_check_generic_cmd_status(cmd);
2410 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) {
2411 (void) nvme_check_specific_cmd_status(cmd);
2412 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) {
2413 (void) nvme_check_integrity_cmd_status(cmd);
2414 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) {
2415 (void) nvme_check_vendor_cmd_status(cmd);
2416 } else {
2417 (void) nvme_check_unknown_cmd_status(cmd);
2418 }
2419
2420 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR,
2421 cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc));
2422 }
2423
2424 static int
2425 nvme_abort_cmd(nvme_cmd_t *cmd, const uint32_t sec)
2426 {
2427 nvme_t *nvme = cmd->nc_nvme;
2428 nvme_cmd_t *abort_cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2429 nvme_abort_cmd_t ac = { 0 };
2430 int ret = 0;
2431
2432 sema_p(&nvme->n_abort_sema);
2433
2434 ac.b.ac_cid = cmd->nc_sqe.sqe_cid;
2435 ac.b.ac_sqid = cmd->nc_sqid;
2436
2437 abort_cmd->nc_sqid = 0;
2438 abort_cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
2439 abort_cmd->nc_callback = nvme_wakeup_cmd;
2440 abort_cmd->nc_sqe.sqe_cdw10 = ac.r;
2441
2442 /*
2443 * Send the ABORT to the hardware. The ABORT command will return _after_
2444 * the aborted command has completed (aborted or otherwise) so we must
2445 * drop the aborted command's lock to allow it to complete.
2446 * We want to allow at least `nvme_abort_cmd_timeout` seconds for the
2447 * abort to be processed, but more if we are aborting a long-running
2448 * command to give that time to complete/abort too.
2449 */
2450 mutex_exit(&cmd->nc_mutex);
2451 nvme_admin_cmd(abort_cmd, MAX(nvme_abort_cmd_timeout, sec));
2452 mutex_enter(&cmd->nc_mutex);
2453
2454 sema_v(&nvme->n_abort_sema);
2455
2456 /*
2457 * If the abort command itself has timed out, it will have been
2458 * de-queued so that its callback will not be called after this point,
2459 * and its state will be NVME_CMD_LOST.
2460 *
2461 * nvme_admin_cmd(abort_cmd)
2462 * -> nvme_wait_cmd(abort_cmd)
2463 * -> nvme_cmd(abort_cmd)
2464 * | -> nvme_admin_cmd(cmd)
2465 * | -> nvme_wait_cmd(cmd)
2466 * | -> nvme_ctrl_mark_dead()
2467 * | -> nvme_lost_cmd(cmd)
2468 * | -> cmd->nc_state = NVME_CMD_LOST
2469 * and here we are.
2470 */
2471 if (abort_cmd->nc_state == NVME_CMD_LOST) {
2472 dev_err(nvme->n_dip, CE_WARN,
2473 "!ABORT of command %d/%d timed out",
2474 cmd->nc_sqe.sqe_cid, cmd->nc_sqid);
2475 NVME_BUMP_STAT(nvme, abort_timeout);
2476 ret = EIO;
2477 } else if ((ret = nvme_check_cmd_status(abort_cmd)) != 0) {
2478 dev_err(nvme->n_dip, CE_WARN,
2479 "!ABORT of command %d/%d "
2480 "failed with sct = %x, sc = %x",
2481 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2482 abort_cmd->nc_cqe.cqe_sf.sf_sct,
2483 abort_cmd->nc_cqe.cqe_sf.sf_sc);
2484 NVME_BUMP_STAT(nvme, abort_failed);
2485 } else {
2486 boolean_t success = ((abort_cmd->nc_cqe.cqe_dw0 & 1) == 0);
2487
2488 dev_err(nvme->n_dip, CE_WARN,
2489 "!ABORT of command %d/%d %ssuccessful",
2490 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2491 success ? "" : "un");
2492
2493 if (success) {
2494 NVME_BUMP_STAT(nvme, abort_successful);
2495 } else {
2496 NVME_BUMP_STAT(nvme, abort_unsuccessful);
2497 }
2498 }
2499
2500 /*
2501 * This abort command (abort_cmd) has either completed or been de-queued as
2502 * lost in nvme_wait_cmd. Either way it's safe to free it here.
2503 */
2504 nvme_free_cmd(abort_cmd);
2505
2506 return (ret);
2507 }
2508
2509 /*
2510 * nvme_wait_cmd -- wait for command completion or timeout
2511 *
2512 * In case of a serious error or a timeout of the abort command the hardware
2513 * will be declared dead and FMA will be notified.
2514 */
2515 static void
2516 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec)
2517 {
2518 nvme_t *nvme = cmd->nc_nvme;
2519 nvme_reg_csts_t csts;
2520
2521 ASSERT(mutex_owned(&cmd->nc_mutex));
2522
2523 while (cmd->nc_state != NVME_CMD_COMPLETED) {
2524 clock_t timeout = ddi_get_lbolt() +
2525 drv_usectohz((long)sec * MICROSEC);
2526
2527 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) {
2528 /*
2529 * If this command is on the task queue then we don't
2530 * consider it to have timed out. We are waiting for
2531 * the callback to be invoked, the timing of which can
2532 * be affected by system load and should not count
2533 * against the device; continue to wait.
2534 * While this doesn't help deal with the possibility of
2535 * a command timing out between being placed on the CQ
2536 * and arriving on the taskq, we expect interrupts to
2537 * run fairly promptly making this a small window.
2538 */
2539 if (cmd->nc_state != NVME_CMD_QUEUED)
2540 break;
2541 }
2542 }
2543
2544 if (cmd->nc_state == NVME_CMD_COMPLETED) {
2545 DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *, cmd);
2546 nvme_admin_stat_cmd(nvme, cmd);
2547 return;
2548 }
2549
2550 /*
2551 * The command timed out.
2552 */
2553
2554 DTRACE_PROBE1(nvme_admin_cmd_timeout, nvme_cmd_t *, cmd);
2555 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2556 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
2557 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2558 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
2559 NVME_BUMP_STAT(nvme, cmd_timeout);
2560
2561 /*
2562 * Check controller for fatal status, any errors associated with the
2563 * register or DMA handle, or for a double timeout (abort command timed
2564 * out). If necessary log a warning and call FMA.
2565 */
2566 if (csts.b.csts_cfs ||
2567 nvme_check_regs_hdl(nvme) ||
2568 nvme_check_dma_hdl(cmd->nc_dma) ||
2569 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
2570 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2571 nvme_lost_cmd(nvme, cmd);
2572 return;
2573 }
2574
2575 /* Issue an abort for the command that has timed out */
2576 if (nvme_abort_cmd(cmd, sec) == 0) {
2577 /*
2578 * If the abort completed, whether or not it was
2579 * successful in aborting the command, that command
2580 * will also have completed with an appropriate
2581 * status.
2582 */
2583 while (cmd->nc_state != NVME_CMD_COMPLETED)
2584 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2585 return;
2586 }
2587
2588 /*
2589 * Otherwise, the abort has also timed out or failed, which
2590 * will have marked the controller dead. De-queue the original command
2591 * and add it to the lost commands list.
2592 */
2593 VERIFY(cmd->nc_nvme->n_dead);
2594 nvme_lost_cmd(nvme, cmd);
2595 }
2596
2597 static void
2598 nvme_wakeup_cmd(void *arg)
2599 {
2600 nvme_cmd_t *cmd = arg;
2601
2602 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
2603
2604 mutex_enter(&cmd->nc_mutex);
2605 cmd->nc_state = NVME_CMD_COMPLETED;
2606 cv_signal(&cmd->nc_cv);
2607 mutex_exit(&cmd->nc_mutex);
2608 }
2609
2610 static void
2611 nvme_async_event_task(void *arg)
2612 {
2613 nvme_cmd_t *cmd = arg;
2614 nvme_t *nvme = cmd->nc_nvme;
2615 nvme_error_log_entry_t *error_log = NULL;
2616 nvme_health_log_t *health_log = NULL;
2617 nvme_nschange_list_t *nslist = NULL;
2618 size_t logsize = 0;
2619 nvme_async_event_t event;
2620
2621 /*
2622 * Check for errors associated with the async request itself. The only
2623 * command-specific error is "async event limit exceeded", which
2624 * indicates a programming error in the driver and causes a panic in
2625 * nvme_check_cmd_status().
2626 *
2627 * Other possible errors are various scenarios where the async request
2628 * was aborted, or internal errors in the device. Internal errors are
2629 * reported to FMA, the command aborts need no special handling here.
2630 *
2631 * And finally, at least qemu nvme does not support async events,
2632 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we
2633 * will avoid posting async events.
2634 */
2635
2636 if (nvme_check_cmd_status(cmd) != 0) {
2637 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2638 "!async event request returned failure, sct = 0x%x, "
2639 "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
2640 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
2641 cmd->nc_cqe.cqe_sf.sf_m);
2642
2643 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2644 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
2645 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2646 }
2647
2648 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2649 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
2650 cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
2651 nvme->n_async_event_supported = B_FALSE;
2652 }
2653
2654 nvme_free_cmd(cmd);
2655 return;
2656 }
2657
2658 event.r = cmd->nc_cqe.cqe_dw0;
2659
2660 /* Clear CQE and re-submit the async request. */
2661 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
2662 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
2663 cmd = NULL; /* cmd can no longer be used after resubmission */
2664
2665 switch (event.b.ae_type) {
2666 case NVME_ASYNC_TYPE_ERROR:
2667 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
2668 if (!nvme_get_logpage_int(nvme, B_FALSE,
2669 (void **)&error_log, &logsize,
2670 NVME_LOGPAGE_ERROR)) {
2671 return;
2672 }
2673 } else {
2674 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2675 "async event reply: type=0x%x logpage=0x%x",
2676 event.b.ae_type, event.b.ae_logpage);
2677 NVME_BUMP_STAT(nvme, wrong_logpage);
2678 return;
2679 }
2680
2681 switch (event.b.ae_info) {
2682 case NVME_ASYNC_ERROR_INV_SQ:
2683 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2684 "invalid submission queue");
2685 return;
2686
2687 case NVME_ASYNC_ERROR_INV_DBL:
2688 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2689 "invalid doorbell write value");
2690 return;
2691
2692 case NVME_ASYNC_ERROR_DIAGFAIL:
2693 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
2694 nvme_ctrl_mark_dead(nvme, B_FALSE);
2695 NVME_BUMP_STAT(nvme, diagfail_event);
2696 break;
2697
2698 case NVME_ASYNC_ERROR_PERSISTENT:
2699 dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
2700 "device error");
2701 nvme_ctrl_mark_dead(nvme, B_FALSE);
2702 NVME_BUMP_STAT(nvme, persistent_event);
2703 break;
2704
2705 case NVME_ASYNC_ERROR_TRANSIENT:
2706 dev_err(nvme->n_dip, CE_WARN, "!transient internal "
2707 "device error");
2708 /* TODO: send ereport */
2709 NVME_BUMP_STAT(nvme, transient_event);
2710 break;
2711
2712 case NVME_ASYNC_ERROR_FW_LOAD:
2713 dev_err(nvme->n_dip, CE_WARN,
2714 "!firmware image load error");
2715 NVME_BUMP_STAT(nvme, fw_load_event);
2716 break;
2717 }
2718 break;
2719
2720 case NVME_ASYNC_TYPE_HEALTH:
2721 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
2722 if (!nvme_get_logpage_int(nvme, B_FALSE,
2723 (void **)&health_log, &logsize,
2724 NVME_LOGPAGE_HEALTH)) {
2725 return;
2726 }
2727 } else {
2728 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2729 "async event reply: type=0x%x logpage=0x%x", event.b.ae_type,
2730 event.b.ae_logpage);
2731 NVME_BUMP_STAT(nvme, wrong_logpage);
2732 return;
2733 }
2734
2735 switch (event.b.ae_info) {
2736 case NVME_ASYNC_HEALTH_RELIABILITY:
2737 dev_err(nvme->n_dip, CE_WARN,
2738 "!device reliability compromised");
2739 /* TODO: send ereport */
2740 NVME_BUMP_STAT(nvme, reliability_event);
2741 break;
2742
2743 case NVME_ASYNC_HEALTH_TEMPERATURE:
2744 dev_err(nvme->n_dip, CE_WARN,
2745 "!temperature above threshold");
2746 /* TODO: send ereport */
2747 NVME_BUMP_STAT(nvme, temperature_event);
2748 break;
2749
2750 case NVME_ASYNC_HEALTH_SPARE:
2751 dev_err(nvme->n_dip, CE_WARN,
2752 "!spare space below threshold");
2753 /* TODO: send ereport */
2754 NVME_BUMP_STAT(nvme, spare_event);
2755 break;
2756 }
2757 break;
2758
2759 case NVME_ASYNC_TYPE_NOTICE:
2760 switch (event.b.ae_info) {
2761 case NVME_ASYNC_NOTICE_NS_CHANGE:
2762 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) {
2763 dev_err(nvme->n_dip, CE_WARN,
2764 "!wrong logpage in async event reply: "
2765 "type=0x%x logpage=0x%x",
2766 event.b.ae_type, event.b.ae_logpage);
2767 NVME_BUMP_STAT(nvme, wrong_logpage);
2768 break;
2769 }
2770
2771 dev_err(nvme->n_dip, CE_NOTE,
2772 "namespace attribute change event, "
2773 "logpage = 0x%x", event.b.ae_logpage);
2774 NVME_BUMP_STAT(nvme, notice_event);
2775
2776 if (!nvme_get_logpage_int(nvme, B_FALSE,
2777 (void **)&nslist, &logsize,
2778 NVME_LOGPAGE_NSCHANGE)) {
2779 break;
2780 }
2781
2782 if (nslist->nscl_ns[0] == UINT32_MAX) {
2783 dev_err(nvme->n_dip, CE_CONT,
2784 "more than %u namespaces have changed.\n",
2785 NVME_NSCHANGE_LIST_SIZE);
2786 break;
2787 }
2788
2789 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
2790 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
2791 uint32_t nsid = nslist->nscl_ns[i];
2792
2793 if (nsid == 0) /* end of list */
2794 break;
2795
2796 dev_err(nvme->n_dip, CE_NOTE,
2797 "!namespace nvme%d/%u has changed.",
2798 ddi_get_instance(nvme->n_dip), nsid);
2799
2800 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
2801 continue;
2802
2803 nvme_mgmt_bd_start(nvme);
2804 bd_state_change(nvme_nsid2ns(nvme,
2805 nsid)->ns_bd_hdl);
2806 nvme_mgmt_bd_end(nvme);
2807 }
2808 nvme_mgmt_unlock(nvme);
2809
2810 break;
2811
2812 case NVME_ASYNC_NOTICE_FW_ACTIVATE:
2813 dev_err(nvme->n_dip, CE_NOTE,
2814 "firmware activation starting, "
2815 "logpage = 0x%x", event.b.ae_logpage);
2816 NVME_BUMP_STAT(nvme, notice_event);
2817 break;
2818
2819 case NVME_ASYNC_NOTICE_TELEMETRY:
2820 dev_err(nvme->n_dip, CE_NOTE,
2821 "telemetry log changed, "
2822 "logpage = 0x%x", event.b.ae_logpage);
2823 NVME_BUMP_STAT(nvme, notice_event);
2824 break;
2825
2826 case NVME_ASYNC_NOTICE_NS_ASYMM:
2827 dev_err(nvme->n_dip, CE_NOTE,
2828 "asymmetric namespace access change, "
2829 "logpage = 0x%x", event.b.ae_logpage);
2830 NVME_BUMP_STAT(nvme, notice_event);
2831 break;
2832
2833 case NVME_ASYNC_NOTICE_LATENCYLOG:
2834 dev_err(nvme->n_dip, CE_NOTE,
2835 "predictable latency event aggregate log change, "
2836 "logpage = 0x%x", event.b.ae_logpage);
2837 NVME_BUMP_STAT(nvme, notice_event);
2838 break;
2839
2840 case NVME_ASYNC_NOTICE_LBASTATUS:
2841 dev_err(nvme->n_dip, CE_NOTE,
2842 "LBA status information alert, "
2843 "logpage = 0x%x", event.b.ae_logpage);
2844 NVME_BUMP_STAT(nvme, notice_event);
2845 break;
2846
2847 case NVME_ASYNC_NOTICE_ENDURANCELOG:
2848 dev_err(nvme->n_dip, CE_NOTE,
2849 "endurance group event aggregate log page change, "
2850 "logpage = 0x%x", event.b.ae_logpage);
2851 NVME_BUMP_STAT(nvme, notice_event);
2852 break;
2853
2854 default:
2855 dev_err(nvme->n_dip, CE_WARN,
2856 "!unknown notice async event received, "
2857 "info = 0x%x, logpage = 0x%x", event.b.ae_info,
2858 event.b.ae_logpage);
2859 NVME_BUMP_STAT(nvme, unknown_event);
2860 break;
2861 }
2862 break;
2863
2864 case NVME_ASYNC_TYPE_VENDOR:
2865 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
2866 "received, info = 0x%x, logpage = 0x%x", event.b.ae_info,
2867 event.b.ae_logpage);
2868 NVME_BUMP_STAT(nvme, vendor_event);
2869 break;
2870
2871 default:
2872 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
2873 "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type,
2874 event.b.ae_info, event.b.ae_logpage);
2875 NVME_BUMP_STAT(nvme, unknown_event);
2876 break;
2877 }
2878
2879 if (error_log != NULL)
2880 kmem_free(error_log, logsize);
2881
2882 if (health_log != NULL)
2883 kmem_free(health_log, logsize);
2884
2885 if (nslist != NULL)
2886 kmem_free(nslist, logsize);
2887 }
2888
2889 static void
2890 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec)
2891 {
2892 uint32_t qtimeout;
2893
2894 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
2895
2896 mutex_enter(&cmd->nc_mutex);
2897 cmd->nc_timeout = sec;
2898 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd, &qtimeout);
2899 /*
2900 * We will wait for a total of this command's specified timeout plus
2901 * the sum of the timeouts of any commands queued ahead of this one. If
2902 * we aren't first in the queue, this will inflate the timeout somewhat
2903 * but these times are not critical and it means that if we get stuck
2904 * behind a long running command such as a namespace format then we
2905 * won't time out and trigger an abort.
2906 */
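/*
 * As an illustration: if, say, a format command with a 600 second
 * timeout is already queued ahead of a command whose own timeout is 60
 * seconds, we will wait up to 660 seconds before declaring a timeout.
 */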
2907 nvme_wait_cmd(cmd, sec + qtimeout);
2908 mutex_exit(&cmd->nc_mutex);
2909 }
2910
2911 static void
2912 nvme_async_event(nvme_t *nvme)
2913 {
2914 nvme_cmd_t *cmd;
2915
2916 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2917 cmd->nc_sqid = 0;
2918 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
2919 cmd->nc_callback = nvme_async_event_task;
2920 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
2921
2922 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
2923 }
2924
2925 /*
2926 * Some commands, such as format or vendor unique commands, manipulate or
2927 * destroy the data in a namespace. Before issuing such a command we make sure
2928 * that none of the namespaces that will be impacted are actually attached.
2929 */
2930 static boolean_t
2931 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid)
2932 {
2933 ASSERT(nvme_mgmt_lock_held(nvme));
2934 ASSERT3U(nsid, !=, 0);
2935
2936 if (nsid != NVME_NSID_BCAST) {
2937 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
2938 return (!ns->ns_attached);
2939 }
2940
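/* Broadcast nsid: every namespace must be detached from blkdev. */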
2941 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
2942 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
2943
2944 if (ns->ns_attached) {
2945 return (B_FALSE);
2946 }
2947 }
2948
2949 return (B_TRUE);
2950 }
2951
2952 static boolean_t
2953 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc)
2954 {
2955 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2956 nvme_format_nvm_t format_nvm = { 0 };
2957 boolean_t ret;
2958
2959 format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0);
2960 format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0);
2961
2962 cmd->nc_sqid = 0;
2963 cmd->nc_callback = nvme_wakeup_cmd;
2964 cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid;
2965 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
2966 cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
2967
2968 /*
2969 * We don't want to panic on any format commands. There are two reasons
2970 * for this:
2971 *
2972 * 1) All format commands are initiated by users. We don't want to panic
2973 * on user commands.
2974 *
2975 * 2) Several devices like the Samsung SM951 don't allow formatting of
2976 * all namespaces in one command and we'd prefer to handle that
2977 * gracefully.
2978 */
2979 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
2980
2981 nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
2982
2983 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common)) {
2984 dev_err(nvme->n_dip, CE_WARN,
2985 "!FORMAT failed with sct = %x, sc = %x",
2986 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2987 ret = B_FALSE;
2988 goto fail;
2989 }
2990
2991 ret = B_TRUE;
2992 fail:
2993 nvme_free_cmd(cmd);
2994 return (ret);
2995 }
2996
2997 /*
2998 * Retrieve a specific log page. The contents of the log page request should
2999 * have already been validated by the system.
3000 */
3001 static boolean_t
3002 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log,
3003 void **buf)
3004 {
3005 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3006 nvme_getlogpage_dw10_t dw10;
3007 uint32_t offlo, offhi;
3008 nvme_getlogpage_dw11_t dw11;
3009 nvme_getlogpage_dw14_t dw14;
3010 uint32_t ndw;
3011 boolean_t ret = B_FALSE;
3012
3013 bzero(&dw10, sizeof (dw10));
3014 bzero(&dw11, sizeof (dw11));
3015 bzero(&dw14, sizeof (dw14));
3016
3017 cmd->nc_sqid = 0;
3018 cmd->nc_callback = nvme_wakeup_cmd;
3019 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
3020 cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid;
3021
3022 if (user)
3023 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3024
3025 /*
3026 * The size field is the number of double words, but is a zero-based
3027 * value. We need to store our actual value minus one.
3028 */
3029 ndw = (uint32_t)(log->nigl_len / 4);
3030 ASSERT3U(ndw, >, 0);
3031 ndw--;
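/*
 * E.g. a 512-byte log page is 128 dwords, encoded as 127; the low 16
 * bits go into NUMDL (cdw10) and the high 16 bits into NUMDU (cdw11).
 */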
3032
3033 dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0);
3034 dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0);
3035 dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0);
3036 dw10.b.lp_lnumdl = bitx32(ndw, 15, 0);
3037
3038 dw11.b.lp_numdu = bitx32(ndw, 31, 16);
3039 dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0);
3040
3041 offlo = bitx64(log->nigl_offset, 31, 0);
3042 offhi = bitx64(log->nigl_offset, 63, 32);
3043
3044 dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0);
3045
3046 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3047 cmd->nc_sqe.sqe_cdw11 = dw11.r;
3048 cmd->nc_sqe.sqe_cdw12 = offlo;
3049 cmd->nc_sqe.sqe_cdw13 = offhi;
3050 cmd->nc_sqe.sqe_cdw14 = dw14.r;
3051
3052 if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ,
3053 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3054 dev_err(nvme->n_dip, CE_WARN,
3055 "!nvme_zalloc_dma failed for GET LOG PAGE");
3056 ret = nvme_ioctl_error(&log->nigl_common,
3057 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3058 goto fail;
3059 }
3060
3061 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
3062 ret = nvme_ioctl_error(&log->nigl_common,
3063 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3064 goto fail;
3065 }
3066 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3067
3068 if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) {
3069 if (!user) {
3070 dev_err(nvme->n_dip, CE_WARN,
3071 "!GET LOG PAGE failed with sct = %x, sc = %x",
3072 cmd->nc_cqe.cqe_sf.sf_sct,
3073 cmd->nc_cqe.cqe_sf.sf_sc);
3074 }
3075 ret = B_FALSE;
3076 goto fail;
3077 }
3078
3079 *buf = kmem_alloc(log->nigl_len, KM_SLEEP);
3080 bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len);
3081
3082 ret = B_TRUE;
3083 fail:
3084 nvme_free_cmd(cmd);
3085
3086 return (ret);
3087 }
3088
3089 /*
3090 * This is an internal wrapper for when the kernel wants to get a log page.
3091 * Currently this assumes that the only thing that is required is the log page
3092 * ID. If more information is required, we'll be better served to just use the
3093 * general ioctl interface.
3094 */
3095 static boolean_t
3096 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
3097 uint8_t lid)
3098 {
3099 const nvme_log_page_info_t *info = NULL;
3100 nvme_ioctl_get_logpage_t log;
3101 nvme_valid_ctrl_data_t data;
3102 boolean_t bret;
3103 bool var;
3104
3105 for (size_t i = 0; i < nvme_std_log_npages; i++) {
3106 if (nvme_std_log_pages[i].nlpi_lid == lid &&
3107 nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) {
3108 info = &nvme_std_log_pages[i];
3109 break;
3110 }
3111 }
3112
3113 if (info == NULL) {
3114 return (B_FALSE);
3115 }
3116
3117 data.vcd_vers = &nvme->n_version;
3118 data.vcd_id = nvme->n_idctl;
3119 bzero(&log, sizeof (log));
3120 log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
3121 log.nigl_csi = info->nlpi_csi;
3122 log.nigl_lid = info->nlpi_lid;
3123 log.nigl_len = nvme_log_page_info_size(info, &data, &var);
3124
3125 /*
3126 * We only support getting standard fixed-length log pages through the
3127 * kernel interface at this time. If a log page either has an unknown
3128 * size or has a variable length, then we cannot get it.
3129 */
3130 if (log.nigl_len == 0 || var) {
3131 return (B_FALSE);
3132 }
3133
3134 bret = nvme_get_logpage(nvme, user, &log, buf);
3135 if (!bret) {
3136 return (B_FALSE);
3137 }
3138
3139 *bufsize = log.nigl_len;
3140 return (B_TRUE);
3141 }
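
/*
 * A minimal usage sketch for the wrapper above (hypothetical caller, shown
 * only for illustration): fetch the SMART / Health Information log page
 * (LID 0x02) for the controller and release the buffer afterwards.
 *
 *	void *buf;
 *	size_t len;
 *
 *	if (nvme_get_logpage_int(nvme, B_FALSE, &buf, &len, 0x02)) {
 *		... interpret buf as the health log ...
 *		kmem_free(buf, len);
 *	}
 */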
3142
3143 static boolean_t
3144 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc,
3145 void **buf)
3146 {
3147 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3148 boolean_t ret = B_FALSE;
3149 nvme_identify_dw10_t dw10;
3150
3151 ASSERT3P(buf, !=, NULL);
3152
3153 bzero(&dw10, sizeof (dw10));
3154
3155 cmd->nc_sqid = 0;
3156 cmd->nc_callback = nvme_wakeup_cmd;
3157 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
3158 cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid;
3159
3160 dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0);
3161 dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0);
3162
3163 cmd->nc_sqe.sqe_cdw10 = dw10.r;
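
	/*
	 * Illustrative example (values not from any specific device): an
	 * Identify Controller request uses CNS 0x01 with a controller ID of
	 * zero, so cdw10 ends up as 0x00000001; an Identify Namespace request
	 * uses CNS 0x00, leaving cdw10 as 0.
	 */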
3164
3165 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
3166 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3167 dev_err(nvme->n_dip, CE_WARN,
3168 "!nvme_zalloc_dma failed for IDENTIFY");
3169 ret = nvme_ioctl_error(&ioc->nid_common,
3170 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3171 goto fail;
3172 }
3173
3174 if (cmd->nc_dma->nd_ncookie > 2) {
3175 dev_err(nvme->n_dip, CE_WARN,
3176 "!too many DMA cookies for IDENTIFY");
3177 NVME_BUMP_STAT(nvme, too_many_cookies);
3178 ret = nvme_ioctl_error(&ioc->nid_common,
3179 NVME_IOCTL_E_BAD_PRP, 0, 0);
3180 goto fail;
3181 }
3182
3183 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
3184 if (cmd->nc_dma->nd_ncookie > 1) {
3185 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
3186 &cmd->nc_dma->nd_cookie);
3187 cmd->nc_sqe.sqe_dptr.d_prp[1] =
3188 cmd->nc_dma->nd_cookie.dmac_laddress;
3189 }
3190
3191 if (user)
3192 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3193
3194 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3195
3196 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) {
3197 dev_err(nvme->n_dip, CE_WARN,
3198 "!IDENTIFY failed with sct = %x, sc = %x",
3199 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3200 ret = B_FALSE;
3201 goto fail;
3202 }
3203
3204 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
3205 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
3206 ret = B_TRUE;
3207
3208 fail:
3209 nvme_free_cmd(cmd);
3210
3211 return (ret);
3212 }
3213
3214 static boolean_t
3215 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf)
3216 {
3217 nvme_ioctl_identify_t id;
3218
3219 bzero(&id, sizeof (nvme_ioctl_identify_t));
3220 id.nid_common.nioc_nsid = nsid;
3221 id.nid_cns = cns;
3222
3223 return (nvme_identify(nvme, B_FALSE, &id, buf));
3224 }
3225
3226 static int
3227 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
3228 uint32_t val, uint32_t *res)
3229 {
3230 _NOTE(ARGUNUSED(nsid));
3231 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3232 int ret = EINVAL;
3233
3234 ASSERT(res != NULL);
3235
3236 cmd->nc_sqid = 0;
3237 cmd->nc_callback = nvme_wakeup_cmd;
3238 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
3239 cmd->nc_sqe.sqe_cdw10 = feature;
3240 cmd->nc_sqe.sqe_cdw11 = val;
3241
3242 if (user)
3243 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3244
3245 switch (feature) {
3246 case NVME_FEAT_WRITE_CACHE:
3247 if (!nvme->n_write_cache_present)
3248 goto fail;
3249 break;
3250
3251 case NVME_FEAT_NQUEUES:
3252 break;
3253
3254 default:
3255 goto fail;
3256 }
3257
3258 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3259
3260 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3261 dev_err(nvme->n_dip, CE_WARN,
3262 "!SET FEATURES %d failed with sct = %x, sc = %x",
3263 feature, cmd->nc_cqe.cqe_sf.sf_sct,
3264 cmd->nc_cqe.cqe_sf.sf_sc);
3265 goto fail;
3266 }
3267
3268 *res = cmd->nc_cqe.cqe_dw0;
3269
3270 fail:
3271 nvme_free_cmd(cmd);
3272 return (ret);
3273 }
3274
3275 static int
3276 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
3277 {
3278 nvme_write_cache_t nwc = { 0 };
3279
3280 if (enable)
3281 nwc.b.wc_wce = 1;
3282
3283 /*
3284 	 * We have seen this command fail with an "invalid namespace" error
3285 	 * when operating against the Xen xcp-ng qemu NVMe virtual device. As
3286 	 * such, we pass user == B_TRUE so that a failure to enable the cache
3287 	 * does not panic the system. It's not completely clear why specifying
3288 	 * namespace zero fails here, but not when we're setting the number of
3289 	 * queues below.
3290 */
3291 return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE,
3292 nwc.r, &nwc.r));
3293 }
3294
3295 static int
3296 nvme_set_nqueues(nvme_t *nvme)
3297 {
3298 nvme_nqueues_t nq = { 0 };
3299 int ret;
3300
3301 /*
3302 * The default is to allocate one completion queue per vector.
3303 */
3304 if (nvme->n_completion_queues == -1)
3305 nvme->n_completion_queues = nvme->n_intr_cnt;
3306
3307 /*
3308 * There is no point in having more completion queues than
3309 * interrupt vectors.
3310 */
3311 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3312 nvme->n_intr_cnt);
3313
3314 /*
3315 * The default is to use one submission queue per completion queue.
3316 */
3317 if (nvme->n_submission_queues == -1)
3318 nvme->n_submission_queues = nvme->n_completion_queues;
3319
3320 /*
3321 * There is no point in having more completion queues than
3322 * submission queues.
3323 */
3324 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3325 nvme->n_submission_queues);
3326
3327 ASSERT(nvme->n_submission_queues > 0);
3328 ASSERT(nvme->n_completion_queues > 0);
3329
3330 nq.b.nq_nsq = nvme->n_submission_queues - 1;
3331 nq.b.nq_ncq = nvme->n_completion_queues - 1;
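
	/*
	 * Worked example (illustrative numbers only): if the driver asks for
	 * 8 submission and 8 completion queues, the zero-based values are
	 * nq_nsq = nq_ncq = 7 and cdw11 becomes 0x00070007. A controller that
	 * only grants 4 completion queues completes the command with
	 * dw0 = 0x00030007, and the MIN() calls below clamp
	 * n_completion_queues back down to 4.
	 */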
3332
3333 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
3334 &nq.r);
3335
3336 if (ret == 0) {
3337 /*
3338 * Never use more than the requested number of queues.
3339 */
3340 nvme->n_submission_queues = MIN(nvme->n_submission_queues,
3341 nq.b.nq_nsq + 1);
3342 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3343 nq.b.nq_ncq + 1);
3344 }
3345
3346 return (ret);
3347 }
3348
3349 static int
3350 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
3351 {
3352 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3353 nvme_create_queue_dw10_t dw10 = { 0 };
3354 nvme_create_cq_dw11_t c_dw11 = { 0 };
3355 int ret;
3356
3357 dw10.b.q_qid = cq->ncq_id;
3358 dw10.b.q_qsize = cq->ncq_nentry - 1;
3359
3360 c_dw11.b.cq_pc = 1;
3361 c_dw11.b.cq_ien = 1;
3362 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
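
	/*
	 * Example of the vector assignment above (counts are illustrative):
	 * with n_intr_cnt == 4, the admin completion queue (id 0) uses vector
	 * 0 and I/O completion queue 6 uses vector 2; nvme_intr() walks the
	 * queues with the same modular stride.
	 */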
3363
3364 cmd->nc_sqid = 0;
3365 cmd->nc_callback = nvme_wakeup_cmd;
3366 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
3367 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3368 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
3369 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
3370
3371 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3372
3373 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3374 dev_err(nvme->n_dip, CE_WARN,
3375 "!CREATE CQUEUE failed with sct = %x, sc = %x",
3376 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3377 }
3378
3379 nvme_free_cmd(cmd);
3380
3381 return (ret);
3382 }
3383
3384 static int
3385 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
3386 {
3387 nvme_cq_t *cq = qp->nq_cq;
3388 nvme_cmd_t *cmd;
3389 nvme_create_queue_dw10_t dw10 = { 0 };
3390 nvme_create_sq_dw11_t s_dw11 = { 0 };
3391 int ret;
3392
3393 /*
3394 * It is possible to have more qpairs than completion queues,
3395 * and when the idx > ncq_id, that completion queue is shared
3396 * and has already been created.
3397 */
3398 if (idx <= cq->ncq_id &&
3399 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
3400 return (DDI_FAILURE);
3401
3402 dw10.b.q_qid = idx;
3403 dw10.b.q_qsize = qp->nq_nentry - 1;
3404
3405 s_dw11.b.sq_pc = 1;
3406 s_dw11.b.sq_cqid = cq->ncq_id;
3407
3408 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3409 cmd->nc_sqid = 0;
3410 cmd->nc_callback = nvme_wakeup_cmd;
3411 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
3412 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3413 cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
3414 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
3415
3416 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3417
3418 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3419 dev_err(nvme->n_dip, CE_WARN,
3420 "!CREATE SQUEUE failed with sct = %x, sc = %x",
3421 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3422 }
3423
3424 nvme_free_cmd(cmd);
3425
3426 return (ret);
3427 }
3428
3429 static boolean_t
3430 nvme_reset(nvme_t *nvme, boolean_t quiesce)
3431 {
3432 nvme_reg_csts_t csts;
3433 int i;
3434
3435 /*
3436 * If the device is gone, do not try to interact with it. We define
3437 * that resetting such a device is impossible, and always fails.
3438 */
3439 if (nvme_ctrl_is_gone(nvme)) {
3440 return (B_FALSE);
3441 }
3442
3443 nvme_put32(nvme, NVME_REG_CC, 0);
3444
3445 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3446 if (csts.b.csts_rdy == 1) {
3447 nvme_put32(nvme, NVME_REG_CC, 0);
3448
3449 /*
3450 * The timeout value is from the Controller Capabilities
3451 * register (CAP.TO, section 3.1.1). This is the worst case
3452 * time to wait for CSTS.RDY to transition from 1 to 0 after
3453 * CC.EN transitions from 1 to 0.
3454 *
3455 * The timeout units are in 500 ms units, and we are delaying
3456 * in 50ms chunks, hence counting to n_timeout * 10.
3457 */
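		/*
		 * For instance, a controller reporting CAP.TO == 20 allows a
		 * worst case of 10 seconds (20 * 500 ms), which this loop
		 * covers as 200 iterations of 50 ms each. These figures are
		 * purely illustrative.
		 */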
3458 for (i = 0; i < nvme->n_timeout * 10; i++) {
3459 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3460 if (csts.b.csts_rdy == 0)
3461 break;
3462
3463 /*
3464 * Quiescing drivers should not use locks or timeouts,
3465 * so if this is the quiesce path, use a quiesce-safe
3466 * delay.
3467 */
3468 if (quiesce) {
3469 drv_usecwait(50000);
3470 } else {
3471 delay(drv_usectohz(50000));
3472 }
3473 }
3474 }
3475
3476 nvme_put32(nvme, NVME_REG_AQA, 0);
3477 nvme_put32(nvme, NVME_REG_ASQ, 0);
3478 nvme_put32(nvme, NVME_REG_ACQ, 0);
3479
3480 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3481 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
3482 }
3483
3484 static void
3485 nvme_shutdown(nvme_t *nvme, boolean_t quiesce)
3486 {
3487 nvme_reg_cc_t cc;
3488 nvme_reg_csts_t csts;
3489 int i;
3490
3491 /*
3492 * Do not try to interact with the device if it is gone. Since it is
3493 * not there, in some sense it must already be shut down anyway.
3494 */
3495 if (nvme_ctrl_is_gone(nvme)) {
3496 return;
3497 }
3498
3499 cc.r = nvme_get32(nvme, NVME_REG_CC);
3500 cc.b.cc_shn = NVME_CC_SHN_NORMAL;
3501 nvme_put32(nvme, NVME_REG_CC, cc.r);
3502
3503 for (i = 0; i < 10; i++) {
3504 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3505 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
3506 break;
3507
3508 if (quiesce) {
3509 drv_usecwait(100000);
3510 } else {
3511 delay(drv_usectohz(100000));
3512 }
3513 }
3514 }
3515
3516 /*
3517 * Return length of string without trailing spaces.
3518 */
3519 static int
3520 nvme_strlen(const char *str, int len)
3521 {
3522 if (len <= 0)
3523 return (0);
3524
3525 while (str[--len] == ' ')
3526 ;
3527
3528 return (++len);
3529 }
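
/*
 * For example, with the space-padded identify strings this is used on:
 * nvme_strlen("ABC   ", 6) returns 3, while an unpadded "ABCDEF" of length 6
 * returns 6.
 */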
3530
3531 static void
3532 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
3533 {
3534 ulong_t bsize = 0;
3535 char *msg = "";
3536
3537 if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
3538 goto err;
3539
3540 if (!ISP2(bsize)) {
3541 msg = ": not a power of 2";
3542 goto err;
3543 }
3544
3545 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
3546 msg = ": too low";
3547 goto err;
3548 }
3549
3550 nvme->n_min_block_size = bsize;
3551 return;
3552
3553 err:
3554 dev_err(nvme->n_dip, CE_WARN,
3555 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
3556 "for model '%s'%s", val, model, msg);
3557
3558 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3559 }
3560
3561 static void
3562 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
3563 boolean_t *b)
3564 {
3565 if (strcmp(val, "on") == 0 ||
3566 strcmp(val, "true") == 0)
3567 *b = B_TRUE;
3568 else if (strcmp(val, "off") == 0 ||
3569 strcmp(val, "false") == 0)
3570 *b = B_FALSE;
3571 else
3572 dev_err(nvme->n_dip, CE_WARN,
3573 "!nvme-config-list: invalid value for %s '%s'"
3574 " for model '%s', ignoring", name, val, model);
3575 }
3576
3577 static void
3578 nvme_config_list(nvme_t *nvme)
3579 {
3580 char **config_list;
3581 uint_t nelem;
3582 int rv, i;
3583
3584 /*
3585 * We're following the pattern of 'sd-config-list' here, but extend it.
3586 * Instead of two we have three separate strings for "model", "fwrev",
3587 * and "name-value-list".
3588 */
3589 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
3590 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
3591
3592 if (rv != DDI_PROP_SUCCESS) {
3593 if (rv == DDI_PROP_CANNOT_DECODE) {
3594 dev_err(nvme->n_dip, CE_WARN,
3595 "!nvme-config-list: cannot be decoded");
3596 }
3597
3598 return;
3599 }
3600
3601 if ((nelem % 3) != 0) {
3602 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
3603 "triplets of <model>/<fwrev>/<name-value-list> strings ");
3604 goto out;
3605 }
3606
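	/*
	 * A hypothetical nvme.conf entry matching the triplet layout checked
	 * above (the model, firmware, and property strings are made up):
	 *
	 *	nvme-config-list =
	 *	    "EXAMPLE NVMe MODEL",	"1.0FW,1.1FW",
	 *		"min-phys-block-size:4096,volatile-write-cache:off",
	 *	    "OTHER EXAMPLE MODEL",	"",
	 *		"ignore-unknown-vendor-status:on";
	 *
	 * An empty <fwrev> string matches any firmware revision.
	 */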
3607 for (i = 0; i < nelem; i += 3) {
3608 char *model = config_list[i];
3609 char *fwrev = config_list[i + 1];
3610 char *nvp, *save_nv;
3611 int id_model_len, id_fwrev_len;
3612
3613 id_model_len = nvme_strlen(nvme->n_idctl->id_model,
3614 sizeof (nvme->n_idctl->id_model));
3615
3616 if (strlen(model) != id_model_len)
3617 continue;
3618
3619 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
3620 continue;
3621
3622 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
3623 sizeof (nvme->n_idctl->id_fwrev));
3624
3625 if (strlen(fwrev) != 0) {
3626 boolean_t match = B_FALSE;
3627 char *fwr, *last_fw;
3628
3629 for (fwr = strtok_r(fwrev, ",", &last_fw);
3630 fwr != NULL;
3631 fwr = strtok_r(NULL, ",", &last_fw)) {
3632 if (strlen(fwr) != id_fwrev_len)
3633 continue;
3634
3635 if (strncmp(fwr, nvme->n_idctl->id_fwrev,
3636 id_fwrev_len) == 0)
3637 match = B_TRUE;
3638 }
3639
3640 if (!match)
3641 continue;
3642 }
3643
3644 /*
3645 * We should now have a comma-separated list of name:value
3646 * pairs.
3647 */
3648 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
3649 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
3650 char *name = nvp;
3651 char *val = strchr(nvp, ':');
3652
3653 if (val == NULL || name == val) {
3654 dev_err(nvme->n_dip, CE_WARN,
3655 "!nvme-config-list: <name-value-list> "
3656 "for model '%s' is malformed", model);
3657 goto out;
3658 }
3659
3660 /*
3661 * Null-terminate 'name', move 'val' past ':' sep.
3662 */
3663 *val++ = '\0';
3664
3665 /*
3666 * Process the name:val pairs that we know about.
3667 */
3668 if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
3669 nvme_config_boolean(nvme, model, name, val,
3670 &nvme->n_ignore_unknown_vendor_status);
3671 } else if (strcmp(name, "min-phys-block-size") == 0) {
3672 nvme_config_min_block_size(nvme, model, val);
3673 } else if (strcmp(name, "volatile-write-cache") == 0) {
3674 nvme_config_boolean(nvme, model, name, val,
3675 &nvme->n_write_cache_enabled);
3676 } else {
3677 /*
3678 * Unknown 'name'.
3679 */
3680 dev_err(nvme->n_dip, CE_WARN,
3681 "!nvme-config-list: unknown config '%s' "
3682 "for model '%s', ignoring", name, model);
3683 }
3684 }
3685 }
3686
3687 out:
3688 ddi_prop_free(config_list);
3689 }
3690
3691 static void
3692 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
3693 {
3694 /*
3695 * Section 7.7 of the spec describes how to get a unique ID for
3696 * the controller: the vendor ID, the model name and the serial
3697 * number shall be unique when combined.
3698 *
3699 * If a namespace has no EUI64 we use the above and add the hex
3700 * namespace ID to get a unique ID for the namespace.
3701 */
3702 char model[sizeof (nvme->n_idctl->id_model) + 1];
3703 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
3704
3705 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
3706 bcopy(nvme->n_idctl->id_serial, serial,
3707 sizeof (nvme->n_idctl->id_serial));
3708
3709 model[sizeof (nvme->n_idctl->id_model)] = '\0';
3710 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
3711
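	/*
	 * As an illustration with made-up values: a vendor ID of 0x1344,
	 * model "ExampleModel", serial "S0MESER1AL", and nsid 2 yield a devid
	 * of the form "1344-ExampleModel-S0MESER1AL-2". The model and serial
	 * strings are used as-is, including any space padding.
	 */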
3712 nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
3713 nvme->n_idctl->id_vid, model, serial, nsid);
3714 }
3715
3716 static nvme_identify_nsid_list_t *
3717 nvme_update_nsid_list(nvme_t *nvme, int cns)
3718 {
3719 nvme_identify_nsid_list_t *nslist;
3720
3721 /*
3722 * We currently don't handle cases where there are more than
3723 * 1024 active namespaces, requiring several IDENTIFY commands.
3724 */
3725 if (nvme_identify_int(nvme, 0, cns, (void **)&nslist))
3726 return (nslist);
3727
3728 return (NULL);
3729 }
3730
3731 nvme_namespace_t *
3732 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid)
3733 {
3734 ASSERT3U(nsid, !=, 0);
3735 ASSERT3U(nsid, <=, nvme->n_namespace_count);
3736 return (&nvme->n_ns[nsid - 1]);
3737 }
3738
3739 static boolean_t
3740 nvme_allocated_ns(nvme_namespace_t *ns)
3741 {
3742 nvme_t *nvme = ns->ns_nvme;
3743 uint32_t i;
3744
3745 ASSERT(nvme_mgmt_lock_held(nvme));
3746
3747 /*
3748 * If supported, update the list of allocated namespace IDs.
3749 */
3750 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) &&
3751 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
3752 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3753 NVME_IDENTIFY_NSID_ALLOC_LIST);
3754 boolean_t found = B_FALSE;
3755
3756 /*
3757 * When namespace management is supported, this really shouldn't
3758 * be NULL. Treat all namespaces as allocated if it is.
3759 */
3760 if (nslist == NULL)
3761 return (B_TRUE);
3762
3763 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3764 if (ns->ns_id == 0)
3765 break;
3766
3767 if (ns->ns_id == nslist->nl_nsid[i])
3768 found = B_TRUE;
3769 }
3770
3771 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3772 return (found);
3773 } else {
3774 /*
3775 * If namespace management isn't supported, report all
3776 * namespaces as allocated.
3777 */
3778 return (B_TRUE);
3779 }
3780 }
3781
3782 static boolean_t
3783 nvme_active_ns(nvme_namespace_t *ns)
3784 {
3785 nvme_t *nvme = ns->ns_nvme;
3786 uint64_t *ptr;
3787 uint32_t i;
3788
3789 ASSERT(nvme_mgmt_lock_held(nvme));
3790
3791 /*
3792 * If supported, update the list of active namespace IDs.
3793 */
3794 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) {
3795 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3796 NVME_IDENTIFY_NSID_LIST);
3797 boolean_t found = B_FALSE;
3798
3799 /*
3800 		 * On controllers with the active namespace list (NVMe 1.1+),
3801 		 * this shouldn't be NULL. Treat all namespaces as active if it is.
3802 */
3803 if (nslist == NULL)
3804 return (B_TRUE);
3805
3806 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3807 if (ns->ns_id == 0)
3808 break;
3809
3810 if (ns->ns_id == nslist->nl_nsid[i])
3811 found = B_TRUE;
3812 }
3813
3814 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3815 return (found);
3816 }
3817
3818 /*
3819 * Workaround for revision 1.0:
3820 * Check whether the IDENTIFY NAMESPACE data is zero-filled.
3821 */
3822 for (ptr = (uint64_t *)ns->ns_idns;
3823 ptr != (uint64_t *)(ns->ns_idns + 1);
3824 ptr++) {
3825 if (*ptr != 0) {
3826 return (B_TRUE);
3827 }
3828 }
3829
3830 return (B_FALSE);
3831 }
3832
3833 static int
3834 nvme_init_ns(nvme_t *nvme, uint32_t nsid)
3835 {
3836 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3837 nvme_identify_nsid_t *idns;
3838 boolean_t was_ignored;
3839 int last_rp;
3840
3841 ns->ns_nvme = nvme;
3842
3843 ASSERT(nvme_mgmt_lock_held(nvme));
3844
3845 /*
3846 	 * Because we might rescan a namespace after boot, a failure here
3847 	 * would leave us in a bad spot. We need to do something about this
3848 	 * longer term, but it's not clear how exactly we would recover
3849 	 * right now.
3850 */
3851 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
3852 (void **)&idns)) {
3853 dev_err(nvme->n_dip, CE_WARN,
3854 "!failed to identify namespace %d", nsid);
3855 return (DDI_FAILURE);
3856 }
3857
3858 if (ns->ns_idns != NULL)
3859 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
3860
3861 ns->ns_idns = idns;
3862 ns->ns_id = nsid;
3863
3866 ns->ns_allocated = nvme_allocated_ns(ns);
3867 ns->ns_active = nvme_active_ns(ns);
3868
3869 ns->ns_block_count = idns->id_nsize;
3870 ns->ns_block_size =
3871 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
3872 ns->ns_best_block_size = ns->ns_block_size;
3873
3874 /*
3875 * Get the EUI64 if present.
3876 */
3877 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
3878 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
3879
3880 /*
3881 * Get the NGUID if present.
3882 */
3883 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
3884 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
3885
3886 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
3887 if (*(uint64_t *)ns->ns_eui64 == 0)
3888 nvme_prepare_devid(nvme, ns->ns_id);
3889
3890 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id);
3891
3892 /*
3893 * Find the LBA format with no metadata and the best relative
3894 * performance. A value of 3 means "degraded", 0 is best.
3895 */
3896 last_rp = 3;
3897 for (int j = 0; j <= idns->id_nlbaf; j++) {
3898 if (idns->id_lbaf[j].lbaf_lbads == 0)
3899 break;
3900 if (idns->id_lbaf[j].lbaf_ms != 0)
3901 continue;
3902 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
3903 continue;
3904 last_rp = idns->id_lbaf[j].lbaf_rp;
3905 ns->ns_best_block_size =
3906 1 << idns->id_lbaf[j].lbaf_lbads;
3907 }
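
	/*
	 * Example of the selection above (illustrative numbers only): if a
	 * namespace reports LBAF 0 as 512 bytes, no metadata, relative
	 * performance 2, and LBAF 1 as 4096 bytes, no metadata, relative
	 * performance 0, the loop settles on LBAF 1 and ns_best_block_size
	 * becomes 4096, independent of which format FLBAS currently selects.
	 */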
3908
3909 if (ns->ns_best_block_size < nvme->n_min_block_size)
3910 ns->ns_best_block_size = nvme->n_min_block_size;
3911
3912 was_ignored = ns->ns_ignore;
3913
3914 /*
3915 * We currently don't support namespaces that are inactive, or use
3916 * either:
3917 * - protection information
3918 * - illegal block size (< 512)
3919 */
3920 if (!ns->ns_active) {
3921 ns->ns_ignore = B_TRUE;
3922 } else if (idns->id_dps.dp_pinfo) {
3923 dev_err(nvme->n_dip, CE_WARN,
3924 "!ignoring namespace %d, unsupported feature: "
3925 "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
3926 ns->ns_ignore = B_TRUE;
3927 } else if (ns->ns_block_size < 512) {
3928 dev_err(nvme->n_dip, CE_WARN,
3929 "!ignoring namespace %d, unsupported block size %"PRIu64,
3930 nsid, (uint64_t)ns->ns_block_size);
3931 ns->ns_ignore = B_TRUE;
3932 } else {
3933 ns->ns_ignore = B_FALSE;
3934 }
3935
3936 /*
3937 * Keep a count of namespaces which are attachable.
3938 * See comments in nvme_bd_driveinfo() to understand its effect.
3939 */
3940 if (was_ignored) {
3941 /*
3942 * Previously ignored, but now not. Count it.
3943 */
3944 if (!ns->ns_ignore)
3945 nvme->n_namespaces_attachable++;
3946 } else {
3947 /*
3948 * Wasn't ignored previously, but now needs to be.
3949 * Discount it.
3950 */
3951 if (ns->ns_ignore)
3952 nvme->n_namespaces_attachable--;
3953 }
3954
3955 return (DDI_SUCCESS);
3956 }
3957
3958 static boolean_t
3959 nvme_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
3960 {
3961 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
3962 int ret;
3963
3964 ASSERT(nvme_mgmt_lock_held(nvme));
3965
3966 if (ns->ns_ignore) {
3967 return (nvme_ioctl_error(com, NVME_IOCTL_E_UNSUP_ATTACH_NS,
3968 0, 0));
3969 }
3970
3971 if (ns->ns_bd_hdl == NULL) {
3972 bd_ops_t ops = nvme_bd_ops;
3973
3974 if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
3975 ops.o_free_space = NULL;
3976
3977 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
3978 KM_SLEEP);
3979
3980 if (ns->ns_bd_hdl == NULL) {
3981 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
3982 "handle for namespace id %u", com->nioc_nsid);
3983 return (nvme_ioctl_error(com,
3984 NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0));
3985 }
3986 }
3987
3988 nvme_mgmt_bd_start(nvme);
3989 ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
3990 nvme_mgmt_bd_end(nvme);
3991 if (ret != DDI_SUCCESS) {
3992 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH,
3993 0, 0));
3994 }
3995
3996 ns->ns_attached = B_TRUE;
3997
3998 return (B_TRUE);
3999 }
4000
4001 static boolean_t
4002 nvme_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
4003 {
4004 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
4005 int ret;
4006
4007 ASSERT(nvme_mgmt_lock_held(nvme));
4008
4009 if (ns->ns_ignore || !ns->ns_attached)
4010 return (B_TRUE);
4011
4012 nvme_mgmt_bd_start(nvme);
4013 ASSERT3P(ns->ns_bd_hdl, !=, NULL);
4014 ret = bd_detach_handle(ns->ns_bd_hdl);
4015 nvme_mgmt_bd_end(nvme);
4016
4017 if (ret != DDI_SUCCESS) {
4018 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0,
4019 0));
4020 }
4021
4022 ns->ns_attached = B_FALSE;
4023 return (B_TRUE);
4024
4025 }
4026
4027 /*
4028  * Rescan the namespace information associated with the namespace(s)
4029  * indicated by nsid. They should not be attached to blkdev right now.
4030 */
4031 static void
4032 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid)
4033 {
4034 ASSERT(nvme_mgmt_lock_held(nvme));
4035 ASSERT3U(nsid, !=, 0);
4036
4037 if (nsid != NVME_NSID_BCAST) {
4038 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
4039
4040 ASSERT3U(ns->ns_attached, ==, B_FALSE);
4041 (void) nvme_init_ns(nvme, nsid);
4042 return;
4043 }
4044
4045 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4046 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
4047
4048 ASSERT3U(ns->ns_attached, ==, B_FALSE);
4049 (void) nvme_init_ns(nvme, i);
4050 }
4051 }
4052
4053 typedef struct nvme_quirk_table {
4054 uint16_t nq_vendor_id;
4055 uint16_t nq_device_id;
4056 nvme_quirk_t nq_quirks;
4057 } nvme_quirk_table_t;
4058
4059 static const nvme_quirk_table_t nvme_quirks[] = {
4060 { 0x1987, 0x5018, NVME_QUIRK_START_CID }, /* Phison E18 */
4061 };
4062
4063 static void
4064 nvme_detect_quirks(nvme_t *nvme)
4065 {
4066 for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) {
4067 const nvme_quirk_table_t *nqt = &nvme_quirks[i];
4068
4069 if (nqt->nq_vendor_id == nvme->n_vendor_id &&
4070 nqt->nq_device_id == nvme->n_device_id) {
4071 nvme->n_quirks = nqt->nq_quirks;
4072 return;
4073 }
4074 }
4075 }
4076
4077 static int
4078 nvme_init(nvme_t *nvme)
4079 {
4080 nvme_reg_cc_t cc = { 0 };
4081 nvme_reg_aqa_t aqa = { 0 };
4082 nvme_reg_asq_t asq = { 0 };
4083 nvme_reg_acq_t acq = { 0 };
4084 nvme_reg_cap_t cap;
4085 nvme_reg_vs_t vs;
4086 nvme_reg_csts_t csts;
4087 int i = 0;
4088 uint16_t nqueues;
4089 uint_t tq_threads;
4090 char model[sizeof (nvme->n_idctl->id_model) + 1];
4091 char *vendor, *product;
4092 uint32_t nsid;
4093
4094 /* Check controller version */
4095 vs.r = nvme_get32(nvme, NVME_REG_VS);
4096 nvme->n_version.v_major = vs.b.vs_mjr;
4097 nvme->n_version.v_minor = vs.b.vs_mnr;
4098 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d\n",
4099 nvme->n_version.v_major, nvme->n_version.v_minor);
4100
4101 if (nvme->n_version.v_major > nvme_version_major) {
4102 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
4103 nvme_version_major);
4104 if (nvme->n_strict_version)
4105 goto fail;
4106 }
4107
4108 /* retrieve controller configuration */
4109 cap.r = nvme_get64(nvme, NVME_REG_CAP);
4110
4111 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
4112 dev_err(nvme->n_dip, CE_WARN,
4113 "!NVM command set not supported by hardware");
4114 goto fail;
4115 }
4116
4117 nvme->n_nssr_supported = cap.b.cap_nssrs;
4118 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
4119 nvme->n_timeout = cap.b.cap_to;
4120 nvme->n_arbitration_mechanisms = cap.b.cap_ams;
4121 nvme->n_cont_queues_reqd = cap.b.cap_cqr;
4122 nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
4123
4124 /*
4125 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
4126 * the base page size of 4k (1<<12), so add 12 here to get the real
4127 * page size value.
4128 */
4129 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
4130 cap.b.cap_mpsmax + 12);
4131 nvme->n_pagesize = 1UL << (nvme->n_pageshift);
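
	/*
	 * As a concrete (illustrative) case: a controller reporting
	 * MPSMIN = 0 and MPSMAX = 4 supports page sizes from 4 KiB (2^12) up
	 * to 64 KiB; on x86 with PAGESHIFT == 12 this yields n_pageshift = 12
	 * and n_pagesize = 4096.
	 */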
4132
4133 /*
4134 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
4135 */
4136 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
4137 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4138
4139 /*
4140 * Set up PRP DMA to transfer 1 page-aligned page at a time.
4141 * Maxxfer may be increased after we identified the controller limits.
4142 */
4143 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
4144 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4145 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
4146 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
4147
4148 /*
4149 * Reset controller if it's still in ready state.
4150 */
4151 if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
4152 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
4153 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4154 nvme->n_dead = B_TRUE;
4155 goto fail;
4156 }
4157
4158 /*
4159 * Create the cq array with one completion queue to be assigned
4160 * to the admin queue pair and a limited number of taskqs (4).
4161 */
4162 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
4163 DDI_SUCCESS) {
4164 dev_err(nvme->n_dip, CE_WARN,
4165 "!failed to pre-allocate admin completion queue");
4166 goto fail;
4167 }
4168 /*
4169 * Create the admin queue pair.
4170 */
4171 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
4172 != DDI_SUCCESS) {
4173 dev_err(nvme->n_dip, CE_WARN,
4174 "!unable to allocate admin qpair");
4175 goto fail;
4176 }
4177 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
4178 nvme->n_ioq[0] = nvme->n_adminq;
4179
4180 if (nvme->n_quirks & NVME_QUIRK_START_CID)
4181 nvme->n_adminq->nq_next_cmd++;
4182
4183 nvme->n_progress |= NVME_ADMIN_QUEUE;
4184
4185 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4186 "admin-queue-len", nvme->n_admin_queue_len);
4187
4188 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
4189 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
4190 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
4191
4192 ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
4193 ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
4194
4195 nvme_put32(nvme, NVME_REG_AQA, aqa.r);
4196 nvme_put64(nvme, NVME_REG_ASQ, asq);
4197 nvme_put64(nvme, NVME_REG_ACQ, acq);
4198
4199 cc.b.cc_ams = 0; /* use Round-Robin arbitration */
4200 cc.b.cc_css = 0; /* use NVM command set */
4201 cc.b.cc_mps = nvme->n_pageshift - 12;
4202 cc.b.cc_shn = 0; /* no shutdown in progress */
4203 cc.b.cc_en = 1; /* enable controller */
4204 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */
4205 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */
4206
4207 nvme_put32(nvme, NVME_REG_CC, cc.r);
4208
4209 /*
4210 * Wait for the controller to become ready.
4211 */
4212 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4213 if (csts.b.csts_rdy == 0) {
4214 for (i = 0; i != nvme->n_timeout * 10; i++) {
4215 delay(drv_usectohz(50000));
4216 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4217
4218 if (csts.b.csts_cfs == 1) {
4219 dev_err(nvme->n_dip, CE_WARN,
4220 "!controller fatal status at init");
4221 ddi_fm_service_impact(nvme->n_dip,
4222 DDI_SERVICE_LOST);
4223 nvme->n_dead = B_TRUE;
4224 goto fail;
4225 }
4226
4227 if (csts.b.csts_rdy == 1)
4228 break;
4229 }
4230 }
4231
4232 if (csts.b.csts_rdy == 0) {
4233 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
4234 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4235 nvme->n_dead = B_TRUE;
4236 goto fail;
4237 }
4238
4239 /*
4240 * Assume an abort command limit of 1. We'll destroy and re-init
4241 * that later when we know the true abort command limit.
4242 */
4243 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
4244
4245 /*
4246 * Set up initial interrupt for admin queue.
4247 */
4248 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
4249 != DDI_SUCCESS) &&
4250 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
4251 != DDI_SUCCESS) &&
4252 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
4253 != DDI_SUCCESS)) {
4254 dev_err(nvme->n_dip, CE_WARN,
4255 "!failed to set up initial interrupt");
4256 goto fail;
4257 }
4258
4259 /*
4260 * Post an asynchronous event command to catch errors.
4261 * We assume the asynchronous events are supported as required by
4262 * specification (Figure 40 in section 5 of NVMe 1.2).
4263 * However, since at least qemu does not follow the specification,
4264 * we need a mechanism to protect ourselves.
4265 */
4266 nvme->n_async_event_supported = B_TRUE;
4267 nvme_async_event(nvme);
4268
4269 /*
4270 * Identify Controller
4271 */
4272 if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL,
4273 (void **)&nvme->n_idctl)) {
4274 dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller");
4275 goto fail;
4276 }
4277
4278 /*
4279 * Get the common namespace information if available. If not, we use the
4280 * information for nsid 1.
4281 */
4282 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
4283 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
4284 nsid = NVME_NSID_BCAST;
4285 } else {
4286 nsid = 1;
4287 }
4288
4289 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4290 (void **)&nvme->n_idcomns)) {
4291 dev_err(nvme->n_dip, CE_WARN, "!failed to identify common "
4292 "namespace information");
4293 goto fail;
4294 }
4295 /*
4296 * Process nvme-config-list (if present) in nvme.conf.
4297 */
4298 nvme_config_list(nvme);
4299
4300 /*
4301 * Get Vendor & Product ID
4302 */
4303 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
4304 model[sizeof (nvme->n_idctl->id_model)] = '\0';
4305 sata_split_model(model, &vendor, &product);
4306
4307 if (vendor == NULL)
4308 nvme->n_vendor = strdup("NVMe");
4309 else
4310 nvme->n_vendor = strdup(vendor);
4311
4312 nvme->n_product = strdup(product);
4313
4314 /*
4315 * Get controller limits.
4316 */
4317 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
4318 MIN(nvme->n_admin_queue_len / 10,
4319 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
4320
4321 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4322 "async-event-limit", nvme->n_async_event_limit);
4323
4324 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
4325
4326 /*
4327 * Reinitialize the semaphore with the true abort command limit
4328 * supported by the hardware. It's not necessary to disable interrupts
4329 * as only command aborts use the semaphore, and no commands are
4330 * executed or aborted while we're here.
4331 */
4332 sema_destroy(&nvme->n_abort_sema);
4333 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
4334 SEMA_DRIVER, NULL);
4335
4336 nvme->n_progress |= NVME_CTRL_LIMITS;
4337
4338 if (nvme->n_idctl->id_mdts == 0)
4339 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
4340 else
4341 nvme->n_max_data_transfer_size =
4342 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
4343
4344 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
4345
4346 /*
4347 * Limit n_max_data_transfer_size to what we can handle in one PRP.
4348 * Chained PRPs are currently unsupported.
4349 *
4350 * This is a no-op on hardware which doesn't support a transfer size
4351 * big enough to require chained PRPs.
4352 */
4353 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
4354 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
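
	/*
	 * With 4 KiB pages, for example, a single PRP list page holds
	 * 4096 / 8 == 512 entries, so the clamp above works out to
	 * 512 * 4 KiB == 2 MiB; an MDTS-derived limit of 128 KiB (MDTS == 5
	 * with 4 KiB pages) would be left untouched. Numbers are illustrative
	 * only.
	 */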
4355
4356 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
4357
4358 /*
4359 * Make sure the minimum/maximum queue entry sizes are not
4360 * larger/smaller than the default.
4361 */
4362
4363 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
4364 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
4365 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
4366 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
4367 goto fail;
4368
4369 /*
4370 * Check for the presence of a Volatile Write Cache. If present,
4371 * enable or disable based on the value of the property
4372 * volatile-write-cache-enable (default is enabled).
4373 */
4374 nvme->n_write_cache_present =
4375 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
4376
4377 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4378 "volatile-write-cache-present",
4379 nvme->n_write_cache_present ? 1 : 0);
4380
4381 if (!nvme->n_write_cache_present) {
4382 nvme->n_write_cache_enabled = B_FALSE;
4383 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
4384 != 0) {
4385 dev_err(nvme->n_dip, CE_WARN,
4386 "!failed to %sable volatile write cache",
4387 nvme->n_write_cache_enabled ? "en" : "dis");
4388 /*
4389 * Assume the cache is (still) enabled.
4390 */
4391 nvme->n_write_cache_enabled = B_TRUE;
4392 }
4393
4394 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4395 "volatile-write-cache-enable",
4396 nvme->n_write_cache_enabled ? 1 : 0);
4397
4398 /*
4399 * Get number of supported namespaces and allocate namespace array.
4400 */
4401 nvme->n_namespace_count = nvme->n_idctl->id_nn;
4402
4403 if (nvme->n_namespace_count == 0) {
4404 dev_err(nvme->n_dip, CE_WARN,
4405 "!controllers without namespaces are not supported");
4406 goto fail;
4407 }
4408
4409 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
4410 nvme->n_namespace_count, KM_SLEEP);
4411
4412 /*
4413 * Try to set up MSI/MSI-X interrupts.
4414 */
4415 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
4416 != 0) {
4417 nvme_release_interrupts(nvme);
4418
4419 nqueues = MIN(UINT16_MAX, ncpus);
4420
4421 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
4422 nqueues) != DDI_SUCCESS) &&
4423 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
4424 nqueues) != DDI_SUCCESS)) {
4425 dev_err(nvme->n_dip, CE_WARN,
4426 "!failed to set up MSI/MSI-X interrupts");
4427 goto fail;
4428 }
4429 }
4430
4431 /*
4432 * Create I/O queue pairs.
4433 */
4434
4435 if (nvme_set_nqueues(nvme) != 0) {
4436 dev_err(nvme->n_dip, CE_WARN,
4437 "!failed to set number of I/O queues to %d",
4438 nvme->n_intr_cnt);
4439 goto fail;
4440 }
4441
4442 /*
4443 * Reallocate I/O queue array
4444 */
4445 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
4446 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
4447 (nvme->n_submission_queues + 1), KM_SLEEP);
4448 nvme->n_ioq[0] = nvme->n_adminq;
4449
4450 /*
4451 * There should always be at least as many submission queues
4452 * as completion queues.
4453 */
4454 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
4455
4456 nvme->n_ioq_count = nvme->n_submission_queues;
4457
4458 nvme->n_io_squeue_len =
4459 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
4460
4461 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
4462 nvme->n_io_squeue_len);
4463
4464 /*
4465 * Pre-allocate completion queues.
4466 * When there are the same number of submission and completion
4467 * queues there is no value in having a larger completion
4468 * queue length.
4469 */
4470 if (nvme->n_submission_queues == nvme->n_completion_queues)
4471 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4472 nvme->n_io_squeue_len);
4473
4474 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4475 nvme->n_max_queue_entries);
4476
4477 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
4478 nvme->n_io_cqueue_len);
4479
4480 /*
4481 * Assign the equal quantity of taskq threads to each completion
4482 * queue, capping the total number of threads to the number
4483 * of CPUs.
4484 */
4485 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
4486
4487 /*
4488 * In case the calculation above is zero, we need at least one
4489 * thread per completion queue.
4490 */
4491 tq_threads = MAX(1, tq_threads);
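
	/*
	 * For example (illustrative only): 32 CPUs spread across 8 completion
	 * queues gives 4 taskq threads per queue, while 4 CPUs across 8
	 * queues would compute 0 and be raised to 1 by the MAX() above.
	 */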
4492
4493 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
4494 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
4495 dev_err(nvme->n_dip, CE_WARN,
4496 "!failed to pre-allocate completion queues");
4497 goto fail;
4498 }
4499
4500 /*
4501 * If we use less completion queues than interrupt vectors return
4502 * some of the interrupt vectors back to the system.
4503 */
4504 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
4505 nvme_release_interrupts(nvme);
4506
4507 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
4508 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
4509 dev_err(nvme->n_dip, CE_WARN,
4510 "!failed to reduce number of interrupts");
4511 goto fail;
4512 }
4513 }
4514
4515 /*
4516 * Alloc & register I/O queue pairs
4517 */
4518
4519 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
4520 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
4521 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
4522 dev_err(nvme->n_dip, CE_WARN,
4523 "!unable to allocate I/O qpair %d", i);
4524 goto fail;
4525 }
4526
4527 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
4528 dev_err(nvme->n_dip, CE_WARN,
4529 "!unable to create I/O qpair %d", i);
4530 goto fail;
4531 }
4532 }
4533
4534 /*
4535 * Post more asynchronous events commands to reduce event reporting
4536 * latency as suggested by the spec.
4537 */
4538 if (nvme->n_async_event_supported) {
4539 for (i = 1; i != nvme->n_async_event_limit; i++)
4540 nvme_async_event(nvme);
4541 }
4542
4543 return (DDI_SUCCESS);
4544
4545 fail:
4546 (void) nvme_reset(nvme, B_FALSE);
4547 return (DDI_FAILURE);
4548 }
4549
4550 static uint_t
4551 nvme_intr(caddr_t arg1, caddr_t arg2)
4552 {
4553 nvme_t *nvme = (nvme_t *)arg1;
4554 int inum = (int)(uintptr_t)arg2;
4555 int ccnt = 0;
4556 int qnum;
4557
4558 if (inum >= nvme->n_intr_cnt)
4559 return (DDI_INTR_UNCLAIMED);
4560
4561 if (nvme->n_dead) {
4562 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
4563 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
4564 }
4565
4566 /*
4567 * The interrupt vector a queue uses is calculated as queue_idx %
4568 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
4569 * in steps of n_intr_cnt to process all queues using this vector.
4570 */
4571 for (qnum = inum;
4572 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
4573 qnum += nvme->n_intr_cnt) {
4574 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
4575 }
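
	/*
	 * As an example of the stride above (counts are illustrative): with 4
	 * interrupt vectors and 9 completion queues (admin plus 8 I/O),
	 * vector 0 services cq[0], cq[4], and cq[8], while vector 1 services
	 * cq[1] and cq[5], and so on.
	 */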
4576
4577 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
4578 }
4579
4580 static void
4581 nvme_release_interrupts(nvme_t *nvme)
4582 {
4583 int i;
4584
4585 for (i = 0; i < nvme->n_intr_cnt; i++) {
4586 if (nvme->n_inth[i] == NULL)
4587 break;
4588
4589 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4590 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
4591 else
4592 (void) ddi_intr_disable(nvme->n_inth[i]);
4593
4594 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
4595 (void) ddi_intr_free(nvme->n_inth[i]);
4596 }
4597
4598 kmem_free(nvme->n_inth, nvme->n_inth_sz);
4599 nvme->n_inth = NULL;
4600 nvme->n_inth_sz = 0;
4601
4602 nvme->n_progress &= ~NVME_INTERRUPTS;
4603 }
4604
4605 static int
4606 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
4607 {
4608 int nintrs, navail, count;
4609 int ret;
4610 int i;
4611
4612 if (nvme->n_intr_types == 0) {
4613 ret = ddi_intr_get_supported_types(nvme->n_dip,
4614 &nvme->n_intr_types);
4615 if (ret != DDI_SUCCESS) {
4616 dev_err(nvme->n_dip, CE_WARN,
4617 "!%s: ddi_intr_get_supported types failed",
4618 __func__);
4619 return (ret);
4620 }
4621 #ifdef __x86
4622 if (get_hwenv() == HW_VMWARE)
4623 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
4624 #endif
4625 }
4626
4627 if ((nvme->n_intr_types & intr_type) == 0)
4628 return (DDI_FAILURE);
4629
4630 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
4631 if (ret != DDI_SUCCESS) {
4632 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
4633 __func__);
4634 return (ret);
4635 }
4636
4637 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
4638 if (ret != DDI_SUCCESS) {
4639 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
4640 __func__);
4641 return (ret);
4642 }
4643
4644 /* We want at most one interrupt per queue pair. */
4645 if (navail > nqpairs)
4646 navail = nqpairs;
4647
4648 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
4649 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
4650
4651 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
4652 &count, 0);
4653 if (ret != DDI_SUCCESS) {
4654 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
4655 __func__);
4656 goto fail;
4657 }
4658
4659 nvme->n_intr_cnt = count;
4660
4661 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
4662 if (ret != DDI_SUCCESS) {
4663 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
4664 __func__);
4665 goto fail;
4666 }
4667
4668 for (i = 0; i < count; i++) {
4669 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
4670 (void *)nvme, (void *)(uintptr_t)i);
4671 if (ret != DDI_SUCCESS) {
4672 dev_err(nvme->n_dip, CE_WARN,
4673 "!%s: ddi_intr_add_handler failed", __func__);
4674 goto fail;
4675 }
4676 }
4677
4678 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
4679
4680 for (i = 0; i < count; i++) {
4681 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4682 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
4683 else
4684 ret = ddi_intr_enable(nvme->n_inth[i]);
4685
4686 if (ret != DDI_SUCCESS) {
4687 dev_err(nvme->n_dip, CE_WARN,
4688 "!%s: enabling interrupt %d failed", __func__, i);
4689 goto fail;
4690 }
4691 }
4692
4693 nvme->n_intr_type = intr_type;
4694
4695 nvme->n_progress |= NVME_INTERRUPTS;
4696
4697 return (DDI_SUCCESS);
4698
4699 fail:
4700 nvme_release_interrupts(nvme);
4701
4702 return (ret);
4703 }
4704
4705 static int
4706 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
4707 {
4708 _NOTE(ARGUNUSED(arg));
4709
4710 pci_ereport_post(dip, fm_error, NULL);
4711 return (fm_error->fme_status);
4712 }
4713
4714 static void
4715 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
4716 void *b)
4717 {
4718 nvme_t *nvme = a;
4719
4720 nvme_ctrl_mark_dead(nvme, B_TRUE);
4721
4722 /*
4723 * Fail all outstanding commands, including those in the admin queue
4724 * (queue 0).
4725 */
4726 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
4727 nvme_qpair_t *qp = nvme->n_ioq[i];
4728
4729 mutex_enter(&qp->nq_mutex);
4730 for (size_t j = 0; j < qp->nq_nentry; j++) {
4731 nvme_cmd_t *cmd = qp->nq_cmd[j];
4732 nvme_cmd_t *u_cmd;
4733
4734 if (cmd == NULL) {
4735 continue;
4736 }
4737
4738 /*
4739 * Since we have the queue lock held the entire time we
4740 * iterate over it, it's not possible for the queue to
4741 * change underneath us. Thus, we don't need to check
4742 * that the return value of nvme_unqueue_cmd matches the
4743 * requested cmd to unqueue.
4744 */
4745 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
4746 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
4747 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
4748
4749 ASSERT3P(u_cmd, ==, cmd);
4750 }
4751 mutex_exit(&qp->nq_mutex);
4752 }
4753 }
4754
4755 /*
4756 * Open minor management
4757 */
4758 static int
4759 nvme_minor_comparator(const void *l, const void *r)
4760 {
4761 const nvme_minor_t *lm = l;
4762 const nvme_minor_t *rm = r;
4763
4764 if (lm->nm_minor > rm->nm_minor) {
4765 return (1);
4766 } else if (lm->nm_minor < rm->nm_minor) {
4767 return (-1);
4768 } else {
4769 return (0);
4770 }
4771 }
4772
4773 static void
4774 nvme_minor_free(nvme_minor_t *minor)
4775 {
4776 if (minor->nm_minor > 0) {
4777 ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN);
4778 id_free(nvme_open_minors, minor->nm_minor);
4779 minor->nm_minor = 0;
4780 }
4781 VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node));
4782 VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node));
4783 cv_destroy(&minor->nm_cv);
4784 kmem_free(minor, sizeof (nvme_minor_t));
4785 }
4786
4787 static nvme_minor_t *
4788 nvme_minor_find_by_dev(dev_t dev)
4789 {
4790 id_t id = (id_t)getminor(dev);
4791 nvme_minor_t search = { .nm_minor = id };
4792 nvme_minor_t *ret;
4793
4794 mutex_enter(&nvme_open_minors_mutex);
4795 ret = avl_find(&nvme_open_minors_avl, &search, NULL);
4796 mutex_exit(&nvme_open_minors_mutex);
4797
4798 return (ret);
4799 }
4800
4801 static int
4802 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
4803 {
4804 nvme_t *nvme;
4805 int instance;
4806 int nregs;
4807 off_t regsize;
4808 char name[32];
4809 boolean_t attached_ns;
4810
4811 if (cmd != DDI_ATTACH)
4812 return (DDI_FAILURE);
4813
4814 instance = ddi_get_instance(dip);
4815
4816 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
4817 return (DDI_FAILURE);
4818
4819 nvme = ddi_get_soft_state(nvme_state, instance);
4820 ddi_set_driver_private(dip, nvme);
4821 nvme->n_dip = dip;
4822
4823 /*
4824 * Map PCI config space
4825 */
4826 if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) {
4827 dev_err(dip, CE_WARN, "!failed to map PCI config space");
4828 goto fail;
4829 }
4830 nvme->n_progress |= NVME_PCI_CONFIG;
4831
4832 /*
4833 * Get the various PCI IDs from config space
4834 */
4835 nvme->n_vendor_id =
4836 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID);
4837 nvme->n_device_id =
4838 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID);
4839 nvme->n_revision_id =
4840 pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID);
4841 nvme->n_subsystem_device_id =
4842 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID);
4843 nvme->n_subsystem_vendor_id =
4844 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID);
4845
4846 nvme_detect_quirks(nvme);
4847
4848 /*
4849 * Set up event handlers for hot removal. While npe(4D) supports the hot
4850 * removal event being injected for devices, the same is not true of all
4851 * of our possible parents (i.e. pci(4D) as of this writing). The most
4852 * common case this shows up is in some virtualization environments. We
4853 * should treat this as non-fatal so that way devices work but leave
4854 * this set up in such a way that if a nexus does grow support for this
4855 * we're good to go.
4856 */
4857 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
4858 &nvme->n_rm_cookie) == DDI_SUCCESS) {
4859 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
4860 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
4861 DDI_SUCCESS) {
4862 goto fail;
4863 }
4864 } else {
4865 nvme->n_ev_rm_cb_id = NULL;
4866 }
4867
4868 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
4869 nvme->n_progress |= NVME_MUTEX_INIT;
4870
4871 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4872 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
4873 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
4874 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
4875 B_TRUE : B_FALSE;
4876 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4877 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
4878 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4879 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
4880 /*
4881 * Double up the default for completion queues in case of
4882 * queue sharing.
4883 */
4884 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4885 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
4886 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4887 DDI_PROP_DONTPASS, "async-event-limit",
4888 NVME_DEFAULT_ASYNC_EVENT_LIMIT);
4889 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4890 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
4891 B_TRUE : B_FALSE;
4892 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4893 DDI_PROP_DONTPASS, "min-phys-block-size",
4894 NVME_DEFAULT_MIN_BLOCK_SIZE);
4895 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4896 DDI_PROP_DONTPASS, "max-submission-queues", -1);
4897 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4898 DDI_PROP_DONTPASS, "max-completion-queues", -1);
4899
4900 if (!ISP2(nvme->n_min_block_size) ||
4901 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
4902 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
4903 "using default %d", ISP2(nvme->n_min_block_size) ?
4904 "too low" : "not a power of 2",
4905 NVME_DEFAULT_MIN_BLOCK_SIZE);
4906 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
4907 }
4908
4909 if (nvme->n_submission_queues != -1 &&
4910 (nvme->n_submission_queues < 1 ||
4911 nvme->n_submission_queues > UINT16_MAX)) {
4912 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not "
4913 "valid. Must be [1..%d]", nvme->n_submission_queues,
4914 UINT16_MAX);
4915 nvme->n_submission_queues = -1;
4916 }
4917
4918 if (nvme->n_completion_queues != -1 &&
4919 (nvme->n_completion_queues < 1 ||
4920 nvme->n_completion_queues > UINT16_MAX)) {
4921 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not "
4922 "valid. Must be [1..%d]", nvme->n_completion_queues,
4923 UINT16_MAX);
4924 nvme->n_completion_queues = -1;
4925 }
4926
4927 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
4928 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
4929 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
4930 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
4931
4932 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
4933 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
4934 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
4935 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
4936
4937 if (nvme->n_async_event_limit < 1)
4938 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
4939
4940 nvme->n_reg_acc_attr = nvme_reg_acc_attr;
4941 nvme->n_queue_dma_attr = nvme_queue_dma_attr;
4942 nvme->n_prp_dma_attr = nvme_prp_dma_attr;
4943 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
4944
4945 /*
4946 * Set up FMA support.
4947 */
4948 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
4949 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4950 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
4951 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
4952
4953 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
4954
4955 if (nvme->n_fm_cap) {
4956 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
4957 nvme->n_reg_acc_attr.devacc_attr_access =
4958 DDI_FLAGERR_ACC;
4959
4960 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
4961 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
4962 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
4963 }
4964
4965 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
4966 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4967 pci_ereport_setup(dip);
4968
4969 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4970 ddi_fm_handler_register(dip, nvme_fm_errcb,
4971 (void *)nvme);
4972 }
4973
4974 nvme->n_progress |= NVME_FMA_INIT;
4975
4976 /*
4977 * The spec defines several register sets. Only the controller
4978 * registers (set 1) are currently used.
4979 */
4980 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
4981 nregs < 2 ||
4982 ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
4983 goto fail;
4984
4985 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
4986 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
4987 dev_err(dip, CE_WARN, "!failed to map regset 1");
4988 goto fail;
4989 }
4990
4991 nvme->n_progress |= NVME_REGS_MAPPED;
4992
4993 /*
4994 * Set up kstats
4995 */
4996 if (!nvme_stat_init(nvme)) {
4997 dev_err(dip, CE_WARN, "!failed to create device kstats");
4998 goto fail;
4999 }
5000 nvme->n_progress |= NVME_STAT_INIT;
5001
5002 /*
5003 * Create PRP DMA cache
5004 */
5005 (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
5006 ddi_driver_name(dip), ddi_get_instance(dip));
5007 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
5008 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
5009 NULL, (void *)nvme, NULL, 0);
5010
5011 if (nvme_init(nvme) != DDI_SUCCESS)
5012 goto fail;
5013
5014 /*
5015 * Initialize the driver with the UFM subsystem
5016 */
5017 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
5018 &nvme->n_ufmh, nvme) != 0) {
5019 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
5020 goto fail;
5021 }
5022 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
5023 ddi_ufm_update(nvme->n_ufmh);
5024 nvme->n_progress |= NVME_UFM_INIT;
5025
5026 nvme_mgmt_lock_init(&nvme->n_mgmt);
5027 nvme_lock_init(&nvme->n_lock);
5028 nvme->n_progress |= NVME_MGMT_INIT;
5029 nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD;
5030
5031 /*
5032 * Identify namespaces.
5033 */
5034 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
5035
5036 boolean_t minor_logged = B_FALSE;
5037 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5038 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5039
5040 nvme_lock_init(&ns->ns_lock);
5041 ns->ns_progress |= NVME_NS_LOCK;
5042
5043 /*
5044 * Namespaces start out ignored. When nvme_init_ns() checks
5045 * their properties and finds they can be used, it will set
5046 * ns_ignore to B_FALSE. It will also use this state change
5047 * to keep an accurate count of attachable namespaces.
5048 */
5049 ns->ns_ignore = B_TRUE;
5050 if (nvme_init_ns(nvme, i) != 0) {
5051 nvme_mgmt_unlock(nvme);
5052 goto fail;
5053 }
5054
5055 /*
5056 * We only create compat minor nodes for the namespace for the
5057 * first NVME_MINOR_MAX namespaces. Those that are beyond this
5058 * can only be accessed through the primary controller node,
5059 * which is generally fine as that's what libnvme uses and is
5060 * our preferred path. Not having a minor is better than not
5061 * having the namespace!
5062 */
5063 if (i > NVME_MINOR_MAX) {
5064 if (!minor_logged) {
5065 dev_err(dip, CE_WARN, "namespace minor "
5066 "creation limited to the first %u "
5067 "namespaces, device has %u",
5068 NVME_MINOR_MAX, nvme->n_namespace_count);
5069 minor_logged = B_TRUE;
5070 }
5071 continue;
5072 }
5073
5074 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
5075 NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
5076 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
5077 nvme_mgmt_unlock(nvme);
5078 dev_err(dip, CE_WARN,
5079 "!failed to create minor node for namespace %d", i);
5080 goto fail;
5081 }
5082 ns->ns_progress |= NVME_NS_MINOR;
5083 }
5084
5085 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
5086 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) !=
5087 DDI_SUCCESS) {
5088 nvme_mgmt_unlock(nvme);
5089 dev_err(dip, CE_WARN, "nvme_attach: "
5090 "cannot create devctl minor node");
5091 goto fail;
5092 }
5093
5094 attached_ns = B_FALSE;
5095 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5096 nvme_ioctl_common_t com = { .nioc_nsid = i };
5097
5098 if (nvme_attach_ns(nvme, &com)) {
5099 attached_ns = B_TRUE;
5100 } else if (com.nioc_drv_err != NVME_IOCTL_E_UNSUP_ATTACH_NS) {
5101 dev_err(nvme->n_dip, CE_WARN, "!failed to attach "
5102 "namespace %d due to blkdev error", i);
5103 /*
5104 * Once we have successfully attached a namespace we
5105 * can no longer fail the driver attach as there is now
5106 * a blkdev child node linked to this device, and
5107 * our node is not yet in the attached state.
5108 */
5109 if (!attached_ns) {
5110 nvme_mgmt_unlock(nvme);
5111 goto fail;
5112 }
5113 }
5114 }
5115
5116 nvme_mgmt_unlock(nvme);
5117
5118 return (DDI_SUCCESS);
5119
5120 fail:
5121 /* attach successful anyway so that FMA can retire the device */
5122 if (nvme->n_dead)
5123 return (DDI_SUCCESS);
5124
5125 (void) nvme_detach(dip, DDI_DETACH);
5126
5127 return (DDI_FAILURE);
5128 }
5129
5130 static int
5131 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
5132 {
5133 int instance;
5134 nvme_t *nvme;
5135
5136 if (cmd != DDI_DETACH)
5137 return (DDI_FAILURE);
5138
5139 instance = ddi_get_instance(dip);
5140
5141 nvme = ddi_get_soft_state(nvme_state, instance);
5142
5143 if (nvme == NULL)
5144 return (DDI_FAILURE);
5145
5146 /*
5147 * Remove all minor nodes from the device regardless of the source in
5148 * one swoop.
5149 */
5150 ddi_remove_minor_node(dip, NULL);
5151
5152 /*
5153 * We need to remove the event handler as one of the first things that
5154 * we do. If we proceed with other teardown without removing the event
5155 * handler, we could end up in a very unfortunate race with ourselves.
5156 * The DDI does not serialize these with detach (just like timeout(9F)
5157 * and others).
5158 */
5159 if (nvme->n_ev_rm_cb_id != NULL) {
5160 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
5161 }
5162 nvme->n_ev_rm_cb_id = NULL;
5163
5164 /*
5165 * If the controller was marked dead, there is a slight chance that we
5166 * are asynchronously processing the removal taskq. Because we have
5167 * removed the callback handler above and all minor nodes and commands
5168 * are closed, there is no other way to get in here. As such, we wait on
5169 * the nvme_dead_taskq to complete so we can avoid tracking if it's
5170 * running or not.
5171 */
5172 taskq_wait(nvme_dead_taskq);
5173
5174 if (nvme->n_ns) {
5175 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5176 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5177
5178 if (ns->ns_bd_hdl) {
5179 (void) bd_detach_handle(ns->ns_bd_hdl);
5180 bd_free_handle(ns->ns_bd_hdl);
5181 }
5182
5183 if (ns->ns_idns)
5184 kmem_free(ns->ns_idns,
5185 sizeof (nvme_identify_nsid_t));
5186 if (ns->ns_devid)
5187 strfree(ns->ns_devid);
5188
5189 if ((ns->ns_progress & NVME_NS_LOCK) != 0)
5190 nvme_lock_fini(&ns->ns_lock);
5191 }
5192
5193 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
5194 nvme->n_namespace_count);
5195 }
5196
5197 if (nvme->n_progress & NVME_MGMT_INIT) {
5198 nvme_lock_fini(&nvme->n_lock);
5199 nvme_mgmt_lock_fini(&nvme->n_mgmt);
5200 }
5201
5202 if (nvme->n_progress & NVME_UFM_INIT) {
5203 ddi_ufm_fini(nvme->n_ufmh);
5204 mutex_destroy(&nvme->n_fwslot_mutex);
5205 }
5206
5207 if (nvme->n_progress & NVME_INTERRUPTS)
5208 nvme_release_interrupts(nvme);
5209
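/*
 * Wait for any outstanding completion-queue taskq work to finish so
 * that no command completion callbacks are still running while the
 * queues and commands are torn down below.
 */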
5210 for (uint_t i = 0; i < nvme->n_cq_count; i++) {
5211 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
5212 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
5213 }
5214
5215 if (nvme->n_progress & NVME_MUTEX_INIT) {
5216 mutex_destroy(&nvme->n_minor_mutex);
5217 }
5218
5219 if (nvme->n_ioq_count > 0) {
5220 for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) {
5221 if (nvme->n_ioq[i] != NULL) {
5222 /* TODO: send destroy queue commands */
5223 nvme_free_qpair(nvme->n_ioq[i]);
5224 }
5225 }
5226
5227 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
5228 (nvme->n_ioq_count + 1));
5229 }
5230
5231 if (nvme->n_prp_cache != NULL) {
5232 kmem_cache_destroy(nvme->n_prp_cache);
5233 }
5234
5235 if (nvme->n_progress & NVME_REGS_MAPPED) {
5236 nvme_shutdown(nvme, B_FALSE);
5237 (void) nvme_reset(nvme, B_FALSE);
5238 }
5239
5240 if (nvme->n_progress & NVME_CTRL_LIMITS)
5241 sema_destroy(&nvme->n_abort_sema);
5242
5243 if (nvme->n_progress & NVME_ADMIN_QUEUE)
5244 nvme_free_qpair(nvme->n_adminq);
5245
5246 if (nvme->n_cq_count > 0) {
5247 nvme_destroy_cq_array(nvme, 0);
5248 nvme->n_cq = NULL;
5249 nvme->n_cq_count = 0;
5250 }
5251
5252 if (nvme->n_idcomns)
5253 kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE);
5254
5255 if (nvme->n_idctl)
5256 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
5257
5258 if (nvme->n_progress & NVME_REGS_MAPPED)
5259 ddi_regs_map_free(&nvme->n_regh);
5260
5261 if (nvme->n_progress & NVME_STAT_INIT)
5262 nvme_stat_cleanup(nvme);
5263
5264 if (nvme->n_progress & NVME_FMA_INIT) {
5265 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5266 ddi_fm_handler_unregister(nvme->n_dip);
5267
5268 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
5269 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5270 pci_ereport_teardown(nvme->n_dip);
5271
5272 ddi_fm_fini(nvme->n_dip);
5273 }
5274
5275 if (nvme->n_progress & NVME_PCI_CONFIG)
5276 pci_config_teardown(&nvme->n_pcicfg_handle);
5277
5278 if (nvme->n_vendor != NULL)
5279 strfree(nvme->n_vendor);
5280
5281 if (nvme->n_product != NULL)
5282 strfree(nvme->n_product);
5283
5284 ddi_soft_state_free(nvme_state, instance);
5285
5286 return (DDI_SUCCESS);
5287 }
5288
5289 static int
5290 nvme_quiesce(dev_info_t *dip)
5291 {
5292 int instance;
5293 nvme_t *nvme;
5294
5295 instance = ddi_get_instance(dip);
5296
5297 nvme = ddi_get_soft_state(nvme_state, instance);
5298
5299 if (nvme == NULL)
5300 return (DDI_FAILURE);
5301
5302 nvme_shutdown(nvme, B_TRUE);
5303
5304 (void) nvme_reset(nvme, B_TRUE);
5305
5306 return (DDI_SUCCESS);
5307 }
5308
5309 static int
5310 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
5311 {
5312 nvme_t *nvme = cmd->nc_nvme;
5313 uint_t nprp_per_page, nprp;
5314 uint64_t *prp;
5315 const ddi_dma_cookie_t *cookie;
5316 uint_t idx;
5317 uint_t ncookies = ddi_dma_ncookies(dma);
5318
5319 if (ncookies == 0)
5320 return (DDI_FAILURE);
5321
5322 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
5323 return (DDI_FAILURE);
5324 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
5325
5326 if (ncookies == 1) {
5327 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5328 return (DDI_SUCCESS);
5329 } else if (ncookies == 2) {
5330 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
5331 return (DDI_FAILURE);
5332 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
5333 return (DDI_SUCCESS);
5334 }
5335
5336 /*
5337 * At this point, we're always operating on cookies at
5338 * index >= 1 and writing the addresses of those cookies
5339 * into a new page. The address of that page is stored
5340 * as the second PRP entry.
5341 */
5342 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
5343 ASSERT(nprp_per_page > 0);
5344
5345 /*
5346 * We currently don't support chained PRPs and set up our DMA
5347 * attributes to reflect that. If we still get an I/O request
5348 * that needs a chained PRP something is very wrong. Account
5349 * for the first cookie here, which we've placed in d_prp[0].
5350 */
5351 nprp = howmany(ncookies - 1, nprp_per_page);
5352 VERIFY(nprp == 1);
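/*
 * Worked example (assuming the common 4 KiB controller page size): a
 * transfer described by 5 DMA cookies stores cookie 0 in d_prp[0],
 * allocates one PRP list page capable of holding
 * n_pagesize / sizeof (uint64_t) == 512 entries, writes the addresses
 * of cookies 1 through 4 into that page, and stores the page's
 * physical address in d_prp[1].
 */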
5353
5354 /*
5355 * Allocate a page of pointers, in which we'll write the
5356 * addresses of cookies 1 to `ncookies`.
5357 */
5358 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
5359 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5360 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
5361
5362 prp = (uint64_t *)cmd->nc_prp->nd_memp;
5363 for (idx = 1; idx < ncookies; idx++) {
5364 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
5365 return (DDI_FAILURE);
5366 *prp++ = cookie->dmac_laddress;
5367 }
5368
5369 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5370 DDI_DMA_SYNC_FORDEV);
5371 return (DDI_SUCCESS);
5372 }
5373
5374 /*
5375 * The maximum number of ranges supported for a deallocate request is
5376 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
5377 * unchanged through at least 1.4a). The definition of nvme_range_t is also
5378 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
5379 * a deallocate request will fit into the smallest supported namespace page
5380 * (4k).
5381 */
5382 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
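/*
 * The CTASSERT above encodes the arithmetic: each nvme_range_t is 16
 * bytes (4-byte context attributes, 4-byte length in LBAs, 8-byte
 * starting LBA), so 256 ranges occupy exactly 16 * 256 == 4096 bytes.
 */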
5383
5384 static int
5385 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
5386 int allocflag)
5387 {
5388 const dkioc_free_list_t *dfl = xfer->x_dfl;
5389 const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
5390 nvme_t *nvme = cmd->nc_nvme;
5391 nvme_range_t *ranges = NULL;
5392 uint_t i;
5393
5394 /*
5395 * The number of ranges in the request is 0s based (that is
5396 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
5397 * word10 == 255 -> 256 ranges). Therefore the allowed values are
5398 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
5399 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
5400 * in blkdev.
5401 */
5402 VERIFY3U(dfl->dfl_num_exts, >, 0);
5403 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
5404 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
5405
5406 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
5407
5408 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
5409 if (cmd->nc_prp == NULL)
5410 return (DDI_FAILURE);
5411
5412 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5413 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
5414
5415 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
5416 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5417
5418 for (i = 0; i < dfl->dfl_num_exts; i++) {
5419 uint64_t lba, len;
5420
5421 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
5422 len = exts[i].dfle_length / blocksize;
5423
5424 VERIFY3U(len, <=, UINT32_MAX);
5425
5426 /* No context attributes for a deallocate request */
5427 ranges[i].nr_ctxattr = 0;
5428 ranges[i].nr_len = len;
5429 ranges[i].nr_lba = lba;
5430 }
5431
5432 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5433 DDI_DMA_SYNC_FORDEV);
5434
5435 return (DDI_SUCCESS);
5436 }
5437
5438 static nvme_cmd_t *
5439 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
5440 {
5441 nvme_t *nvme = ns->ns_nvme;
5442 nvme_cmd_t *cmd;
5443 int allocflag;
5444
5445 /*
5446 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
5447 */
5448 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
5449 cmd = nvme_alloc_cmd(nvme, allocflag);
5450
5451 if (cmd == NULL)
5452 return (NULL);
5453
5454 cmd->nc_sqe.sqe_opc = opc;
5455 cmd->nc_callback = nvme_bd_xfer_done;
5456 cmd->nc_xfer = xfer;
5457
5458 switch (opc) {
5459 case NVME_OPC_NVM_WRITE:
5460 case NVME_OPC_NVM_READ:
5461 VERIFY(xfer->x_nblks <= 0x10000);
5462
5463 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5464
5465 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
5466 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
5467 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
5468
5469 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
5470 goto fail;
5471 break;
5472
5473 case NVME_OPC_NVM_FLUSH:
5474 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5475 break;
5476
5477 case NVME_OPC_NVM_DSET_MGMT:
5478 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5479
5480 if (nvme_fill_ranges(cmd, xfer,
5481 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
5482 goto fail;
5483 break;
5484
5485 default:
5486 goto fail;
5487 }
5488
5489 return (cmd);
5490
5491 fail:
5492 nvme_free_cmd(cmd);
5493 return (NULL);
5494 }
5495
5496 static void
5497 nvme_bd_xfer_done(void *arg)
5498 {
5499 nvme_cmd_t *cmd = arg;
5500 bd_xfer_t *xfer = cmd->nc_xfer;
5501 int error = 0;
5502
5503 error = nvme_check_cmd_status(cmd);
5504 nvme_free_cmd(cmd);
5505
5506 bd_xfer_done(xfer, error);
5507 }
5508
5509 static void
5510 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
5511 {
5512 nvme_namespace_t *ns = arg;
5513 nvme_t *nvme = ns->ns_nvme;
5514 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
5515
5516 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5517
5518 /*
5519 * Set the blkdev qcount to the number of submission queues.
5520 * It will then create one waitq/runq pair for each submission
5521 * queue and spread I/O requests across the queues.
5522 */
5523 drive->d_qcount = nvme->n_ioq_count;
5524
5525 /*
5526 * I/O activity to individual namespaces is distributed across
5527 * each of the d_qcount blkdev queues (which has been set to
5528 * the number of nvme submission queues). d_qsize is the number
5529 * of submitted and not completed I/Os within each queue that blkdev
5530 * will allow before it starts holding them in the waitq.
5531 *
5532 * Each namespace will create a child blkdev instance; for each one
5533 * we try to set the d_qsize so that each namespace gets an
5534 * equal portion of the submission queue.
5535 *
5536 * If, after the nvme driver has attached, n_namespaces_attachable
5537 * changes and a namespace is attached, a different d_qsize could be
5538 * calculated. It may even be that the sum of the d_qsizes is
5539 * now beyond the submission queue size. Should that be the case
5540 * and the I/O rate is such that blkdev attempts to submit more
5541 * I/Os than the size of the submission queue, the excess I/Os
5542 * will be held behind the semaphore nq_sema.
5543 */
5544 drive->d_qsize = nvme->n_io_squeue_len / ns_count;
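/*
 * For example (hypothetical numbers): with an I/O submission queue
 * length of 1024 and 4 attachable namespaces, each child blkdev
 * instance gets d_qsize = 1024 / 4 = 256 outstanding I/Os, subject to
 * the minimum enforced below.
 */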
5545
5546 /*
5547 * Don't let the queue size drop below the minimum, though.
5548 */
5549 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
5550
5551 /*
5552 * d_maxxfer is not set, which means the value is taken from the DMA
5553 * attributes specified to bd_alloc_handle.
5554 */
5555
5556 drive->d_removable = B_FALSE;
5557 drive->d_hotpluggable = B_FALSE;
5558
5559 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
5560 drive->d_target = ns->ns_id;
5561 drive->d_lun = 0;
5562
5563 drive->d_model = nvme->n_idctl->id_model;
5564 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
5565 drive->d_vendor = nvme->n_vendor;
5566 drive->d_vendor_len = strlen(nvme->n_vendor);
5567 drive->d_product = nvme->n_product;
5568 drive->d_product_len = strlen(nvme->n_product);
5569 drive->d_serial = nvme->n_idctl->id_serial;
5570 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
5571 drive->d_revision = nvme->n_idctl->id_fwrev;
5572 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
5573
5574 /*
5575 * If we support the dataset management command, the only restrictions
5576 * on a discard request are the maximum number of ranges (segments)
5577 * per single request.
5578 */
5579 if (nvme->n_idctl->id_oncs.on_dset_mgmt)
5580 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
5581
5582 nvme_mgmt_unlock(nvme);
5583 }
5584
5585 static int
5586 nvme_bd_mediainfo(void *arg, bd_media_t *media)
5587 {
5588 nvme_namespace_t *ns = arg;
5589 nvme_t *nvme = ns->ns_nvme;
5590
5591 if (nvme->n_dead) {
5592 return (EIO);
5593 }
5594
5595 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5596
5597 media->m_nblks = ns->ns_block_count;
5598 media->m_blksize = ns->ns_block_size;
5599 media->m_readonly = B_FALSE;
5600 media->m_solidstate = B_TRUE;
5601
5602 media->m_pblksize = ns->ns_best_block_size;
5603
5604 nvme_mgmt_unlock(nvme);
5605
5606 return (0);
5607 }
5608
5609 static int
5610 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
5611 {
5612 nvme_t *nvme = ns->ns_nvme;
5613 nvme_cmd_t *cmd;
5614 nvme_qpair_t *ioq;
5615 boolean_t poll;
5616 int ret;
5617
5618 if (nvme->n_dead) {
5619 return (EIO);
5620 }
5621
5622 cmd = nvme_create_nvm_cmd(ns, opc, xfer);
5623 if (cmd == NULL)
5624 return (ENOMEM);
5625
5626 cmd->nc_sqid = xfer->x_qnum + 1;
5627 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
5628 ioq = nvme->n_ioq[cmd->nc_sqid];
5629
5630 /*
5631 * Get the polling flag before submitting the command. The command may
5632 * complete immediately after it was submitted, which means we must
5633 * treat both cmd and xfer as if they have been freed already.
5634 */
5635 poll = (xfer->x_flags & BD_XFER_POLL) != 0;
5636
5637 ret = nvme_submit_io_cmd(ioq, cmd);
5638
5639 if (ret != 0)
5640 return (ret);
5641
5642 if (!poll)
5643 return (0);
5644
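/*
 * Polled mode (blkdev only requests this when dumping): keep retrieving
 * completed commands from this queue pair and running their callbacks
 * until the queue has no active commands left.
 */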
5645 do {
5646 cmd = nvme_retrieve_cmd(nvme, ioq);
5647 if (cmd != NULL) {
5648 ASSERT0(cmd->nc_flags & NVME_CMD_F_USELOCK);
5649 cmd->nc_callback(cmd);
5650 } else {
5651 drv_usecwait(10);
5652 }
5653 } while (ioq->nq_active_cmds != 0);
5654
5655 return (0);
5656 }
5657
5658 static int
5659 nvme_bd_read(void *arg, bd_xfer_t *xfer)
5660 {
5661 nvme_namespace_t *ns = arg;
5662
5663 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
5664 }
5665
5666 static int
5667 nvme_bd_write(void *arg, bd_xfer_t *xfer)
5668 {
5669 nvme_namespace_t *ns = arg;
5670
5671 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
5672 }
5673
5674 static int
5675 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
5676 {
5677 nvme_namespace_t *ns = arg;
5678
5679 if (ns->ns_nvme->n_dead)
5680 return (EIO);
5681
5682 /*
5683 * If the volatile write cache is not present or not enabled the FLUSH
5684 * command is a no-op, so we can take a shortcut here.
5685 */
5686 if (!ns->ns_nvme->n_write_cache_present) {
5687 bd_xfer_done(xfer, ENOTSUP);
5688 return (0);
5689 }
5690
5691 if (!ns->ns_nvme->n_write_cache_enabled) {
5692 bd_xfer_done(xfer, 0);
5693 return (0);
5694 }
5695
5696 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
5697 }
5698
5699 static int
5700 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
5701 {
5702 nvme_namespace_t *ns = arg;
5703 nvme_t *nvme = ns->ns_nvme;
5704
5705 if (nvme->n_dead) {
5706 return (EIO);
5707 }
5708
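/*
 * Prefer the 128-bit NGUID if the namespace reports one, then the
 * 64-bit EUI64, and finally fall back to the driver-generated devid
 * string (DEVID_NVME_NSID).
 */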
5709 if (*(uint64_t *)ns->ns_nguid != 0 ||
5710 *(uint64_t *)(ns->ns_nguid + 8) != 0) {
5711 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
5712 sizeof (ns->ns_nguid), ns->ns_nguid, devid));
5713 } else if (*(uint64_t *)ns->ns_eui64 != 0) {
5714 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
5715 sizeof (ns->ns_eui64), ns->ns_eui64, devid));
5716 } else {
5717 return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
5718 strlen(ns->ns_devid), ns->ns_devid, devid));
5719 }
5720 }
5721
5722 static int
5723 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
5724 {
5725 nvme_namespace_t *ns = arg;
5726
5727 if (xfer->x_dfl == NULL)
5728 return (EINVAL);
5729
5730 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
5731 return (ENOTSUP);
5732
5733 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
5734 }
5735
5736 static int
5737 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
5738 {
5739 #ifndef __lock_lint
5740 _NOTE(ARGUNUSED(cred_p));
5741 #endif
5742 nvme_t *nvme;
5743 nvme_minor_t *minor = NULL;
5744 uint32_t nsid;
5745 minor_t m = getminor(*devp);
5746 int rv = 0;
5747
5748 if (otyp != OTYP_CHR)
5749 return (EINVAL);
5750
5751 if (m >= NVME_OPEN_MINOR_MIN)
5752 return (ENXIO);
5753
5754 nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m));
5755 nsid = NVME_MINOR_NSID(m);
5756
5757 if (nvme == NULL)
5758 return (ENXIO);
5759
5760 if (nsid > MIN(nvme->n_namespace_count, NVME_MINOR_MAX))
5761 return (ENXIO);
5762
5763 if (nvme->n_dead)
5764 return (EIO);
5765
5766 /*
5767 * At this point, we're going to allow an open to proceed on this
5768 * device. We need to allocate a new instance for this (presuming one is
5769 * available).
5770 */
5771 minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY);
5772 if (minor == NULL) {
5773 return (ENOMEM);
5774 }
5775
5776 cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL);
5777 list_link_init(&minor->nm_ctrl_lock.nli_node);
5778 minor->nm_ctrl_lock.nli_nvme = nvme;
5779 minor->nm_ctrl_lock.nli_minor = minor;
5780 list_link_init(&minor->nm_ns_lock.nli_node);
5781 minor->nm_ns_lock.nli_nvme = nvme;
5782 minor->nm_ns_lock.nli_minor = minor;
5783 minor->nm_minor = id_alloc_nosleep(nvme_open_minors);
5784 if (minor->nm_minor == -1) {
5785 nvme_minor_free(minor);
5786 return (ENOSPC);
5787 }
5788
5789 minor->nm_ctrl = nvme;
5790 if (nsid != 0) {
5791 minor->nm_ns = nvme_nsid2ns(nvme, nsid);
5792 }
5793
5794 /*
5795 * Before we check for exclusive access and attempt a lock if requested,
5796 * ensure that this minor is persisted.
5797 */
5798 mutex_enter(&nvme_open_minors_mutex);
5799 avl_add(&nvme_open_minors_avl, minor);
5800 mutex_exit(&nvme_open_minors_mutex);
5801
5802 /*
5803 * A request to open this with FEXCL is translated into a non-blocking
5804 * write lock of the appropriate entity. This honors the original
5805 * semantics here. In the future, we should see if we can remove this
5806 * and turn a request for FEXCL at open into ENOTSUP.
5807 */
5808 mutex_enter(&nvme->n_minor_mutex);
5809 if ((flag & FEXCL) != 0) {
5810 nvme_ioctl_lock_t lock = {
5811 .nil_level = NVME_LOCK_L_WRITE,
5812 .nil_flags = NVME_LOCK_F_DONT_BLOCK
5813 };
5814
5815 if (minor->nm_ns != NULL) {
5816 lock.nil_ent = NVME_LOCK_E_NS;
5817 lock.nil_common.nioc_nsid = nsid;
5818 } else {
5819 lock.nil_ent = NVME_LOCK_E_CTRL;
5820 }
5821 nvme_rwlock(minor, &lock);
5822 if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
5823 mutex_exit(&nvme->n_minor_mutex);
5824
5825 mutex_enter(&nvme_open_minors_mutex);
5826 avl_remove(&nvme_open_minors_avl, minor);
5827 mutex_exit(&nvme_open_minors_mutex);
5828
5829 nvme_minor_free(minor);
5830 return (EBUSY);
5831 }
5832 }
5833 mutex_exit(&nvme->n_minor_mutex);
5834
5835 *devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor);
5836 return (rv);
5837
5838 }
5839
5840 static int
5841 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused)
5842 {
5843 nvme_minor_t *minor;
5844 nvme_t *nvme;
5845
5846 if (otyp != OTYP_CHR) {
5847 return (ENXIO);
5848 }
5849
5850 minor = nvme_minor_find_by_dev(dev);
5851 if (minor == NULL) {
5852 return (ENXIO);
5853 }
5854
5855 mutex_enter(&nvme_open_minors_mutex);
5856 avl_remove(&nvme_open_minors_avl, minor);
5857 mutex_exit(&nvme_open_minors_mutex);
5858
5859 /*
5860 * When this device is being closed, we must ensure that any locks held
5861 * by this are dealt with.
5862 */
5863 nvme = minor->nm_ctrl;
5864 mutex_enter(&nvme->n_minor_mutex);
5865 ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
5866 ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
5867
5868 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
5869 VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL);
5870 nvme_rwunlock(&minor->nm_ctrl_lock,
5871 minor->nm_ctrl_lock.nli_lock);
5872 }
5873
5874 if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
5875 VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL);
5876 nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock);
5877 }
5878 mutex_exit(&nvme->n_minor_mutex);
5879
5880 nvme_minor_free(minor);
5881
5882 return (0);
5883 }
5884
5885 void
5886 nvme_ioctl_success(nvme_ioctl_common_t *ioc)
5887 {
5888 ioc->nioc_drv_err = NVME_IOCTL_E_OK;
5889 ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS;
5890 ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC;
5891 }
5892
5893 boolean_t
5894 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct,
5895 uint32_t sc)
5896 {
5897 ioc->nioc_drv_err = err;
5898 ioc->nioc_ctrl_sct = sct;
5899 ioc->nioc_ctrl_sc = sc;
5900
5901 return (B_FALSE);
5902 }
5903
5904 static int
5905 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode)
5906 {
5907 nvme_ioctl_common_t ioc;
5908
5909 ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR);
5910 bzero(&ioc, sizeof (ioc));
5911 if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t),
5912 mode & FKIOCTL) != 0) {
5913 return (EFAULT);
5914 }
5915 return (0);
5916 }
5917
5918 /*
5919 * The companion to the namespace checking. This occurs after any rewriting
5920 * occurs. This is the primary point that we attempt to enforce any operation's
5921 * exclusivity. Note, it is theoretically possible for an operation to be
5922 * ongoing and to have someone with an exclusive lock ask to unlock it for some
5923 * reason. The driver does not keep track of how many such events are going on.
5924 * While perhaps this is leaving too much up to the user, by the same token we
5925 * don't try to stop them from issuing two different format NVM commands
5926 * targeting the whole device at the same time either, even though the
5927 * controller would really rather that didn't happen.
5928 */
5929 static boolean_t
5930 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
5931 const nvme_ioctl_check_t *check)
5932 {
5933 nvme_t *const nvme = minor->nm_ctrl;
5934 nvme_namespace_t *ns;
5935 boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl;
5936
5937 /*
5938 * If the command doesn't require anything, then we're done.
5939 */
5940 if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) {
5941 return (B_TRUE);
5942 }
5943
5944 if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) {
5945 ns = NULL;
5946 } else {
5947 ns = nvme_nsid2ns(nvme, ioc->nioc_nsid);
5948 }
5949
5950 mutex_enter(&nvme->n_minor_mutex);
5951 ctrl_is_excl = nvme->n_lock.nl_writer != NULL;
5952 have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock;
5953 if (ns != NULL) {
5954 /*
5955 * We explicitly test the namespace lock's writer versus asking
5956 * the minor because the minor's namespace lock may apply to a
5957 * different namespace.
5958 */
5959 ns_is_excl = ns->ns_lock.nl_writer != NULL;
5960 have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock;
5961 ASSERT0(have_ctrl && have_ns);
5962 #ifdef DEBUG
5963 if (have_ns) {
5964 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns);
5965 }
5966 #endif
5967 } else {
5968 ns_is_excl = B_FALSE;
5969 have_ns = B_FALSE;
5970 }
5971 ASSERT0(ctrl_is_excl && ns_is_excl);
5972 mutex_exit(&nvme->n_minor_mutex);
5973
5974 if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) {
5975 if (ns == NULL) {
5976 if (have_ctrl) {
5977 return (B_TRUE);
5978 }
5979 return (nvme_ioctl_error(ioc,
5980 NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0));
5981 } else {
5982 if (have_ctrl || have_ns) {
5983 return (B_TRUE);
5984 }
5985 return (nvme_ioctl_error(ioc,
5986 NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0));
5987 }
5988 }
5989
5990 /*
5991 * Now we have an operation that does not require exclusive access. We
5992 * can proceed as long as no one else has it or if someone does it is
5993 * us. Regardless of what we target, a controller lock will stop us.
5994 */
5995 if (ctrl_is_excl && !have_ctrl) {
5996 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0));
5997 }
5998
5999 /*
6000 * Only check namespace exclusivity if we are targeting one.
6001 */
6002 if (ns != NULL && ns_is_excl && !have_ns) {
6003 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0));
6004 }
6005
6006 return (B_TRUE);
6007 }
6008
6009 /*
6010 * Perform common checking as to whether or not an ioctl operation may proceed.
6011 * We check in this function various aspects of the namespace attributes that
6012 * it's calling on. Once the namespace attributes and any possible rewriting
6013 * have been performed, then we proceed to check whether or not the requisite
6014 * exclusive access is present in nvme_ioctl_excl_check().
6015 */
6016 static boolean_t
6017 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
6018 const nvme_ioctl_check_t *check)
6019 {
6020 /*
6021 * If the minor has a namespace pointer, then it is constrained to that
6022 * namespace. If a namespace is allowed, then there are only two valid
6023 * values that we can find. The first is matching the minor. The second
6024 * is our value zero, which will be transformed to the current
6025 * namespace.
6026 */
6027 if (minor->nm_ns != NULL) {
6028 if (!check->nck_ns_ok || !check->nck_ns_minor_ok) {
6029 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0,
6030 0));
6031 }
6032
6033 if (ioc->nioc_nsid == 0) {
6034 ioc->nioc_nsid = minor->nm_ns->ns_id;
6035 } else if (ioc->nioc_nsid != minor->nm_ns->ns_id) {
6036 return (nvme_ioctl_error(ioc,
6037 NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0));
6038 }
6039
6040 return (nvme_ioctl_excl_check(minor, ioc, check));
6041 }
6042
6043 /*
6044 * If we've been told to skip checking the controller, here's where we
6045 * do that. This should really only be for commands which use the
6046 * namespace ID for listing purposes and therefore can have
6047 * traditionally illegal values here.
6048 */
6049 if (check->nck_skip_ctrl) {
6050 return (nvme_ioctl_excl_check(minor, ioc, check));
6051 }
6052
6053 /*
6054 * At this point, we know that we're on the controller's node. We first
6055 * deal with the simple case, is a namespace allowed at all or not. If
6056 * it is not allowed, then the only acceptable value is zero.
6057 */
6058 if (!check->nck_ns_ok) {
6059 if (ioc->nioc_nsid != 0) {
6060 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0,
6061 0));
6062 }
6063
6064 return (nvme_ioctl_excl_check(minor, ioc, check));
6065 }
6066
6067 /*
6068 * At this point, we know that a controller is allowed to use a
6069 * namespace. If we haven't been given zero or the broadcast namespace,
6070 * check to see if it's actually a valid namespace ID. If it is outside
6071 * of range, then it is an error. Next, if we have been requested to
6072 * rewrite 0 (the "this controller" indicator) as the broadcast namespace,
6073 * do so.
6074 *
6075 * While we validate that this namespace is within the valid range, we
6076 * do not check if it is active or inactive. That is left to our callers
6077 * to determine.
6078 */
6079 if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count &&
6080 ioc->nioc_nsid != NVME_NSID_BCAST) {
6081 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0));
6082 }
6083
6084 if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) {
6085 ioc->nioc_nsid = NVME_NSID_BCAST;
6086 }
6087
6088 /*
6089 * Finally, see if we have ended up with a broadcast namespace ID
6090 * whether through specification or rewriting. If that is not allowed,
6091 * then that is an error.
6092 */
6093 if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) {
6094 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
6095 }
6096
6097 return (nvme_ioctl_excl_check(minor, ioc, check));
6098 }
6099
6100 static int
6101 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode,
6102 cred_t *cred_p)
6103 {
6104 nvme_t *const nvme = minor->nm_ctrl;
6105 nvme_ioctl_ctrl_info_t *info;
6106 nvme_reg_cap_t cap = { 0 };
6107 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL };
6108 void *idbuf;
6109
6110 if ((mode & FREAD) == 0)
6111 return (EBADF);
6112
6113 info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY);
6114 if (info == NULL) {
6115 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6116 mode));
6117 }
6118
6119 if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t),
6120 mode & FKIOCTL) != 0) {
6121 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6122 return (EFAULT);
6123 }
6124
6125 if (!nvme_ioctl_check(minor, &info->nci_common,
6126 &nvme_check_ctrl_info)) {
6127 goto copyout;
6128 }
6129
6130 /*
6131 * We explicitly do not use the kernel's cached copy of the identify
6132 * controller data here so that we can get a snapshot of the
6133 * controller's current capacity and values. While it's tempting to use
6134 * this response to refresh the kernel's copy, we don't, simply to keep
6135 * the rest of the driver simpler for now.
6136 */
6137 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6138 info->nci_common = id.nid_common;
6139 goto copyout;
6140 }
6141 bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t));
6142 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6143
6144 /*
6145 * Use the kernel's cached common namespace information for this.
6146 */
6147 bcopy(nvme->n_idcomns, &info->nci_common_ns,
6148 sizeof (nvme_identify_nsid_t));
6149
6150 info->nci_vers = nvme->n_version;
6151
6152 /*
6153 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
6154 * specify the base page size of 4k (1<<12), so add 12 here to
6155 * get the real page size value.
6156 */
6157 cap.r = nvme_get64(nvme, NVME_REG_CAP);
6158 info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax);
6159 info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin);
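/*
 * For example, cap_mpsmin == 0 yields a 4 KiB (1 << 12) minimum page
 * size and cap_mpsmax == 4 yields a 64 KiB (1 << 16) maximum.
 */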
6160
6161 info->nci_nintrs = (uint32_t)nvme->n_intr_cnt;
6162
6163 copyout:
6164 if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t),
6165 mode & FKIOCTL) != 0) {
6166 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6167 return (EFAULT);
6168 }
6169
6170 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6171 return (0);
6172 }
6173
6174 static int
6175 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6176 {
6177 nvme_t *const nvme = minor->nm_ctrl;
6178 nvme_ioctl_ns_info_t *ns_info;
6179 nvme_namespace_t *ns;
6180 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID };
6181 void *idbuf;
6182
6183 if ((mode & FREAD) == 0)
6184 return (EBADF);
6185
6186 ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY);
6187 if (ns_info == NULL) {
6188 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6189 mode));
6190 }
6191
6192 if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t),
6193 mode & FKIOCTL) != 0) {
6194 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6195 return (EFAULT);
6196 }
6197
6198 if (!nvme_ioctl_check(minor, &ns_info->nni_common,
6199 &nvme_check_ns_info)) {
6200 goto copyout;
6201 }
6202
6203 ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0);
6204 ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid);
6205
6206 /*
6207 * First fetch a fresh copy of the namespace information. Most callers
6208 * are using this because they will want a mostly accurate snapshot of
6209 * capacity and utilization.
6210 */
6211 id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid;
6212 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6213 ns_info->nni_common = id.nid_common;
6214 goto copyout;
6215 }
6216 bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t));
6217 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6218
6219 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6220 if (ns->ns_allocated)
6221 ns_info->nni_state |= NVME_NS_STATE_ALLOCATED;
6222
6223 if (ns->ns_active)
6224 ns_info->nni_state |= NVME_NS_STATE_ACTIVE;
6225
6226 if (ns->ns_ignore)
6227 ns_info->nni_state |= NVME_NS_STATE_IGNORED;
6228
6229 if (ns->ns_attached) {
6230 const char *addr;
6231
6232 ns_info->nni_state |= NVME_NS_STATE_ATTACHED;
6233 addr = bd_address(ns->ns_bd_hdl);
6234 if (strlcpy(ns_info->nni_addr, addr,
6235 sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) {
6236 nvme_mgmt_unlock(nvme);
6237 (void) nvme_ioctl_error(&ns_info->nni_common,
6238 NVME_IOCTL_E_BD_ADDR_OVER, 0, 0);
6239 goto copyout;
6240 }
6241 }
6242 nvme_mgmt_unlock(nvme);
6243
6244 copyout:
6245 if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t),
6246 mode & FKIOCTL) != 0) {
6247 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6248 return (EFAULT);
6249 }
6250
6251 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6252 return (0);
6253 }
6254
6255 static int
6256 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6257 {
6258 _NOTE(ARGUNUSED(cred_p));
6259 nvme_t *const nvme = minor->nm_ctrl;
6260 void *idctl;
6261 uint_t model;
6262 nvme_ioctl_identify_t id;
6263 #ifdef _MULTI_DATAMODEL
6264 nvme_ioctl_identify32_t id32;
6265 #endif
6266 boolean_t ns_minor;
6267
6268 if ((mode & FREAD) == 0)
6269 return (EBADF);
6270
6271 model = ddi_model_convert_from(mode);
6272 switch (model) {
6273 #ifdef _MULTI_DATAMODEL
6274 case DDI_MODEL_ILP32:
6275 bzero(&id, sizeof (id));
6276 if (ddi_copyin((void *)arg, &id32, sizeof (id32),
6277 mode & FKIOCTL) != 0) {
6278 return (EFAULT);
6279 }
6280 id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid;
6281 id.nid_cns = id32.nid_cns;
6282 id.nid_ctrlid = id32.nid_ctrlid;
6283 id.nid_data = id32.nid_data;
6284 break;
6285 #endif /* _MULTI_DATAMODEL */
6286 case DDI_MODEL_NONE:
6287 if (ddi_copyin((void *)arg, &id, sizeof (id),
6288 mode & FKIOCTL) != 0) {
6289 return (EFAULT);
6290 }
6291 break;
6292 default:
6293 return (ENOTSUP);
6294 }
6295
6296 if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) {
6297 goto copyout;
6298 }
6299
6300 ns_minor = minor->nm_ns != NULL;
6301 if (!nvme_validate_identify(nvme, &id, ns_minor)) {
6302 goto copyout;
6303 }
6304
6305 if (nvme_identify(nvme, B_TRUE, &id, &idctl)) {
6306 int ret = ddi_copyout(idctl, (void *)id.nid_data,
6307 NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL);
6308 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
6309 if (ret != 0) {
6310 (void) nvme_ioctl_error(&id.nid_common,
6311 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6312 goto copyout;
6313 }
6314
6315 nvme_ioctl_success(&id.nid_common);
6316 }
6317
6318 copyout:
6319 switch (model) {
6320 #ifdef _MULTI_DATAMODEL
6321 case DDI_MODEL_ILP32:
6322 id32.nid_common = id.nid_common;
6323
6324 if (ddi_copyout(&id32, (void *)arg, sizeof (id32),
6325 mode & FKIOCTL) != 0) {
6326 return (EFAULT);
6327 }
6328 break;
6329 #endif /* _MULTI_DATAMODEL */
6330 case DDI_MODEL_NONE:
6331 if (ddi_copyout(&id, (void *)arg, sizeof (id),
6332 mode & FKIOCTL) != 0) {
6333 return (EFAULT);
6334 }
6335 break;
6336 default:
6337 return (ENOTSUP);
6338 }
6339
6340 return (0);
6341 }
6342
6343 /*
6344 * Execute commands on behalf of the various ioctls.
6345 *
6346 * If this returns true then the command completed successfully. Otherwise error
6347 * information is returned in the nvme_ioctl_common_t arguments.
6348 */
6349 typedef struct {
6350 nvme_sqe_t *ica_sqe;
6351 void *ica_data;
6352 uint32_t ica_data_len;
6353 uint_t ica_dma_flags;
6354 int ica_copy_flags;
6355 uint32_t ica_timeout;
6356 uint32_t ica_cdw0;
6357 } nvme_ioc_cmd_args_t;
6358
6359 static boolean_t
6360 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args)
6361 {
6362 nvme_cmd_t *cmd;
6363 boolean_t ret = B_FALSE;
6364
6365 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
6366 cmd->nc_sqid = 0;
6367
6368 /*
6369 * This function is used to facilitate requests from
6370 * userspace, so don't panic if the command fails. This
6371 * is especially true for admin passthru commands, where
6372 * the actual command data structure is entirely defined
6373 * by userspace.
6374 */
6375 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
6376
6377 cmd->nc_callback = nvme_wakeup_cmd;
6378 cmd->nc_sqe = *args->ica_sqe;
6379
6380 if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) {
6381 if (args->ica_data == NULL) {
6382 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM,
6383 0, 0);
6384 goto free_cmd;
6385 }
6386
6387 if (nvme_zalloc_dma(nvme, args->ica_data_len,
6388 args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) !=
6389 DDI_SUCCESS) {
6390 dev_err(nvme->n_dip, CE_WARN,
6391 "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
6392 ret = nvme_ioctl_error(ioc,
6393 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6394 goto free_cmd;
6395 }
6396
6397 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
6398 ret = nvme_ioctl_error(ioc,
6399 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6400 goto free_cmd;
6401 }
6402
6403 if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 &&
6404 ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp,
6405 args->ica_data_len, args->ica_copy_flags) != 0) {
6406 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA,
6407 0, 0);
6408 goto free_cmd;
6409 }
6410 }
6411
6412 nvme_admin_cmd(cmd, args->ica_timeout);
6413
6414 if (!nvme_check_cmd_status_ioctl(cmd, ioc)) {
6415 ret = B_FALSE;
6416 goto free_cmd;
6417 }
6418
6419 args->ica_cdw0 = cmd->nc_cqe.cqe_dw0;
6420
6421 if ((args->ica_dma_flags & DDI_DMA_READ) != 0 &&
6422 ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data,
6423 args->ica_data_len, args->ica_copy_flags) != 0) {
6424 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6425 goto free_cmd;
6426 }
6427
6428 ret = B_TRUE;
6429 nvme_ioctl_success(ioc);
6430
6431 free_cmd:
6432 nvme_free_cmd(cmd);
6433
6434 return (ret);
6435 }
6436
6437 static int
6438 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode,
6439 cred_t *cred_p)
6440 {
6441 nvme_t *const nvme = minor->nm_ctrl;
6442 void *buf;
6443 nvme_ioctl_get_logpage_t log;
6444 uint_t model;
6445 #ifdef _MULTI_DATAMODEL
6446 nvme_ioctl_get_logpage32_t log32;
6447 #endif
6448
6449 if ((mode & FREAD) == 0) {
6450 return (EBADF);
6451 }
6452
6453 model = ddi_model_convert_from(mode);
6454 switch (model) {
6455 #ifdef _MULTI_DATAMODEL
6456 case DDI_MODEL_ILP32:
6457 bzero(&log, sizeof (log));
6458 if (ddi_copyin((void *)arg, &log32, sizeof (log32),
6459 mode & FKIOCTL) != 0) {
6460 return (EFAULT);
6461 }
6462
6463 log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid;
6464 log.nigl_csi = log32.nigl_csi;
6465 log.nigl_lid = log32.nigl_lid;
6466 log.nigl_lsp = log32.nigl_lsp;
6467 log.nigl_len = log32.nigl_len;
6468 log.nigl_offset = log32.nigl_offset;
6469 log.nigl_data = log32.nigl_data;
6470 break;
6471 #endif /* _MULTI_DATAMODEL */
6472 case DDI_MODEL_NONE:
6473 if (ddi_copyin((void *)arg, &log, sizeof (log),
6474 mode & FKIOCTL) != 0) {
6475 return (EFAULT);
6476 }
6477 break;
6478 default:
6479 return (ENOTSUP);
6480 }
6481
6482 /*
6483 * Eventually we'd like to take a soft lock to keep the namespaces from
6484 * changing out from under us during this operation. But we haven't
6485 * implemented that yet.
6486 */
6487 if (!nvme_ioctl_check(minor, &log.nigl_common,
6488 &nvme_check_get_logpage)) {
6489 goto copyout;
6490 }
6491
6492 if (!nvme_validate_logpage(nvme, &log)) {
6493 goto copyout;
6494 }
6495
6496 if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) {
6497 int copy;
6498
6499 copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len,
6500 mode & FKIOCTL);
6501 kmem_free(buf, log.nigl_len);
6502 if (copy != 0) {
6503 (void) nvme_ioctl_error(&log.nigl_common,
6504 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6505 goto copyout;
6506 }
6507
6508 nvme_ioctl_success(&log.nigl_common);
6509 }
6510
6511 copyout:
6512 switch (model) {
6513 #ifdef _MULTI_DATAMODEL
6514 case DDI_MODEL_ILP32:
6515 bzero(&log32, sizeof (log32));
6516
6517 log32.nigl_common = log.nigl_common;
6518 log32.nigl_csi = log.nigl_csi;
6519 log32.nigl_lid = log.nigl_lid;
6520 log32.nigl_lsp = log.nigl_lsp;
6521 log32.nigl_len = log.nigl_len;
6522 log32.nigl_offset = log.nigl_offset;
6523 log32.nigl_data = log.nigl_data;
6524 if (ddi_copyout(&log32, (void *)arg, sizeof (log32),
6525 mode & FKIOCTL) != 0) {
6526 return (EFAULT);
6527 }
6528 break;
6529 #endif /* _MULTI_DATAMODEL */
6530 case DDI_MODEL_NONE:
6531 if (ddi_copyout(&log, (void *)arg, sizeof (log),
6532 mode & FKIOCTL) != 0) {
6533 return (EFAULT);
6534 }
6535 break;
6536 default:
6537 return (ENOTSUP);
6538 }
6539
6540 return (0);
6541 }
6542
6543 static int
6544 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode,
6545 cred_t *cred_p)
6546 {
6547 nvme_t *const nvme = minor->nm_ctrl;
6548 nvme_ioctl_get_feature_t feat;
6549 uint_t model;
6550 #ifdef _MULTI_DATAMODEL
6551 nvme_ioctl_get_feature32_t feat32;
6552 #endif
6553 nvme_get_features_dw10_t gf_dw10 = { 0 };
6554 nvme_ioc_cmd_args_t args = { NULL };
6555 nvme_sqe_t sqe = {
6556 .sqe_opc = NVME_OPC_GET_FEATURES
6557 };
6558
6559 if ((mode & FREAD) == 0) {
6560 return (EBADF);
6561 }
6562
6563 model = ddi_model_convert_from(mode);
6564 switch (model) {
6565 #ifdef _MULTI_DATAMODEL
6566 case DDI_MODEL_ILP32:
6567 bzero(&feat, sizeof (feat));
6568 if (ddi_copyin((void *)arg, &feat32, sizeof (feat32),
6569 mode & FKIOCTL) != 0) {
6570 return (EFAULT);
6571 }
6572
6573 feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid;
6574 feat.nigf_fid = feat32.nigf_fid;
6575 feat.nigf_sel = feat32.nigf_sel;
6576 feat.nigf_cdw11 = feat32.nigf_cdw11;
6577 feat.nigf_data = feat32.nigf_data;
6578 feat.nigf_len = feat32.nigf_len;
6579 break;
6580 #endif /* _MULTI_DATAMODEL */
6581 case DDI_MODEL_NONE:
6582 if (ddi_copyin((void *)arg, &feat, sizeof (feat),
6583 mode & FKIOCTL) != 0) {
6584 return (EFAULT);
6585 }
6586 break;
6587 default:
6588 return (ENOTSUP);
6589 }
6590
6591 if (!nvme_ioctl_check(minor, &feat.nigf_common,
6592 &nvme_check_get_feature)) {
6593 goto copyout;
6594 }
6595
6596 if (!nvme_validate_get_feature(nvme, &feat)) {
6597 goto copyout;
6598 }
6599
6600 gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0);
6601 gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0);
6602 sqe.sqe_cdw10 = gf_dw10.r;
6603 sqe.sqe_cdw11 = feat.nigf_cdw11;
6604 sqe.sqe_nsid = feat.nigf_common.nioc_nsid;
6605
6606 args.ica_sqe = &sqe;
6607 if (feat.nigf_len != 0) {
6608 args.ica_data = (void *)feat.nigf_data;
6609 args.ica_data_len = feat.nigf_len;
6610 args.ica_dma_flags = DDI_DMA_READ;
6611 }
6612 args.ica_copy_flags = mode;
6613 args.ica_timeout = nvme_admin_cmd_timeout;
6614
6615 if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) {
6616 goto copyout;
6617 }
6618
6619 feat.nigf_cdw0 = args.ica_cdw0;
6620
6621 copyout:
6622 switch (model) {
6623 #ifdef _MULTI_DATAMODEL
6624 case DDI_MODEL_ILP32:
6625 bzero(&feat32, sizeof (feat32));
6626
6627 feat32.nigf_common = feat.nigf_common;
6628 feat32.nigf_fid = feat.nigf_fid;
6629 feat32.nigf_sel = feat.nigf_sel;
6630 feat32.nigf_cdw11 = feat.nigf_cdw11;
6631 feat32.nigf_data = feat.nigf_data;
6632 feat32.nigf_len = feat.nigf_len;
6633 feat32.nigf_cdw0 = feat.nigf_cdw0;
6634 if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32),
6635 mode & FKIOCTL) != 0) {
6636 return (EFAULT);
6637 }
6638 break;
6639 #endif /* _MULTI_DATAMODEL */
6640 case DDI_MODEL_NONE:
6641 if (ddi_copyout(&feat, (void *)arg, sizeof (feat),
6642 mode & FKIOCTL) != 0) {
6643 return (EFAULT);
6644 }
6645 break;
6646 default:
6647 return (ENOTSUP);
6648 }
6649
6650 return (0);
6651 }
6652
6653 static int
6654 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6655 {
6656 nvme_t *const nvme = minor->nm_ctrl;
6657 nvme_ioctl_format_t ioc;
6658
6659 if ((mode & FWRITE) == 0)
6660 return (EBADF);
6661
6662 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6663 return (EPERM);
6664
6665 if (ddi_copyin((void *)(uintptr_t)arg, &ioc,
6666 sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0)
6667 return (EFAULT);
6668
6669 if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) {
6670 goto copyout;
6671 }
6672
6673 if (!nvme_validate_format(nvme, &ioc)) {
6674 goto copyout;
6675 }
6676
6677 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6678 if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) {
6679 nvme_mgmt_unlock(nvme);
6680 (void) nvme_ioctl_error(&ioc.nif_common,
6681 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
6682 goto copyout;
6683 }
6684
6685 if (nvme_format_nvm(nvme, &ioc)) {
6686 nvme_ioctl_success(&ioc.nif_common);
6687 nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid);
6688 }
6689 nvme_mgmt_unlock(nvme);
6690
6691 copyout:
6692 if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc),
6693 mode & FKIOCTL) != 0) {
6694 return (EFAULT);
6695 }
6696
6697 return (0);
6698 }
6699
6700 static int
6701 nvme_ioctl_detach(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6702 {
6703 nvme_t *const nvme = minor->nm_ctrl;
6704 nvme_ioctl_common_t com;
6705
6706 if ((mode & FWRITE) == 0)
6707 return (EBADF);
6708
6709 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6710 return (EPERM);
6711
6712 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6713 mode & FKIOCTL) != 0) {
6714 return (EFAULT);
6715 }
6716
6717 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6718 goto copyout;
6719 }
6720
6721 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6722 if (nvme_detach_ns(nvme, &com)) {
6723 nvme_ioctl_success(&com);
6724 }
6725 nvme_mgmt_unlock(nvme);
6726
6727 copyout:
6728 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6729 mode & FKIOCTL) != 0) {
6730 return (EFAULT);
6731 }
6732
6733 return (0);
6734 }
6735
6736 static int
6737 nvme_ioctl_attach(nvme_minor_t *minor, intptr_t arg, int mode,
6738 cred_t *cred_p)
6739 {
6740 nvme_t *const nvme = minor->nm_ctrl;
6741 nvme_ioctl_common_t com;
6742 nvme_namespace_t *ns;
6743
6744 if ((mode & FWRITE) == 0)
6745 return (EBADF);
6746
6747 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6748 return (EPERM);
6749
6750 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6751 mode & FKIOCTL) != 0) {
6752 return (EFAULT);
6753 }
6754
6755 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6756 goto copyout;
6757 }
6758
6759 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6760 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
6761
6762 /*
6763 * Strictly speaking we shouldn't need to call nvme_init_ns() here as
6764 * we should be properly refreshing the internal state when we are
6765 * issuing commands that change things. However, we opt to still do so
6766 * as a bit of a safety check lest we give the kernel something bad or a
6767 * vendor unique command somehow did something behind our backs.
6768 */
6769 if (!ns->ns_attached) {
6770 (void) nvme_rescan_ns(nvme, com.nioc_nsid);
6771 if (nvme_attach_ns(nvme, &com)) {
6772 nvme_ioctl_success(&com);
6773 }
6774 } else {
6775 nvme_ioctl_success(&com);
6776 }
6777 nvme_mgmt_unlock(nvme);
6778
6779 copyout:
6780 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6781 mode & FKIOCTL) != 0) {
6782 return (EFAULT);
6783 }
6784
6785 return (0);
6786 }
6787
6788 static void
6789 nvme_ufm_update(nvme_t *nvme)
6790 {
6791 mutex_enter(&nvme->n_fwslot_mutex);
6792 ddi_ufm_update(nvme->n_ufmh);
6793 if (nvme->n_fwslot != NULL) {
6794 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
6795 nvme->n_fwslot = NULL;
6796 }
6797 mutex_exit(&nvme->n_fwslot_mutex);
6798 }
6799
6800 /*
6801 * Download new firmware to the device's internal staging area. We do not call
6802 * nvme_ufm_update() here because after a firmware download, there has been no
6803 * change to any of the actual persistent firmware data. That requires a
6804 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot
6805 * or to activate a slot.
6806 */
6807 static int
6808 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode,
6809 cred_t *cred_p)
6810 {
6811 nvme_t *const nvme = minor->nm_ctrl;
6812 nvme_ioctl_fw_load_t fw;
6813 uint64_t len, maxcopy;
6814 offset_t offset;
6815 uint32_t gran;
6816 nvme_valid_ctrl_data_t data;
6817 uintptr_t buf;
6818 nvme_sqe_t sqe = {
6819 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD
6820 };
6821
6822 if ((mode & FWRITE) == 0)
6823 return (EBADF);
6824
6825 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6826 return (EPERM);
6827
6828 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
6829 mode & FKIOCTL) != 0) {
6830 return (EFAULT);
6831 }
6832
6833 if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) {
6834 goto copyout;
6835 }
6836
6837 if (!nvme_validate_fw_load(nvme, &fw)) {
6838 goto copyout;
6839 }
6840
6841 len = fw.fwl_len;
6842 offset = fw.fwl_off;
6843 buf = fw.fwl_buf;
6844
6845 /*
6846 * We need to determine the minimum and maximum amount of data that we
6847 * will send to the device in a given go. Starting in NVMe 1.3 this must
6848 * be a multiple of the firmware update granularity (FWUG), but must not
6849 * exceed the maximum data transfer that we've set. Many devices don't
6850 * report something here, which means we'll end up getting our default
6851 * value. Our policy is a little simple, but it's basically if the
6852 * maximum data transfer is evenly divided by the granularity, then use
6853 * it. Otherwise we use the granularity itself. The granularity is
6854 * always in page sized units, so trying to find another optimum point
6855 * isn't worth it. If we encounter a contradiction, then we will have to
6856 * error out.
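	 *
	 * For example (hypothetical values): with a 1 MiB maximum data
	 * transfer size and a 4 KiB granularity, 1 MiB divides evenly and we
	 * copy up to 1 MiB per command; with a 192 KiB granularity, which
	 * does not divide 1 MiB evenly, we fall back to 192 KiB chunks; a
	 * granularity larger than the maximum transfer size is the
	 * contradiction that causes us to fail the ioctl.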
6857 */
6858 data.vcd_vers = &nvme->n_version;
6859 data.vcd_id = nvme->n_idctl;
6860 gran = nvme_fw_load_granularity(&data);
6861
6862 if ((nvme->n_max_data_transfer_size % gran) == 0) {
6863 maxcopy = nvme->n_max_data_transfer_size;
6864 } else if (gran <= nvme->n_max_data_transfer_size) {
6865 maxcopy = gran;
6866 } else {
6867 (void) nvme_ioctl_error(&fw.fwl_common,
6868 NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0);
6869 goto copyout;
6870 }
6871
6872 while (len > 0) {
6873 nvme_ioc_cmd_args_t args = { NULL };
6874 uint64_t copylen = MIN(maxcopy, len);
6875
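		/*
		 * Per the specification, Firmware Image Download takes the
		 * transfer length in cdw10 as a zero-based dword count (NUMD)
		 * and the image offset in cdw11 in dwords (OFST).
		 */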
6876 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
6877 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
6878
6879 args.ica_sqe = &sqe;
6880 args.ica_data = (void *)buf;
6881 args.ica_data_len = copylen;
6882 args.ica_dma_flags = DDI_DMA_WRITE;
6883 args.ica_copy_flags = mode;
6884 args.ica_timeout = nvme_admin_cmd_timeout;
6885
6886 if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) {
6887 break;
6888 }
6889
6890 buf += copylen;
6891 offset += copylen;
6892 len -= copylen;
6893 }
6894
6895 copyout:
6896 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
6897 mode & FKIOCTL) != 0) {
6898 return (EFAULT);
6899 }
6900
6901 return (0);
6902 }
6903
6904 static int
6905 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode,
6906 cred_t *cred_p)
6907 {
6908 nvme_t *const nvme = minor->nm_ctrl;
6909 nvme_ioctl_fw_commit_t fw;
6910 nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
6911 nvme_ioc_cmd_args_t args = { NULL };
6912 nvme_sqe_t sqe = {
6913 .sqe_opc = NVME_OPC_FW_ACTIVATE
6914 };
6915
6916 if ((mode & FWRITE) == 0)
6917 return (EBADF);
6918
6919 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6920 return (EPERM);
6921
6922 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
6923 mode & FKIOCTL) != 0) {
6924 return (EFAULT);
6925 }
6926
6927 if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) {
6928 goto copyout;
6929 }
6930
6931 if (!nvme_validate_fw_commit(nvme, &fw)) {
6932 goto copyout;
6933 }
6934
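	/*
	 * Firmware Commit takes the target slot and the commit action (e.g.
	 * save to a slot or activate a slot) together in dword 10.
	 */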
6935 fc_dw10.b.fc_slot = fw.fwc_slot;
6936 fc_dw10.b.fc_action = fw.fwc_action;
6937 sqe.sqe_cdw10 = fc_dw10.r;
6938
6939 args.ica_sqe = &sqe;
6940 args.ica_timeout = nvme_commit_save_cmd_timeout;
6941
6942 /*
6943 * There are no conditional actions to take based on this succeeding or
6944 * failing. A failure is recorded in the ioctl structure returned to the
6945 * user.
6946 */
6947 (void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args);
6948
6949 /*
6950 * Let the DDI UFM subsystem know that the firmware information for
6951 * this device has changed. We perform this unconditionally as an
6952 * invalidation doesn't particularly hurt us.
6953 */
6954 nvme_ufm_update(nvme);
6955
6956 copyout:
6957 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
6958 mode & FKIOCTL) != 0) {
6959 return (EFAULT);
6960 }
6961
6962 return (0);
6963 }
6964
6965 /*
6966 * Helper to copy in a passthru command from userspace, handling
6967 * different data models.
6968 */
6969 static int
6970 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode)
6971 {
6972 switch (ddi_model_convert_from(mode & FMODELS)) {
6973 #ifdef _MULTI_DATAMODEL
6974 case DDI_MODEL_ILP32: {
6975 nvme_ioctl_passthru32_t cmd32;
6976
6977 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0)
6978 return (EFAULT);
6979
6980 bzero(cmd, sizeof (nvme_ioctl_passthru_t));
6981
6982 cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid;
6983 cmd->npc_opcode = cmd32.npc_opcode;
6984 cmd->npc_timeout = cmd32.npc_timeout;
6985 cmd->npc_flags = cmd32.npc_flags;
6986 cmd->npc_impact = cmd32.npc_impact;
6987 cmd->npc_cdw12 = cmd32.npc_cdw12;
6988 cmd->npc_cdw13 = cmd32.npc_cdw13;
6989 cmd->npc_cdw14 = cmd32.npc_cdw14;
6990 cmd->npc_cdw15 = cmd32.npc_cdw15;
6991 cmd->npc_buflen = cmd32.npc_buflen;
6992 cmd->npc_buf = cmd32.npc_buf;
6993 break;
6994 }
6995 #endif /* _MULTI_DATAMODEL */
6996 case DDI_MODEL_NONE:
6997 if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t),
6998 mode) != 0) {
6999 return (EFAULT);
7000 }
7001 break;
7002 default:
7003 return (ENOTSUP);
7004 }
7005
7006 return (0);
7007 }
7008
7009 /*
7010 * Helper to copy out a passthru command result to userspace, handling
7011 * different data models.
7012 */
7013 static int
7014 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode)
7015 {
7016 switch (ddi_model_convert_from(mode & FMODELS)) {
7017 #ifdef _MULTI_DATAMODEL
7018 case DDI_MODEL_ILP32: {
7019 nvme_ioctl_passthru32_t cmd32;
7020
7021 bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t));
7022
7023 cmd32.npc_common = cmd->npc_common;
7024 cmd32.npc_opcode = cmd->npc_opcode;
7025 cmd32.npc_timeout = cmd->npc_timeout;
7026 cmd32.npc_flags = cmd->npc_flags;
7027 cmd32.npc_impact = cmd->npc_impact;
7028 cmd32.npc_cdw0 = cmd->npc_cdw0;
7029 cmd32.npc_cdw12 = cmd->npc_cdw12;
7030 cmd32.npc_cdw13 = cmd->npc_cdw13;
7031 cmd32.npc_cdw14 = cmd->npc_cdw14;
7032 cmd32.npc_cdw15 = cmd->npc_cdw15;
7033 cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
7034 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
7035 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
7036 return (EFAULT);
7037 break;
7038 }
7039 #endif /* _MULTI_DATAMODEL */
7040 case DDI_MODEL_NONE:
7041 if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t),
7042 mode) != 0) {
7043 return (EFAULT);
7044 }
7045 break;
7046 default:
7047 return (ENOTSUP);
7048 }
7049 return (0);
7050 }
7051
7052 /*
7053 * Run an arbitrary vendor-specific admin command on the device.
7054 */
7055 static int
7056 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
7057 {
7058 nvme_t *const nvme = minor->nm_ctrl;
7059 int rv;
7060 nvme_ioctl_passthru_t pass;
7061 nvme_sqe_t sqe;
7062 nvme_ioc_cmd_args_t args = { NULL };
7063
7064 /*
7065 * Basic checks: permissions, data model, argument size.
7066 */
7067 if ((mode & FWRITE) == 0)
7068 return (EBADF);
7069
7070 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7071 return (EPERM);
7072
7073 if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass,
7074 mode)) != 0) {
7075 return (rv);
7076 }
7077
7078 if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) {
7079 goto copyout;
7080 }
7081
7082 if (!nvme_validate_vuc(nvme, &pass)) {
7083 goto copyout;
7084 }
7085
7086 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7087 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7088 /*
7089 		 * We've been told this command has namespace impact. For now
7090 		 * we force that to mean every namespace until we have more use
7091 		 * cases and reason to trust the nsid field.
7092 */
7093 if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) {
7094 nvme_mgmt_unlock(nvme);
7095 (void) nvme_ioctl_error(&pass.npc_common,
7096 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
7097 goto copyout;
7098 }
7099 }
7100
7101 bzero(&sqe, sizeof (sqe));
7102
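	/*
	 * Build the submission queue entry from the caller's request. cdw10
	 * carries the data transfer length in dwords, which matches the
	 * spec's suggested convention (NDT) for vendor specific commands;
	 * cdw12-15 are passed through from the caller untouched.
	 */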
7103 sqe.sqe_opc = pass.npc_opcode;
7104 sqe.sqe_nsid = pass.npc_common.nioc_nsid;
7105 sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT);
7106 sqe.sqe_cdw12 = pass.npc_cdw12;
7107 sqe.sqe_cdw13 = pass.npc_cdw13;
7108 sqe.sqe_cdw14 = pass.npc_cdw14;
7109 sqe.sqe_cdw15 = pass.npc_cdw15;
7110
7111 args.ica_sqe = &sqe;
7112 args.ica_data = (void *)pass.npc_buf;
7113 args.ica_data_len = pass.npc_buflen;
7114 args.ica_copy_flags = mode;
7115 args.ica_timeout = pass.npc_timeout;
7116
7117 if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0)
7118 args.ica_dma_flags |= DDI_DMA_READ;
7119 else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0)
7120 args.ica_dma_flags |= DDI_DMA_WRITE;
7121
7122 if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) {
7123 pass.npc_cdw0 = args.ica_cdw0;
7124 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7125 nvme_rescan_ns(nvme, NVME_NSID_BCAST);
7126 }
7127 }
7128 nvme_mgmt_unlock(nvme);
7129
7130 copyout:
7131 rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg,
7132 mode);
7133
7134 return (rv);
7135 }
7136
7137 static int
7138 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode,
7139 cred_t *cred_p)
7140 {
7141 nvme_ioctl_lock_t lock;
7142 const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK;
7143 nvme_t *nvme = minor->nm_ctrl;
7144
7145 if ((mode & FWRITE) == 0)
7146 return (EBADF);
7147
7148 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7149 return (EPERM);
7150
7151 if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock),
7152 mode & FKIOCTL) != 0) {
7153 return (EFAULT);
7154 }
7155
7156 if (lock.nil_ent != NVME_LOCK_E_CTRL &&
7157 lock.nil_ent != NVME_LOCK_E_NS) {
7158 (void) nvme_ioctl_error(&lock.nil_common,
7159 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7160 goto copyout;
7161 }
7162
7163 if (lock.nil_level != NVME_LOCK_L_READ &&
7164 lock.nil_level != NVME_LOCK_L_WRITE) {
7165 (void) nvme_ioctl_error(&lock.nil_common,
7166 NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0);
7167 goto copyout;
7168 }
7169
7170 if ((lock.nil_flags & ~all_flags) != 0) {
7171 (void) nvme_ioctl_error(&lock.nil_common,
7172 NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0);
7173 goto copyout;
7174 }
7175
7176 if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) {
7177 goto copyout;
7178 }
7179
7180 /*
7181 * If we're on a namespace, confirm that we're not asking for the
7182 * controller.
7183 */
7184 if (lock.nil_common.nioc_nsid != 0 &&
7185 lock.nil_ent == NVME_LOCK_E_CTRL) {
7186 (void) nvme_ioctl_error(&lock.nil_common,
7187 NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0);
7188 goto copyout;
7189 }
7190
7191 /*
7192 * We've reached the point where we can no longer actually check things
7193 * without serializing state. First, we need to check to make sure that
7194 * none of our invariants are being broken for locking:
7195 *
7196 * 1) The caller isn't already blocking for a lock operation to
7197 * complete.
7198 *
7199 	 * 2) The caller is attempting to grab a lock that they already have.
7200 	 * While this might also violate other rules, we opt to check it
7201 	 * ahead of them so that we can give our callers slightly better
7202 	 * error messages.
7203 *
7204 * 3) The caller is trying to grab a controller lock, while holding a
7205 * namespace lock.
7206 *
7207 * 4) The caller has a controller write lock and is trying to get a
7208 * namespace lock. For now, we disallow this case. Holding a controller
7209 * read lock is allowed, but the write lock allows you to operate on all
7210 	 * namespaces anyway. In addition, this simplifies the locking logic;
7211 * however, this constraint may be loosened in the future.
7212 *
7213 * 5) The caller is trying to acquire a second namespace lock when they
7214 * already have one.
7215 */
7216 mutex_enter(&nvme->n_minor_mutex);
7217 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED ||
7218 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) {
7219 (void) nvme_ioctl_error(&lock.nil_common,
7220 NVME_IOCTL_E_LOCK_PENDING, 0, 0);
7221 mutex_exit(&nvme->n_minor_mutex);
7222 goto copyout;
7223 }
7224
7225 if ((lock.nil_ent == NVME_LOCK_E_CTRL &&
7226 minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) ||
7227 (lock.nil_ent == NVME_LOCK_E_NS &&
7228 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7229 minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) {
7230 (void) nvme_ioctl_error(&lock.nil_common,
7231 NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0);
7232 mutex_exit(&nvme->n_minor_mutex);
7233 goto copyout;
7234 }
7235
7236 if (lock.nil_ent == NVME_LOCK_E_CTRL &&
7237 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7238 (void) nvme_ioctl_error(&lock.nil_common,
7239 NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0);
7240 mutex_exit(&nvme->n_minor_mutex);
7241 goto copyout;
7242 }
7243
7244 if (lock.nil_ent == NVME_LOCK_E_NS &&
7245 (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7246 minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) {
7247 (void) nvme_ioctl_error(&lock.nil_common,
7248 NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0);
7249 mutex_exit(&nvme->n_minor_mutex);
7250 goto copyout;
7251 }
7252
7253 if (lock.nil_ent == NVME_LOCK_E_NS &&
7254 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7255 (void) nvme_ioctl_error(&lock.nil_common,
7256 NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0);
7257 mutex_exit(&nvme->n_minor_mutex);
7258 goto copyout;
7259 }
7260
7261 #ifdef DEBUG
7262 /*
7263 * This is a big block of sanity checks to make sure that we haven't
7264 * allowed anything bad to happen.
7265 */
7266 if (lock.nil_ent == NVME_LOCK_E_NS) {
7267 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7268 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7269 NVME_LOCK_STATE_UNLOCKED);
7270 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7271 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7272
7273 if (minor->nm_ns != NULL) {
7274 ASSERT3U(minor->nm_ns->ns_id, ==,
7275 lock.nil_common.nioc_nsid);
7276 }
7277
7278 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7279 } else {
7280 ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL);
7281 ASSERT3U(minor->nm_ctrl_lock.nli_state, ==,
7282 NVME_LOCK_STATE_UNLOCKED);
7283 ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0);
7284 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7285 ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node));
7286
7287 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7288 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7289 NVME_LOCK_STATE_UNLOCKED);
7290 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7291 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7292 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7293 }
7294 #endif /* DEBUG */
7295
7296 /*
7297 * At this point we should actually attempt a locking operation.
7298 */
7299 nvme_rwlock(minor, &lock);
7300 mutex_exit(&nvme->n_minor_mutex);
7301
7302 copyout:
7303 if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock),
7304 mode & FKIOCTL) != 0) {
7305 return (EFAULT);
7306 }
7307
7308 return (0);
7309 }
7310
7311 static int
7312 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
7313 cred_t *cred_p)
7314 {
7315 nvme_ioctl_unlock_t unlock;
7316 nvme_t *const nvme = minor->nm_ctrl;
7317 boolean_t is_ctrl;
7318 nvme_lock_t *lock;
7319 nvme_minor_lock_info_t *info;
7320
7321 /*
7322 	 * Note, we explicitly don't check for privileges for unlock. The idea
7323 	 * is that if you hold the lock, that's what matters. If you don't
7324 	 * hold the lock, then it doesn't matter what privileges you have at
7325 	 * all.
7326 */
7327 if ((mode & FWRITE) == 0)
7328 return (EBADF);
7329
7330 if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
7331 mode & FKIOCTL) != 0) {
7332 return (EFAULT);
7333 }
7334
7335 if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
7336 unlock.niu_ent != NVME_LOCK_E_NS) {
7337 (void) nvme_ioctl_error(&unlock.niu_common,
7338 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7339 goto copyout;
7340 }
7341
7342 if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) {
7343 goto copyout;
7344 }
7345
7346 /*
7347 * If we're on a namespace, confirm that we're not asking for the
7348 * controller.
7349 */
7350 if (unlock.niu_common.nioc_nsid != 0 &&
7351 unlock.niu_ent == NVME_LOCK_E_CTRL) {
7352 (void) nvme_ioctl_error(&unlock.niu_common,
7353 NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
7354 goto copyout;
7355 }
7356
7357 mutex_enter(&nvme->n_minor_mutex);
7358 if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
7359 if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7360 mutex_exit(&nvme->n_minor_mutex);
7361 (void) nvme_ioctl_error(&unlock.niu_common,
7362 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7363 goto copyout;
7364 }
7365 } else {
7366 if (minor->nm_ns_lock.nli_ns == NULL) {
7367 mutex_exit(&nvme->n_minor_mutex);
7368 (void) nvme_ioctl_error(&unlock.niu_common,
7369 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7370 goto copyout;
7371 }
7372
7373 /*
7374 		 * Check that our unlock request corresponds to the namespace
7375 		 * ID that is currently locked. A mismatch can happen if we're
7376 		 * using the controller node and the caller specified a valid,
7377 		 * but not locked, namespace ID.
7378 */
7379 if (minor->nm_ns_lock.nli_ns->ns_id !=
7380 unlock.niu_common.nioc_nsid) {
7381 mutex_exit(&nvme->n_minor_mutex);
7382 ASSERT3P(minor->nm_ns, ==, NULL);
7383 (void) nvme_ioctl_error(&unlock.niu_common,
7384 NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
7385 goto copyout;
7386 }
7387
7388 if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7389 mutex_exit(&nvme->n_minor_mutex);
7390 (void) nvme_ioctl_error(&unlock.niu_common,
7391 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7392 goto copyout;
7393 }
7394 }
7395
7396 /*
7397 * Finally, perform the unlock.
7398 */
7399 is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
7400 if (is_ctrl) {
7401 lock = &nvme->n_lock;
7402 info = &minor->nm_ctrl_lock;
7403 } else {
7404 nvme_namespace_t *ns;
7405 const uint32_t nsid = unlock.niu_common.nioc_nsid;
7406
7407 ns = nvme_nsid2ns(nvme, nsid);
7408 lock = &ns->ns_lock;
7409 info = &minor->nm_ns_lock;
7410 VERIFY3P(ns, ==, info->nli_ns);
7411 }
7412 nvme_rwunlock(info, lock);
7413 mutex_exit(&nvme->n_minor_mutex);
7414 nvme_ioctl_success(&unlock.niu_common);
7415
7416 copyout:
7417 if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
7418 mode & FKIOCTL) != 0) {
7419 return (EFAULT);
7420 }
7421
7422 return (0);
7423 }
7424
7425 static int
7426 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
7427 int *rval_p)
7428 {
7429 #ifndef __lock_lint
7430 _NOTE(ARGUNUSED(rval_p));
7431 #endif
7432 nvme_minor_t *minor;
7433 nvme_t *nvme;
7434
7435 minor = nvme_minor_find_by_dev(dev);
7436 if (minor == NULL) {
7437 return (ENXIO);
7438 }
7439
7440 nvme = minor->nm_ctrl;
7441 if (nvme == NULL)
7442 return (ENXIO);
7443
7444 if (IS_DEVCTL(cmd))
7445 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
7446
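	/*
	 * When the controller is dead, only detach and unlock are allowed so
	 * that consumers can still tear down their state. Any other NVMe
	 * ioctl gets the controller's death status copied out; non-NVMe
	 * ioctls simply fail with EIO.
	 */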
7447 if (nvme->n_dead && (cmd != NVME_IOC_DETACH && cmd !=
7448 NVME_IOC_UNLOCK)) {
7449 if (IS_NVME_IOC(cmd) == 0) {
7450 return (EIO);
7451 }
7452
7453 return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
7454 mode));
7455 }
7456
7457 /*
7458 * ioctls that are no longer using the original ioctl structure.
7459 */
7460 switch (cmd) {
7461 case NVME_IOC_CTRL_INFO:
7462 return (nvme_ioctl_ctrl_info(minor, arg, mode, cred_p));
7463 case NVME_IOC_IDENTIFY:
7464 return (nvme_ioctl_identify(minor, arg, mode, cred_p));
7465 case NVME_IOC_GET_LOGPAGE:
7466 return (nvme_ioctl_get_logpage(minor, arg, mode, cred_p));
7467 case NVME_IOC_GET_FEATURE:
7468 return (nvme_ioctl_get_feature(minor, arg, mode, cred_p));
7469 case NVME_IOC_DETACH:
7470 return (nvme_ioctl_detach(minor, arg, mode, cred_p));
7471 case NVME_IOC_ATTACH:
7472 return (nvme_ioctl_attach(minor, arg, mode, cred_p));
7473 case NVME_IOC_FORMAT:
7474 return (nvme_ioctl_format(minor, arg, mode, cred_p));
7475 case NVME_IOC_FIRMWARE_DOWNLOAD:
7476 return (nvme_ioctl_firmware_download(minor, arg, mode,
7477 cred_p));
7478 case NVME_IOC_FIRMWARE_COMMIT:
7479 return (nvme_ioctl_firmware_commit(minor, arg, mode,
7480 cred_p));
7481 case NVME_IOC_NS_INFO:
7482 return (nvme_ioctl_ns_info(minor, arg, mode, cred_p));
7483 case NVME_IOC_PASSTHRU:
7484 return (nvme_ioctl_passthru(minor, arg, mode, cred_p));
7485 case NVME_IOC_LOCK:
7486 return (nvme_ioctl_lock(minor, arg, mode, cred_p));
7487 case NVME_IOC_UNLOCK:
7488 return (nvme_ioctl_unlock(minor, arg, mode, cred_p));
7489 default:
7490 return (ENOTTY);
7491 }
7492 }
7493
7494 /*
7495 * DDI UFM Callbacks
7496 */
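
/*
 * We expose a single firmware image; its slot count comes from the FRMW field
 * of the Identify Controller data structure.
 */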
7497 static int
7498 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
7499 ddi_ufm_image_t *img)
7500 {
7501 nvme_t *nvme = arg;
7502
7503 if (imgno != 0)
7504 return (EINVAL);
7505
7506 ddi_ufm_image_set_desc(img, "Firmware");
7507 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
7508
7509 return (0);
7510 }
7511
7512 /*
7513 * Fill out firmware slot information for the requested slot. The firmware
7514 * slot information is gathered by requesting the Firmware Slot Information log
7515 * page. The format of the page is described in section 5.10.1.3.
7516 *
7517  * We lazily cache the log page on the first call and then invalidate the
7518  * cached data after a successful firmware commit command. The cached data is
7519  * protected by a mutex as the state can change asynchronously to this
7520  * callback.
7521 */
7522 static int
7523 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
7524 uint_t slotno, ddi_ufm_slot_t *slot)
7525 {
7526 nvme_t *nvme = arg;
7527 void *log = NULL;
7528 size_t bufsize;
7529 ddi_ufm_attr_t attr = 0;
7530 char fw_ver[NVME_FWVER_SZ + 1];
7531
7532 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
7533 return (EINVAL);
7534
7535 mutex_enter(&nvme->n_fwslot_mutex);
7536 if (nvme->n_fwslot == NULL) {
7537 if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
7538 NVME_LOGPAGE_FWSLOT) ||
7539 bufsize != sizeof (nvme_fwslot_log_t)) {
7540 if (log != NULL)
7541 kmem_free(log, bufsize);
7542 mutex_exit(&nvme->n_fwslot_mutex);
7543 return (EIO);
7544 }
7545 nvme->n_fwslot = (nvme_fwslot_log_t *)log;
7546 }
7547
7548 /*
7549 * NVMe numbers firmware slots starting at 1
7550 */
7551 if (slotno == (nvme->n_fwslot->fw_afi - 1))
7552 attr |= DDI_UFM_ATTR_ACTIVE;
7553
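	/*
	 * Per the FRMW field of the Identify Controller data, the first
	 * firmware slot may be read-only; all other slots are writable.
	 */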
7554 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
7555 attr |= DDI_UFM_ATTR_WRITEABLE;
7556
7557 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
7558 attr |= DDI_UFM_ATTR_EMPTY;
7559 } else {
7560 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
7561 NVME_FWVER_SZ);
7562 fw_ver[NVME_FWVER_SZ] = '\0';
7563 ddi_ufm_slot_set_version(slot, fw_ver);
7564 }
7565 mutex_exit(&nvme->n_fwslot_mutex);
7566
7567 ddi_ufm_slot_set_attrs(slot, attr);
7568
7569 return (0);
7570 }
7571
7572 static int
7573 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
7574 {
7575 *caps = DDI_UFM_CAP_REPORT;
7576 return (0);
7577 }
7578
7579 boolean_t
7580 nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
7581 {
7582 return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
7583 }
7584