1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
14 * Copyright 2019 Unix Software Ltd.
15 * Copyright 2020 Joyent, Inc.
16 * Copyright 2020 Racktop Systems.
17 * Copyright 2025 Oxide Computer Company.
18 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
19 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
20 */
21
22 /*
23 * blkdev driver for NVMe compliant storage devices
24 *
25 * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
26 * devices. Features are added to the driver as we encounter devices that
27 * require them or as our needs dictate, so some commands or log pages may not take
28 * advantage of newer features that devices support at this time. When you
29 * encounter such a case, it is generally fine to add that support to the driver
30 * as long as you take care to ensure that the requisite device version is met
31 * before using it.
32 *
33 * The driver has only been tested on x86 systems and will not work on big-
34 * endian systems without changes to the code accessing registers and data
35 * structures used by the hardware.
36 *
37 * ---------------
38 * Interrupt Usage
39 * ---------------
40 *
41 * The driver will use a single interrupt while configuring the device as the
42 * specification requires, but contrary to the specification it will try to use
43 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
44 * will switch to multiple-message MSI(-X) if supported. The driver wants to
45 * have one interrupt vector per CPU, but it will work correctly if fewer are
46 * available. Interrupts can be shared by queues; the interrupt handler will
47 * iterate through the I/O queue array by steps of n_intr_cnt. Usually only
48 * the admin queue will share an interrupt with one I/O queue. The interrupt
49 * handler will retrieve completed commands from all queues sharing an interrupt
50 * vector and will post them to a taskq for completion processing.
51 *
52 * ------------------
53 * Command Processing
54 * ------------------
55 *
56 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
57 * to 65536 I/O commands. The driver will configure one I/O queue pair per
58 * available interrupt vector, with the queue length usually much smaller than
59 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
60 * interrupt vectors will be used.
61 *
62 * Additionally the hardware provides a single special admin queue pair that can
63 * hold up to 4096 admin commands.
64 *
65 * From the hardware perspective both queues of a queue pair are independent,
66 * but they share some driver state: the command array (holding pointers to
67 * commands currently being processed by the hardware) and the active command
68 * counter. Access to a submission queue and the shared state is protected by
69 * nq_mutex; the completion queue is protected by ncq_mutex.
70 *
71 * When a command is submitted to a queue pair the active command counter is
72 * incremented and a pointer to the command is stored in the command array. The
73 * array index is used as command identifier (CID) in the submission queue
74 * entry. Some commands may take a very long time to complete, and if the queue
75 * wraps around in that time a submission may find the next array slot to still
76 * be used by a long-running command. In this case the array is sequentially
77 * searched for the next free slot. The length of the command array is the same
78 * as the configured queue length. Queue overrun is prevented by the semaphore,
79 * so a command submission may block if the queue is full.
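 *
 * Submission therefore boils down to something like the following sketch. Only
 * nq_mutex is a real name here; the other names are illustrative:
 *
 *	sema_p(&qp->sema);			wait for a free queue slot
 *	mutex_enter(&qp->nq_mutex);
 *	while (qp->cmds[slot] != NULL)		skip slots of long-running cmds
 *		slot = (slot + 1) % qlen;
 *	cmd->sqe.sqe_cid = slot;		array index becomes the CID
 *	qp->cmds[slot] = cmd;
 *	qp->active_cmds++;
 *	... copy the SQE into the queue and ring the doorbell ...
 *	mutex_exit(&qp->nq_mutex);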
80 *
81 * ------------------
82 * Polled I/O Support
83 * ------------------
84 *
85 * For kernel core dump support the driver can do polled I/O. As interrupts are
86 * turned off while dumping the driver will just submit a command in the regular
87 * way, and then repeatedly attempt a command retrieval until it gets the
88 * command back.
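 *
 * Conceptually that loop is just (a minimal sketch built around the helpers
 * declared later in this file; error and timeout handling omitted):
 *
 *	(void) nvme_submit_io_cmd(ioq, cmd);
 *	while (nvme_retrieve_cmd(nvme, ioq) != cmd)
 *		;			spin until our command comes back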
89 *
90 * -----------------
91 * Namespace Support
92 * -----------------
93 *
94 * NVMe devices can have multiple namespaces, each being an independent data
95 * store. The driver supports multiple namespaces and creates a blkdev interface
96 * for each namespace found. Namespaces can have various attributes to support
97 * protection information. This driver does not support any of this and ignores
98 * namespaces that have these attributes.
99 *
100 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
101 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
102 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
103 * if present to generate the devid, and passes the EUI64 to blkdev to use it
104 * in the device node names.
105 *
106 * When a device has more than (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
107 * single controller, additional namespaces will not have minor nodes created.
108 * They can still be used and specified by the controller and libnvme. This
109 * limit is trying to balance the number of controllers and namespaces while
110 * fitting within the constraints of MAXMIN32, aka a 32-bit device number which
111 * only has 18-bits for the minor number. See the minor node section for more
112 * information.
113 *
114 * The driver supports namespace management, meaning the ability to create and
115 * destroy namespaces, and to attach and detach namespaces from controllers.
116 * Each namespace has an associated nvme_ns_state_t, which transitions through
117 * several states. The UNALLOCATED, ALLOCATED, and ACTIVE states are states that
118 * are defined by the NVMe specification. Not all ACTIVE namespaces may be
119 * attached to blkdev(4D) due to the use of features we don't support, for
120 * example, metadata protection. ACTIVE namespaces that don't use such features
121 * are automatically moved to the NOT_IGNORED state by the kernel. Once they are
122 * attached to blkdev they enter the ATTACHED state.
123 *
124 * By default, a namespace can only transition one such state at a time. Each
125 * command that transitions between states has a corresponding array of errnos,
126 * for example nvme_ns_delete_states[], nvme_ctrl_attach_states[], etc. These
127 * arrays dictate whether a state-changing command is allowed to proceed given
128 * the namespace's current state, and each disallowed state maps to a specific
129 * error that explains why the namespace isn't in the proper state. This allows
130 * library consumers to determine whether a namespace is already in the state
131 * that an operation targets, and hence whether the resulting error can be
132 * ignored. The following diagram summarizes namespace transitions:
133 *
134 * +-------------+
135 * | |
136 * | Unallocated |
137 * | |
138 * +-------------+
139 * | ^
140 * | |
141 * Namespace Management: . .* * . . . Namespace Management:
142 * Create | | Delete
143 * NVME_IOC_NS_CREATE | | NVME_IOC_NS_DELETE
144 * v |
145 * +-------------+
146 * | |
147 * | Allocated |
148 * | |
149 * +-------------+
150 * | ^
151 * | |
152 * Namespace Attachment: . .* * . . . Namespace Attachment:
153 * Controller Attach | | Controller Detach
154 * NVME_IOC_CTRL_ATTACH | | NVME_IOC_CTRL_DETACH
155 * v |
156 * +------------+ |
157 * | | | +----------+
158 * | Active |>-----+----<| Not |
159 * | |--*-------->| Ignored |
160 * +------------+ . +----------+
161 * . | ^
162 * automatic kernel transition | |
163 * | * . . blkdev Detach
164 * blkdev attach . . * | NVME_IOC_BD_DETACH
165 * NVME_IOC_BD_ATTACH | |
166 * v |
167 * +----------+
168 * | |
169 * | blkdev |
170 * | attached |
171 * | |
172 * +----------+
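 *
 * As a concrete example of how the state tables below are used: a controller
 * detach request (NVME_IOC_CTRL_DETACH) against a namespace that is still
 * attached to blkdev is rejected via nvme_ctrl_detach_states[], roughly (the
 * variable names are illustrative; the helper and table are defined later in
 * this file):
 *
 *	if (!nvme_ns_state_check(ns, com, nvme_ctrl_detach_states))
 *		return;		the semantic error recorded in the common
 *				ioctl structure is NVME_IOCTL_E_NS_BLKDEV_ATTACH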
173 *
174 * -----------
175 * Minor nodes
176 * -----------
177 *
178 * For each NVMe device the driver exposes one minor node for the controller and
179 * one minor node for each namespace. The only operations supported by those
180 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
181 * primary control interface for the devices. The character device is a private
182 * interface; we attempt to provide stability through libnvme and, more so, nvmeadm.
183 *
184 * The controller minor node is much more flexible than the namespace minor node
185 * and should be preferred. The controller node allows one to target any
186 * namespace that the device has, while the namespace minor is limited in what
187 * it can acquire. While the namespace minor exists, it should not be relied
188 * upon, and libnvme does not rely on it.
189 *
190 * The minor number space is split in two. We use the lower part to support the
191 * controller and namespaces as described above in the 'Namespace Support'
192 * section. The second set is used for cloning opens. We set aside one million
193 * minors for this purpose. We utilize a cloning open so that we can have
194 * per-file_t state, which is how we end up implementing and tracking locking
195 * state and related information.
196 *
197 * When we have this cloned open, then we allocate a new nvme_minor_t which gets
198 * its minor number from the nvme_open_minors id_space_t and is stored in the
199 * nvme_open_minors_avl. While the open(9E) call itself targets a controller or
200 * namespace minor, everything after that occurs in the context of one of these
201 * ephemeral minors.
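 *
 * A minimal sketch of the cloning part of open(9E), assuming illustrative
 * member names (the id_space, AVL, and mutex are the globals defined later in
 * this file):
 *
 *	minor = kmem_zalloc(sizeof (nvme_minor_t), KM_SLEEP);
 *	minor->nm_minor = id_alloc(nvme_open_minors);
 *	mutex_enter(&nvme_open_minors_mutex);
 *	avl_add(&nvme_open_minors_avl, minor);
 *	mutex_exit(&nvme_open_minors_mutex);
 *	*devp = makedevice(getmajor(*devp), minor->nm_minor);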
202 *
203 * ------------------------------------
204 * ioctls, Errors, and Exclusive Access
205 * ------------------------------------
206 *
207 * All of the logical commands that one can issue are driven through the
208 * ioctl(9E) interface. All of our ioctls have a similar shape where they
209 * all include the 'nvme_ioctl_common_t' as their first member.
210 *
211 * This common ioctl structure is used to communicate the namespace that should
212 * be targeted. When the namespace is left as 0, that indicates that the
213 * operation should target the default of the minor node. For a namespace
214 * minor, that will be transparently rewritten to the namespace's namespace id.
215 *
216 * In addition, the nvme_ioctl_common_t structure also has a standard error
217 * return. Our goal in our ioctl path is to ensure that we have useful semantic
218 * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead, as
219 * long as we can copy in our structure, we will set a semantic error. If
220 * we have an error from the controller, then that will be included there.
221 *
222 * Each command has a specific policy that controls whether or not it is allowed
223 * on the namespace or controller minor, whether the broadcast namespace is
224 * allowed, various settings around what kind of exclusive access is allowed,
225 * and more. Each of these is wrapped up in a bit of policy described by the
226 * 'nvme_ioctl_check_t' structure.
227 *
228 * The device provides a form of exclusion in the form of both a
229 * controller-level and namespace-level read and write lock. Most operations do
230 * not require a lock (e.g. get log page, identify, etc.), but a few do (e.g.
231 * format nvm, firmware related activity, etc.). A read lock guarantees that you
232 * can complete your operation without interference, but read locks are not
233 * required. If you don't take a read lock and someone comes in with a write
234 * lock, then subsequent operations will fail with a semantic error indicating
235 * that you were blocked due to this.
236 *
237 * Here are some of the rules that govern our locks:
238 *
239 * 1. Writers starve readers. Any readers are allowed to finish when there is a
240 * pending writer; however, all subsequent readers will be blocked upon that
241 * writer.
242 * 2. A controller write lock takes priority over all other locks. Put
243 * differently a controller writer not only starves subsequent controller
244 * readers, but also all namespace read and write locks.
245 * 3. Each namespace lock is independent.
246 * 4. At most a single namespace lock may be owned.
247 * 5. If you own a namespace lock, you may not take a controller lock (to help
248 * with lock ordering).
249 * 6. In a similar spirit, if you own a controller write lock, you may not take
250 * any namespace lock. Someone with the controller write lock can perform any
251 * operations that they need to. However, if you have a controller read lock
252 * you may take any namespace lock.
253 * 7. There is no ability to upgrade a read lock to a write lock.
254 * 8. There is no recursive locking.
255 *
256 * While there's a lot there to keep track of, the goals of these are to
257 * constrain things so as to avoid deadlock. This is more complex than the
258 * original implementation in the driver which only allowed for an exclusive
259 * open that was tied to the thread. The first issue with tying this to the
260 * thread was that it didn't work well for software that utilized thread
261 * pools, like complex daemons. The second issue is that we want the ability for
262 * daemons, such as a FRU monitor, to be able to retain a file descriptor to the
263 * device without blocking others from taking action except during critical
264 * periods.
265 *
266 * In particular to enable something like libnvme, we didn't want someone to
267 * have to open and close the file descriptor to change what kind of exclusive
268 * access they desired.
269 *
270 * There are two different sets of data structures that we employ for tracking
271 * locking information:
272 *
273 * 1) The nvme_lock_t structure is contained in both the nvme_t and the
274 * nvme_namespace_t and tracks the current writer, readers, and pending writers
275 * and readers. Each of these lists or the writer pointer all refer to our
276 * second data structure.
277 *
278 * When a lock is owned by a single writer, then the nl_writer field is set to a
279 * specific minor's lock data structure. If instead readers are present, then
280 * the nl_readers list_t is not empty. An invariant of the system is that if
281 * nl_writer is non-NULL, nl_readers must be empty and conversely, if nl_readers
282 * is not empty, nl_writer must be NULL.
283 *
284 * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one
285 * information structure which represents the minor's controller lock and a
286 * second one that represents the minor's namespace lock. The members of this
287 * are broken into tracking what the current lock is and what it targets. It
288 * also has several members that are intended for debugging (nli_last_change,
289 * nli_acq_kthread, etc.).
290 *
291 * While the minor has two different lock information structures, our rules
292 * ensure that only one of the two can be pending and that they shouldn't result
293 * in a deadlock. When a lock is pending, the caller is sleeping on the minor's
294 * nm_cv member.
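 *
 * The invariant described in (1) above can be summarized as an assertion over
 * the nvme_lock_t members (a sketch; the driver may express this differently):
 *
 *	VERIFY(lock->nl_writer == NULL || list_is_empty(&lock->nl_readers));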
295 *
296 * These relationships are represented in the following image which shows a
297 * controller write lock being held with pending readers on the controller
298 * lock and pending writers on one of the controller's namespaces.
299 *
300 * +---------+
301 * | nvme_t |
302 * | |
303 * | n_lock -|-------+
304 * | n_ns -+ | | +-----------------------------+
305 * +-------|-+ +-----------------+ | nvme_minor_t |
306 * | | nvme_lock_t | | |
307 * | | | | +------------------------+ |
308 * | | writer --|-------------->| nvme_minor_lock_info_t | |
309 * | | reader list | | | nm_ctrl_lock | |
310 * | | pending writers | | +------------------------+ |
311 * | | pending readers |------+ | +------------------------+ |
312 * | +-----------------+ | | | nvme_minor_lock_info_t | |
313 * | | | | nm_ns_lock | |
314 * | | | +------------------------+ |
315 * | | +-----------------------------+
316 * +------------------+ | +-----------------+
317 * | nvme_namespace_t | | | nvme_minor_t |
318 * | | | | |
319 * | ns_lock ---+ | | | +-------------+ |
320 * +------------|-----+ +-----------------|>|nm_ctrl_lock | |
321 * | | +-------------+ |
322 * v +-----------------+
323 * +------------------+ ...
324 * | nvme_lock_t | +-----------------+
325 * | | | nvme_minor_t |
326 * | writer | | |
327 * | reader list | | +-------------+ |
328 * | pending writers -|-----------------+ | |nm_ctrl_lock | |
329 * | pending readers | | | +-------------+ |
330 * +------------------+ | +-----------------+
331 * +-----------------------------+ | +-----------------------------+
332 * | nvme_minor_t | | | nvme_minor_t |
333 * | | | | |
334 * | +------------------------+ | | | +------------------------+ |
335 * | | nvme_minor_lock_info_t | | | | | nvme_minor_lock_info_t | |
336 * | | nm_ctrl_lock | | | | | nm_ctrl_lock | |
337 * | +------------------------+ | | | +------------------------+ |
338 * | +------------------------+ | v | +------------------------+ |
339 * | | nvme_minor_lock_info_t |-|-----|->| nvme_minor_lock_info_t | |
340 * | | nm_ns_lock | | | | nm_ns_lock | |
341 * | +------------------------+ | | +------------------------+ |
342 * +-----------------------------+ +-----------------------------+
343 *
344 * ----------------
345 * Blkdev Interface
346 * ----------------
347 *
348 * This driver uses blkdev to do all the heavy lifting involved with presenting
349 * a disk device to the system. As a result, the processing of I/O requests is
350 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
351 * setup, and splitting of transfers into manageable chunks.
352 *
353 * I/O requests coming in from blkdev are turned into NVM commands and posted to
354 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
355 * queues. There is currently no timeout handling of I/O commands.
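 *
 * In other words, queue selection amounts to roughly the following, where the
 * queue array and count names are illustrative:
 *
 *	ioq = io_queues[CPU->cpu_seqid % nqueues];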
356 *
357 * Blkdev also supports querying device/media information and generating a
358 * devid. The driver reports the best block size as determined by the namespace
359 * format back to blkdev as physical block size to support partition and block
360 * alignment. The devid is either based on the namespace GUID or EUI64, if
361 * present, or composed using the device vendor ID, model number, serial number,
362 * and the namespace ID.
363 *
364 * --------------
365 * Error Handling
366 * --------------
367 *
368 * Error handling is currently limited to detecting fatal hardware errors,
369 * either by asynchronous events, or synchronously through command status or
370 * admin command timeouts. In case of severe errors the device is fenced off and
371 * all further requests will return EIO. FMA is then called to fault the device.
372 *
373 * The hardware has a limit for outstanding asynchronous event requests. Before
374 * this limit is known the driver assumes it is at least 1 and posts a single
375 * asynchronous request. Later when the limit is known more asynchronous event
376 * requests are posted to allow quicker reception of error information. When an
377 * asynchronous event is posted by the hardware the driver will parse the error
378 * status fields and log information or fault the device, depending on the
379 * severity of the asynchronous event. The asynchronous event request is then
380 * reused and posted to the admin queue again.
381 *
382 * On command completion the command status is checked for errors. In case of
383 * errors indicating a driver bug the driver panics. Almost all other error
384 * status values just cause EIO to be returned.
385 *
386 * Command timeouts are currently detected for all admin commands except
387 * asynchronous event requests. If a command times out and the hardware appears
388 * to be healthy the driver attempts to abort the command. The abort command
389 * timeout is a separate tunable but the original command timeout will be used
390 * if it is greater. If the abort also times out, the driver assumes the device
391 * to be dead, fences it off, and calls FMA to retire it. In all other cases
392 * the aborted command should return immediately with a status indicating it
393 * was aborted, and the driver will wait indefinitely for that to happen. No
394 * timeout handling of normal I/O commands is presently done.
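 *
 * Put as an expression, the effective abort timeout is (using the tunable
 * defined later in this file):
 *
 *	abort_timeout = MAX(nvme_abort_cmd_timeout, <timeout of aborted command>);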
395 *
396 * Any command that times out due to the controller dropping dead will be put on
397 * the nvme_lost_cmds list if it references DMA memory. This will prevent the DMA
398 * memory from being reused by the system and later being written to by a "dead"
399 * NVMe controller.
400 *
401 * -------
402 * Locking
403 * -------
404 *
405 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
406 * when accessing shared state and submission queue registers, ncq_mutex
407 * is held when accessing completion queue state and registers.
408 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
409 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
410 * mutexes themselves.
411 *
412 * Each command also has its own nc_mutex, which is associated with the
413 * condition variable nc_cv. It is only used on admin commands which are run
414 * synchronously. In that case it must be held across calls to
415 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
416 * nvme_admin_cmd(). It must also be held whenever the completion state of the
417 * command is changed or while an admin command timeout is handled.
418 *
419 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
420 * More than one nc_mutex may only be held when aborting commands. In this case,
421 * the nc_mutex of the command to be aborted must be held across the call to
422 * nvme_abort_cmd() to prevent the command from completing while the abort is in
423 * progress.
424 *
425 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
426 * acquired first. More than one nq_mutex is never held by a single thread.
427 * The ncq_mutex is only held by nvme_retrieve_cmd() and
428 * nvme_process_iocq(). nvme_process_iocq() is only called from the
429 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
430 * mutex is non-contentious but is required for implementation completeness
431 * and safety.
432 *
433 * Each nvme_t has an n_admin_stat_mutex that protects the admin command
434 * statistics structure. If this is taken in conjunction with any other locks,
435 * then it must be taken last.
436 *
437 * There is one mutex n_minor_mutex which protects all open flags nm_open and
438 * exclusive-open thread pointers nm_oexcl of each minor node associated with a
439 * controller and its namespaces.
440 *
441 * In addition, there is a logical namespace management mutex which protects the
442 * data about namespaces. When interrogating the metadata of any namespace, this
443 * lock must be held. This gets tricky as we need to call into blkdev, which may
444 * issue callbacks into us that want this lock. It is illegal to hold locks
445 * across those blkdev calls, as otherwise they might lead to deadlock (blkdev
446 * leverages ndi_devi_enter()).
447 *
448 * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev
449 * read-only. The idea is that most callers will use the NVME level which says
450 * this is a full traditional mutex operation. The BDRO level is used by blkdev
451 * callback functions and is a promise to only read the data. When a blkdev
452 * operation starts, the lock holder will use nvme_mgmt_bd_start(). This
453 * strictly speaking drops the mutex, but records that the lock is logically
454 * held by the thread that did the start() operation.
455 *
456 * During this time, other threads (or even the same one) may end up calling
457 * into nvme_mgmt_lock(). Only one thread may still hold the lock at any time;
458 * however, the BDRO level will be allowed to proceed during this time. This
459 * allows us to make consistent progress and honor the blkdev lock ordering
460 * requirements, albeit not as straightforwardly as with a simple mutex.
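 *
 * A typical caller therefore looks roughly like the following sketch, where
 * bd_attach_handle() stands in for any re-entrant blkdev call and the handle
 * and dev_info arguments are illustrative:
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	... examine or update namespace data ...
 *	nvme_mgmt_bd_start(nvme);
 *	(void) bd_attach_handle(dip, ns->ns_bd_hdl);	may call back into us
 *	nvme_mgmt_bd_end(nvme);
 *	nvme_mgmt_unlock(nvme);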
461 *
462 * ---------------------
463 * Quiesce / Fast Reboot
464 * ---------------------
465 *
466 * The driver currently does not support fast reboot. A quiesce(9E) entry point
467 * is still provided which is used to send a shutdown notification to the
468 * device.
469 *
470 *
471 * ------------
472 * NVMe Hotplug
473 * ------------
474 *
475 * The driver supports hot removal. The driver uses the NDI event framework
476 * to register a callback, nvme_remove_callback, to clean up when a disk is
477 * removed. In particular, the driver will unqueue outstanding I/O commands and
478 * set n_dead on the softstate to true so that other operations, such as ioctls
479 * and command submissions, fail as well.
480 *
481 * While the callback registration relies on the NDI event framework, the
482 * removal event itself is kicked off in the PCIe hotplug framework, when the
483 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
484 * device was removed from the slot.
485 *
486 * The NVMe driver instance itself will remain until the final close of the
487 * device.
488 *
489 * ---------------
490 * DDI UFM Support
491 * ---------------
492 *
493 * The driver supports the DDI UFM framework for reporting information about
494 * the device's firmware image and slot configuration. This data can be
495 * queried by userland software via ioctls to the ufm driver. For more
496 * information, see ddi_ufm(9E).
497 *
498 * --------------------
499 * Driver Configuration
500 * --------------------
501 *
502 * The following driver properties can be changed to control some aspects of the
503 * driver's operation:
504 * - strict-version: can be set to 0 to allow devices conforming to newer
505 * major versions to be used
506 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
507 * specific command status as a fatal error leading to device faulting
508 * - admin-queue-len: the maximum length of the admin queue (16-4096)
509 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
510 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
511 * - async-event-limit: the maximum number of asynchronous event requests to be
512 * posted by the driver
513 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
514 * cache
515 * - min-phys-block-size: the minimum physical block size to report to blkdev,
516 * which is among other things the basis for ZFS vdev ashift
517 * - max-submission-queues: the maximum number of I/O submission queues.
518 * - max-completion-queues: the maximum number of I/O completion queues,
519 * can be less than max-submission-queues, in which case the completion
520 * queues are shared.
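 *
 * For example, a hypothetical nvme.conf fragment that caps the queue counts
 * and lengths might look like this (values chosen purely for illustration):
 *
 *	max-submission-queues=8;
 *	max-completion-queues=4;
 *	io-squeue-len=1024;
 *	io-cqueue-len=2048;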
521 *
522 * In addition to the above properties, some device-specific tunables can be
523 * configured using the nvme-config-list global property. The value of this
524 * property is a list of triplets. The formal syntax is:
525 *
526 * nvme-config-list ::= <triplet> [, <triplet>]* ;
527 * <triplet> ::= "<model>" , "<rev-list>" , "<tuple-list>"
528 * <rev-list> ::= [ <fwrev> [, <fwrev>]*]
529 * <tuple-list> ::= <tunable> [, <tunable>]*
530 * <tunable> ::= <name> : <value>
531 *
532 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
533 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
534 * contains one or more tunables to apply to all controllers that match the
535 * specified model number and optionally firmware revision. Each <tunable> is a
536 * <name> : <value> pair. Supported tunables are:
537 *
538 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
539 * specific command status as a fatal error leading to device faulting
540 *
541 * - min-phys-block-size: the minimum physical block size to report to blkdev,
542 * which is among other things the basis for ZFS vdev ashift
543 *
544 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
545 * volatile write cache, if present
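 *
 * For example (the model and firmware revision strings below are made up and
 * purely illustrative):
 *
 *	nvme-config-list =
 *	    "EXAMPLE MODEL A", "", "min-phys-block-size:4096",
 *	    "EXAMPLE MODEL B", "1.0,1.1", "volatile-write-cache:off";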
546 *
547 *
548 * TODO:
549 * - figure out sane default for I/O queue depth reported to blkdev
550 * - FMA handling of media errors
551 * - support for devices supporting very large I/O requests using chained PRPs
552 * - support for configuring hardware parameters like interrupt coalescing
553 * - support for big-endian systems
554 * - support for fast reboot
555 * - support for NVMe Subsystem Reset (1.1)
556 * - support for Scatter/Gather lists (1.1)
557 * - support for Reservations (1.1)
558 * - support for power management
559 */
560
561 #include <sys/byteorder.h>
562 #ifdef _BIG_ENDIAN
563 #error nvme driver needs porting for big-endian platforms
564 #endif
565
566 #include <sys/modctl.h>
567 #include <sys/conf.h>
568 #include <sys/devops.h>
569 #include <sys/ddi.h>
570 #include <sys/ddi_ufm.h>
571 #include <sys/sunddi.h>
572 #include <sys/sunndi.h>
573 #include <sys/bitmap.h>
574 #include <sys/sysmacros.h>
575 #include <sys/param.h>
576 #include <sys/varargs.h>
577 #include <sys/cpuvar.h>
578 #include <sys/disp.h>
579 #include <sys/blkdev.h>
580 #include <sys/atomic.h>
581 #include <sys/archsystm.h>
582 #include <sys/sata/sata_hba.h>
583 #include <sys/stat.h>
584 #include <sys/policy.h>
585 #include <sys/list.h>
586 #include <sys/dkio.h>
587 #include <sys/pci.h>
588 #include <sys/mkdev.h>
589
590 #include <sys/nvme.h>
591
592 #ifdef __x86
593 #include <sys/x86_archext.h>
594 #endif
595
596 #include "nvme_reg.h"
597 #include "nvme_var.h"
598
599 /*
600 * Assertions to make sure that we've properly captured various aspects of the
601 * packed structures and haven't broken them during updates.
602 */
603 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
604 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
605 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
606 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
607 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
608 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
609 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
610 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
611
612 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
613 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
614 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
615 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
616 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
617 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
618
619 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
620 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);
621
622 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
623 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
624 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
625
626 CTASSERT(sizeof (nvme_nschange_list_t) == 4096);
627
628 /* NVMe spec version supported */
629 static const int nvme_version_major = 2;
630
631 /* Tunable for FORMAT NVM command timeout in seconds, default is 600s */
632 uint32_t nvme_format_cmd_timeout = 600;
633
634 /* Tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
635 uint32_t nvme_commit_save_cmd_timeout = 15;
636
637 /*
638 * Tunable for the admin command timeout used for commands other than those
639 * with their own timeouts defined above; in seconds. While most commands are
640 * expected to complete very quickly (sub-second), experience has shown that
641 * some controllers can occasionally be a bit slower, and not always consistent
642 * in the time taken - times of up to around 4.2s have been observed. Setting
643 * this to 15s by default provides headroom.
644 */
645 uint32_t nvme_admin_cmd_timeout = 15;
646
647 /*
648 * Tunable for abort command timeout in seconds, default is 60s. This timeout
649 * is used when issuing an abort command, currently only in response to a
650 * different admin command timing out. Aborts always complete after the command
651 * that they are attempting to abort so we need to allow enough time for the
652 * controller to process the long running command that we are attempting to
653 * abort. The abort timeout here is only used if it is greater than the timeout
654 * for the command that is being aborted.
655 */
656 uint32_t nvme_abort_cmd_timeout = 60;
657
658 /*
659 * Tunable for the size of arbitrary vendor specific admin commands,
660 * default is 16MiB.
661 */
662 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;
663
664 /*
665 * Tunable for the max timeout of arbitrary vendor specific admin commands,
666 * default is 60s.
667 */
668 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
669
670 /*
671 * This ID space, AVL, and lock are used for keeping track of minor state across
672 * opens between different devices.
673 */
674 static id_space_t *nvme_open_minors;
675 static avl_tree_t nvme_open_minors_avl;
676 kmutex_t nvme_open_minors_mutex;
677
678 /*
679 * Removal taskq used for n_dead callback processing.
680 */
681 taskq_t *nvme_dead_taskq;
682
683 /*
684 * This enumeration is used in tandem with nvme_mgmt_lock() to describe which
685 * form of the lock is being taken. See the theory statement for more context.
686 */
687 typedef enum {
688 /*
689 * This is the primary form of taking the management lock and indicates
690 * that the user intends to do a read/write of it. This should always be
691 * used for any ioctl paths or truly anything other than a blkdev
692 * information operation.
693 */
694 NVME_MGMT_LOCK_NVME,
695 /*
696 * This is a subordinate form of the lock whereby the user is in blkdev
697 * callback context and will only intend to read the namespace data.
698 */
699 NVME_MGMT_LOCK_BDRO
700 } nvme_mgmt_lock_level_t;
701
702 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
703 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
704 static int nvme_quiesce(dev_info_t *);
705 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
706 static int nvme_setup_interrupts(nvme_t *, int, int);
707 static void nvme_release_interrupts(nvme_t *);
708 static uint_t nvme_intr(caddr_t, caddr_t);
709
710 static void nvme_shutdown(nvme_t *, boolean_t);
711 static boolean_t nvme_reset(nvme_t *, boolean_t);
712 static int nvme_init(nvme_t *);
713 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
714 static void nvme_free_cmd(nvme_cmd_t *);
715 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
716 bd_xfer_t *);
717 static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
718 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
719 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
720 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
721 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
722 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
723 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
724 static void nvme_wakeup_cmd(void *);
725 static void nvme_async_event_task(void *);
726
727 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
728 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
729 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
730 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
731 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
732 static inline int nvme_check_cmd_status(nvme_cmd_t *);
733 static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
734 nvme_ioctl_common_t *);
735
736 static int nvme_abort_cmd(nvme_cmd_t *, const uint32_t);
737 static void nvme_async_event(nvme_t *);
738 static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
739 static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
740 uint8_t);
741 static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
742 void **);
743 static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
744 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
745 uint32_t *);
746 static int nvme_write_cache_set(nvme_t *, boolean_t);
747 static int nvme_set_nqueues(nvme_t *);
748
749 static void nvme_free_dma(nvme_dma_t *);
750 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
751 nvme_dma_t **);
752 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
753 nvme_dma_t **);
754 static void nvme_free_qpair(nvme_qpair_t *);
755 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
756 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
757
758 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
759 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
760 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
761 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
762
763 static boolean_t nvme_check_regs_hdl(nvme_t *);
764 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
765
766 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);
767
768 static void nvme_bd_xfer_done(void *);
769 static void nvme_bd_driveinfo(void *, bd_drive_t *);
770 static int nvme_bd_mediainfo(void *, bd_media_t *);
771 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
772 static int nvme_bd_read(void *, bd_xfer_t *);
773 static int nvme_bd_write(void *, bd_xfer_t *);
774 static int nvme_bd_sync(void *, bd_xfer_t *);
775 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
776 static int nvme_bd_free_space(void *, bd_xfer_t *);
777
778 static int nvme_prp_dma_constructor(void *, void *, int);
779 static void nvme_prp_dma_destructor(void *, void *);
780
781 static void nvme_prepare_devid(nvme_t *, uint32_t);
782
783 /* DDI UFM callbacks */
784 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
785 ddi_ufm_image_t *);
786 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
787 ddi_ufm_slot_t *);
788 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
789
790 static int nvme_open(dev_t *, int, int, cred_t *);
791 static int nvme_close(dev_t, int, int, cred_t *);
792 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
793
794 static int nvme_init_ns(nvme_t *, uint32_t);
795 static boolean_t nvme_bd_attach_ns(nvme_t *, nvme_ioctl_common_t *);
796 static boolean_t nvme_bd_detach_ns(nvme_t *, nvme_ioctl_common_t *);
797
798 static int nvme_minor_comparator(const void *, const void *);
799
800 static ddi_ufm_ops_t nvme_ufm_ops = {
801 NULL,
802 nvme_ufm_fill_image,
803 nvme_ufm_fill_slot,
804 nvme_ufm_getcaps
805 };
806
807 /*
808 * Minor numbers are split amongst those used for controllers and for device
809 * opens. The number of controller minors are limited based open MAXMIN32 per
810 * the theory statement. We allocate 1 million minors as a total guess at a
811 * number that'll probably be enough. The starting point of the open minors can
812 * be shifted to accommodate future expansion of the NVMe device minors.
813 */
814 #define NVME_MINOR_INST_SHIFT 9
815 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
816 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT)
817 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
818 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2)
819
820 #define NVME_OPEN_NMINORS (1024 * 1024)
821 #define NVME_OPEN_MINOR_MIN (MAXMIN32 + 1)
822 #define NVME_OPEN_MINOR_MAX_EXCL (NVME_OPEN_MINOR_MIN + \
823 NVME_OPEN_NMINORS)
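
/*
 * To illustrate the macros above: NVME_MINOR(2, 3) is (2 << 9) | 3 == 1027,
 * NVME_MINOR_INST(1027) == 2, and NVME_MINOR_NSID(1027) == 3. Cloning open
 * minors start one past MAXMIN32 (0x3ffff), so they can never collide with the
 * controller and namespace minors below that limit.
 */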
824
825 #define NVME_BUMP_STAT(nvme, stat) \
826 atomic_inc_64(&nvme->n_device_stat.nds_ ## stat.value.ui64)
827
828 static void *nvme_state;
829 static kmem_cache_t *nvme_cmd_cache;
830
831 /*
832 * DMA attributes for queue DMA memory
833 *
834 * Queue DMA memory must be page aligned. The maximum length of a queue is
835 * 65536 entries, and an entry can be 64 bytes long.
836 */
837 static const ddi_dma_attr_t nvme_queue_dma_attr = {
838 .dma_attr_version = DMA_ATTR_V0,
839 .dma_attr_addr_lo = 0,
840 .dma_attr_addr_hi = 0xffffffffffffffffULL,
841 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
842 .dma_attr_align = 0x1000,
843 .dma_attr_burstsizes = 0x7ff,
844 .dma_attr_minxfer = 0x1000,
845 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
846 .dma_attr_seg = 0xffffffffffffffffULL,
847 .dma_attr_sgllen = 1,
848 .dma_attr_granular = 1,
849 .dma_attr_flags = 0,
850 };
851
852 /*
853 * DMA attributes for transfers using Physical Region Page (PRP) entries
854 *
855 * A PRP entry describes one page of DMA memory using the page size specified
856 * in the controller configuration's memory page size register (CC.MPS). It uses
857 * a 64bit base address aligned to this page size. There is no limitation on
858 * chaining PRPs together for arbitrarily large DMA transfers. These DMA
859 * attributes will be copied into the nvme_t during nvme_attach() and the
860 * dma_attr_maxxfer will be updated.
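 *
 * As a worked example: with the common 4 KiB memory page size, a 64 KiB
 * transfer spans 16 pages. PRP1 in the command covers the first page and PRP2
 * points to a PRP list holding the remaining 15 entries; a transfer of two
 * pages or fewer needs no list, as PRP2 can point directly at the second page.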
861 */
862 static const ddi_dma_attr_t nvme_prp_dma_attr = {
863 .dma_attr_version = DMA_ATTR_V0,
864 .dma_attr_addr_lo = 0,
865 .dma_attr_addr_hi = 0xffffffffffffffffULL,
866 .dma_attr_count_max = 0xfff,
867 .dma_attr_align = 0x1000,
868 .dma_attr_burstsizes = 0x7ff,
869 .dma_attr_minxfer = 0x1000,
870 .dma_attr_maxxfer = 0x1000,
871 .dma_attr_seg = 0xfff,
872 .dma_attr_sgllen = -1,
873 .dma_attr_granular = 1,
874 .dma_attr_flags = 0,
875 };
876
877 /*
878 * DMA attributes for transfers using scatter/gather lists
879 *
880 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
881 * 32bit length field. SGL Segment and SGL Last Segment entries require the
882 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied
883 * into the nvme_t, they are not currently used for any I/O.
884 */
885 static const ddi_dma_attr_t nvme_sgl_dma_attr = {
886 .dma_attr_version = DMA_ATTR_V0,
887 .dma_attr_addr_lo = 0,
888 .dma_attr_addr_hi = 0xffffffffffffffffULL,
889 .dma_attr_count_max = 0xffffffffUL,
890 .dma_attr_align = 1,
891 .dma_attr_burstsizes = 0x7ff,
892 .dma_attr_minxfer = 0x10,
893 .dma_attr_maxxfer = 0xfffffffffULL,
894 .dma_attr_seg = 0xffffffffffffffffULL,
895 .dma_attr_sgllen = -1,
896 .dma_attr_granular = 0x10,
897 .dma_attr_flags = 0
898 };
899
900 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
901 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
902 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
903 .devacc_attr_dataorder = DDI_STRICTORDER_ACC
904 };
905
906 /*
907 * ioctl validation policies. These are policies that determine which namespaces
908 * are allowed or disallowed for various operations. Note, all policy items
909 * should be explicitly listed here to help make it clear what our intent is.
910 * That is also why some of these are identical or repeated when they cover
911 * different ioctls.
912 */
913
914 /*
915 * The controller information ioctl generally contains read-only information
916 * about the controller that is sourced from multiple different pieces of
917 * information. This does not operate on a namespace and none are accepted.
918 */
919 static const nvme_ioctl_check_t nvme_check_ctrl_info = {
920 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
921 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
922 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
923 };
924
925 /*
926 * The kernel namespace information requires a namespace ID to be specified. It
927 * does not allow for the broadcast ID to be specified.
928 */
929 static const nvme_ioctl_check_t nvme_check_ns_info = {
930 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
931 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
932 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
933 };
934
935 /*
936 * Identify commands are allowed to operate on a namespace minor. Unfortunately,
937 * the namespace field in identify commands is a bit weird. In particular, some
938 * commands need a valid namespace, while others are namespace listing
939 * operations, which means illegal namespaces like zero are allowed.
940 */
941 static const nvme_ioctl_check_t nvme_check_identify = {
942 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
943 .nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
944 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
945 };
946
947 /*
948 * The get log page command requires the ability to specify namespaces. When
949 * targeting the controller, one must use the broadcast NSID.
950 */
951 static const nvme_ioctl_check_t nvme_check_get_logpage = {
952 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
953 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
954 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
955 };
956
957 /*
958 * When getting a feature, we do not want rewriting behavior as most features do
959 * not require a namespace to be specified. Specific instances are checked in
960 * nvme_validate_get_feature().
961 */
962 static const nvme_ioctl_check_t nvme_check_get_feature = {
963 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
964 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
965 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
966 };
967
968 /*
969 * Format commands must target a namespace. The broadcast namespace must be used
970 * when referring to the controller.
971 */
972 static const nvme_ioctl_check_t nvme_check_format = {
973 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
974 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
975 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
976 };
977
978 /*
979 * blkdev and controller attach and detach must always target a namespace.
980 * However, the broadcast namespace is not allowed. We still perform rewriting
981 * so that specifying the controller node with 0 will be caught.
982 */
983 static const nvme_ioctl_check_t nvme_check_attach_detach = {
984 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
985 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
986 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
987 };
988
989 /*
990 * Namespace creation operations cannot target a namespace as the new namespace
991 * ID will be returned in the operation. This operation requires the entire
992 * controller lock to be owned as one has to coordinate this operation with all
993 * of the actual namespace logic that's present.
994 */
995 static const nvme_ioctl_check_t nvme_check_ns_create = {
996 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
997 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
998 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_CTRL
999 };
1000
1001 /*
1002 * NVMe namespace delete must always target a namespace. The broadcast namespace
1003 * isn't allowed. We perform rewriting so that we can catch this.
1004 * Importantly this only requires holding an exclusive lock on the namespace,
1005 * not on the whole device like creating a namespace does. Note, we don't allow
1006 * this on the namespace minor itself as part of our path towards transitioning
1007 * away from its use.
1008 */
1009 static const nvme_ioctl_check_t nvme_check_ns_delete = {
1010 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
1011 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
1012 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
1013 };
1014
1015 /*
1016 * Firmware operations must not target a namespace and are only allowed from the
1017 * controller.
1018 */
1019 static const nvme_ioctl_check_t nvme_check_firmware = {
1020 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
1021 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
1022 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
1023 };
1024
1025 /*
1026 * Passthru commands are an odd set. We only allow them from the primary
1027 * controller; however, we allow a namespace to be specified in them and allow
1028 * the broadcast namespace. We do not perform rewriting because we don't know
1029 * what the semantics are. We explicitly exempt passthru commands from needing
1030 * an exclusive lock and leave it up to them to tell us the impact of the
1031 * command and semantics. As this is a privileged interface and the semantics
1032 * are arbitrary, there's not much we can do without some assistance from the
1033 * consumer.
1034 */
1035 static const nvme_ioctl_check_t nvme_check_passthru = {
1036 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
1037 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
1038 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
1039 };
1040
1041 /*
1042 * Lock operations are allowed to target a namespace, but must not be rewritten.
1043 * There is no support for the broadcast namespace. This is the only ioctl that
1044 * should skip exclusive checking as it's used to grant it.
1045 */
1046 static const nvme_ioctl_check_t nvme_check_locking = {
1047 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
1048 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
1049 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
1050 };
1051
1052 /*
1053 * These data tables indicate how we handle the various states a namespace may
1054 * be in before we put it through the namespace state transition diagram. Note,
1055 * namespace creation does not allow one to specify a namespace ID, therefore
1056 * it doesn't have a set of entries here.
1057 *
1058 * See Namespace Support in the theory statement for more information.
1059 */
1060 static const nvme_ioctl_errno_t nvme_ns_delete_states[] = {
1061 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1062 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
1063 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1064 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1065 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1066 };
1067
1068 static const nvme_ioctl_errno_t nvme_ctrl_attach_states[] = {
1069 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1070 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
1071 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1072 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1073 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1074 };
1075
1076 static const nvme_ioctl_errno_t nvme_ctrl_detach_states[] = {
1077 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1078 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
1079 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_OK,
1080 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
1081 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1082 };
1083
1084 static const nvme_ioctl_errno_t nvme_bd_attach_states[] = {
1085 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1086 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
1087 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_UNSUP_ATTACH_NS,
1088 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
1089 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH,
1090 };
1091
1092 static const nvme_ioctl_errno_t nvme_bd_detach_states[] = {
1093 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1094 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
1095 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1096 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1097 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_OK,
1098 };
1099
1100 static const nvme_ioctl_errno_t nvme_format_nvm_states[] = {
1101 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1102 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
1103 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_OK,
1104 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
1105 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1106 };
1107
1108 static struct cb_ops nvme_cb_ops = {
1109 .cb_open = nvme_open,
1110 .cb_close = nvme_close,
1111 .cb_strategy = nodev,
1112 .cb_print = nodev,
1113 .cb_dump = nodev,
1114 .cb_read = nodev,
1115 .cb_write = nodev,
1116 .cb_ioctl = nvme_ioctl,
1117 .cb_devmap = nodev,
1118 .cb_mmap = nodev,
1119 .cb_segmap = nodev,
1120 .cb_chpoll = nochpoll,
1121 .cb_prop_op = ddi_prop_op,
1122 .cb_str = 0,
1123 .cb_flag = D_NEW | D_MP,
1124 .cb_rev = CB_REV,
1125 .cb_aread = nodev,
1126 .cb_awrite = nodev
1127 };
1128
1129 static struct dev_ops nvme_dev_ops = {
1130 .devo_rev = DEVO_REV,
1131 .devo_refcnt = 0,
1132 .devo_getinfo = ddi_no_info,
1133 .devo_identify = nulldev,
1134 .devo_probe = nulldev,
1135 .devo_attach = nvme_attach,
1136 .devo_detach = nvme_detach,
1137 .devo_reset = nodev,
1138 .devo_cb_ops = &nvme_cb_ops,
1139 .devo_bus_ops = NULL,
1140 .devo_power = NULL,
1141 .devo_quiesce = nvme_quiesce,
1142 };
1143
1144 static struct modldrv nvme_modldrv = {
1145 .drv_modops = &mod_driverops,
1146 .drv_linkinfo = "NVMe driver",
1147 .drv_dev_ops = &nvme_dev_ops
1148 };
1149
1150 static struct modlinkage nvme_modlinkage = {
1151 .ml_rev = MODREV_1,
1152 .ml_linkage = { &nvme_modldrv, NULL }
1153 };
1154
1155 static bd_ops_t nvme_bd_ops = {
1156 .o_version = BD_OPS_CURRENT_VERSION,
1157 .o_drive_info = nvme_bd_driveinfo,
1158 .o_media_info = nvme_bd_mediainfo,
1159 .o_devid_init = nvme_bd_devid,
1160 .o_sync_cache = nvme_bd_sync,
1161 .o_read = nvme_bd_read,
1162 .o_write = nvme_bd_write,
1163 .o_free_space = nvme_bd_free_space,
1164 };
1165
1166 /*
1167 * This list will hold commands that have timed out and couldn't be aborted.
1168 * As we don't know what the hardware may still do with the DMA memory we can't
1169 * free them, so we'll keep them forever on this list where we can easily look
1170 * at them with mdb.
1171 */
1172 static struct list nvme_lost_cmds;
1173 static kmutex_t nvme_lc_mutex;
1174
1175 int
1176 _init(void)
1177 {
1178 int error;
1179
1180 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
1181 if (error != DDI_SUCCESS)
1182 return (error);
1183
1184 if ((nvme_open_minors = id_space_create("nvme_open_minors",
1185 NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
1186 ddi_soft_state_fini(&nvme_state);
1187 return (ENOMEM);
1188 }
1189
1190 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
1191 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1192
1193 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
1194 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
1195 offsetof(nvme_cmd_t, nc_list));
1196
1197 mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
1198 avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
1199 sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));
1200
1201 nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
1202 TASKQ_PREPOPULATE);
1203
1204 bd_mod_init(&nvme_dev_ops);
1205
1206 error = mod_install(&nvme_modlinkage);
1207 if (error != DDI_SUCCESS) {
1208 ddi_soft_state_fini(&nvme_state);
1209 id_space_destroy(nvme_open_minors);
1210 mutex_destroy(&nvme_lc_mutex);
1211 list_destroy(&nvme_lost_cmds);
1212 bd_mod_fini(&nvme_dev_ops);
1213 mutex_destroy(&nvme_open_minors_mutex);
1214 avl_destroy(&nvme_open_minors_avl);
1215 taskq_destroy(nvme_dead_taskq);
1216 }
1217
1218 return (error);
1219 }
1220
1221 int
1222 _fini(void)
1223 {
1224 int error;
1225
1226 if (!list_is_empty(&nvme_lost_cmds))
1227 return (DDI_FAILURE);
1228
1229 error = mod_remove(&nvme_modlinkage);
1230 if (error == DDI_SUCCESS) {
1231 ddi_soft_state_fini(&nvme_state);
1232 id_space_destroy(nvme_open_minors);
1233 kmem_cache_destroy(nvme_cmd_cache);
1234 mutex_destroy(&nvme_lc_mutex);
1235 list_destroy(&nvme_lost_cmds);
1236 bd_mod_fini(&nvme_dev_ops);
1237 mutex_destroy(&nvme_open_minors_mutex);
1238 avl_destroy(&nvme_open_minors_avl);
1239 taskq_destroy(nvme_dead_taskq);
1240 }
1241
1242 return (error);
1243 }
1244
1245 int
1246 _info(struct modinfo *modinfop)
1247 {
1248 return (mod_info(&nvme_modlinkage, modinfop));
1249 }
1250
1251 static inline void
1252 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
1253 {
1254 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1255
1256 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1257 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
1258 }
1259
1260 static inline void
1261 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
1262 {
1263 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1264
1265 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1266 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
1267 }
1268
1269 static inline uint64_t
1270 nvme_get64(nvme_t *nvme, uintptr_t reg)
1271 {
1272 uint64_t val;
1273
1274 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1275
1276 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1277 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
1278
1279 return (val);
1280 }
1281
1282 static inline uint32_t
1283 nvme_get32(nvme_t *nvme, uintptr_t reg)
1284 {
1285 uint32_t val;
1286
1287 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1288
1289 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1290 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
1291
1292 return (val);
1293 }
1294
1295 static void
1296 nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
1297 {
1298 ASSERT3U(lock->nml_bd_own, ==, 0);
1299 mutex_destroy(&lock->nml_lock);
1300 cv_destroy(&lock->nml_cv);
1301 }
1302
1303 static void
1304 nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
1305 {
1306 mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
1307 cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
1308 lock->nml_bd_own = 0;
1309 }
1310
1311 static void
1312 nvme_mgmt_unlock(nvme_t *nvme)
1313 {
1314 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1315
1316 cv_broadcast(&lock->nml_cv);
1317 mutex_exit(&lock->nml_lock);
1318 }
1319
1320 static boolean_t
1321 nvme_mgmt_lock_held(const nvme_t *nvme)
1322 {
1323 return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
1324 }
1325
1326 static void
1327 nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
1328 {
1329 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1330 mutex_enter(&lock->nml_lock);
1331 while (lock->nml_bd_own != 0) {
1332 if (level == NVME_MGMT_LOCK_BDRO)
1333 break;
1334 cv_wait(&lock->nml_cv, &lock->nml_lock);
1335 }
1336 }
1337
1338 /*
1339 * This and nvme_mgmt_bd_end() are used to indicate that the driver is going to
1340 * be calling into a re-entrant blkdev related function. We cannot hold the lock
1341 * across such an operation and therefore must indicate that this is logically
1342 * held, while allowing other operations to proceed. nvme_mgmt_bd_end() may
1343 * only be called by the thread that already holds the nvme_mgmt_lock().
1344 */
1345 static void
1346 nvme_mgmt_bd_start(nvme_t *nvme)
1347 {
1348 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1349
1350 VERIFY(MUTEX_HELD(&lock->nml_lock));
1351 VERIFY3U(lock->nml_bd_own, ==, 0);
1352 lock->nml_bd_own = (uintptr_t)curthread;
1353 mutex_exit(&lock->nml_lock);
1354 }
1355
1356 static void
1357 nvme_mgmt_bd_end(nvme_t *nvme)
1358 {
1359 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1360
1361 mutex_enter(&lock->nml_lock);
1362 VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
1363 lock->nml_bd_own = 0;
1364 }
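
/*
 * A minimal sketch of the intended calling pattern for the management lock and
 * the blkdev hand-off implemented above, mirroring the namespace-change
 * handling in nvme_async_event_task() later in this file (nsid and ns are
 * placeholders here):
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	ns = nvme_nsid2ns(nvme, nsid);
 *	nvme_mgmt_bd_start(nvme);	   drops nml_lock, marks it logically
 *					   held by curthread
 *	bd_state_change(ns->ns_bd_hdl);	   re-entrant blkdev call
 *	nvme_mgmt_bd_end(nvme);		   re-acquires nml_lock
 *	nvme_mgmt_unlock(nvme);
 */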
1365
1366 static boolean_t
1367 nvme_ns_state_check(const nvme_namespace_t *ns, nvme_ioctl_common_t *ioc,
1368 const nvme_ioctl_errno_t states[NVME_NS_NSTATES])
1369 {
1370 VERIFY(nvme_mgmt_lock_held(ns->ns_nvme));
1371 VERIFY3U(ns->ns_state, <, NVME_NS_NSTATES);
1372
1373 if (states[ns->ns_state] == NVME_IOCTL_E_OK) {
1374 return (B_TRUE);
1375 }
1376
1377 return (nvme_ioctl_error(ioc, states[ns->ns_state], 0, 0));
1378 }
1379
1380 /*
1381 * This is a central clearing house for marking an NVMe controller dead and/or
1382 * removed. This takes care of setting the flag, taking care of outstanding
1383 * blocked locks, and sending a DDI FMA impact. This is called from a precarious
1384 * place where locking is suspect. The only guarantee we have is that the nvme_t
1385 * is valid and won't disappear until we return.
1386 *
1387 * This should only be used after attach has been called.
1388 */
1389 static void
1390 nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
1391 {
1392 boolean_t was_dead;
1393
1394 /*
1395 * See if we win the race to set things up here. If someone beat us to
1396 * it, we do not do anything.
1397 */
1398 was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
1399 B_TRUE);
1400
1401 /*
1402 * If we were removed, note this in our death status, regardless of
1403 * whether or not we were already dead. We need to know this so that we
1404 * can decide if it is safe to try and interact with the device in e.g.
1405 * reset and shutdown.
1406 */
1407 if (removed) {
1408 nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
1409 }
1410
1411 if (was_dead) {
1412 return;
1413 }
1414
1415 /*
1416 * If this was removed, there is no reason to change the service impact.
1417 * Otherwise, we need to change our default return code to indicate that
1418 * the device is truly dead, and not simply gone.
1419 */
1420 if (!removed) {
1421 ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
1422 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1423 }
1424
1425 taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
1426 TQ_NOSLEEP, &nvme->n_dead_tqent);
1427 }
1428
1429 static boolean_t
1430 nvme_ctrl_is_gone(const nvme_t *nvme)
1431 {
1432 if (nvme->n_dead && nvme->n_dead_status == NVME_IOCTL_E_CTRL_GONE)
1433 return (B_TRUE);
1434
1435 return (B_FALSE);
1436 }
1437
1438 static boolean_t
1439 nvme_check_regs_hdl(nvme_t *nvme)
1440 {
1441 ddi_fm_error_t error;
1442
1443 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
1444
1445 if (error.fme_status != DDI_FM_OK)
1446 return (B_TRUE);
1447
1448 return (B_FALSE);
1449 }
1450
1451 static boolean_t
1452 nvme_check_dma_hdl(nvme_dma_t *dma)
1453 {
1454 ddi_fm_error_t error;
1455
1456 if (dma == NULL)
1457 return (B_FALSE);
1458
1459 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
1460
1461 if (error.fme_status != DDI_FM_OK)
1462 return (B_TRUE);
1463
1464 return (B_FALSE);
1465 }
1466
1467 static void
1468 nvme_free_dma_common(nvme_dma_t *dma)
1469 {
1470 if (dma->nd_dmah != NULL)
1471 (void) ddi_dma_unbind_handle(dma->nd_dmah);
1472 if (dma->nd_acch != NULL)
1473 ddi_dma_mem_free(&dma->nd_acch);
1474 if (dma->nd_dmah != NULL)
1475 ddi_dma_free_handle(&dma->nd_dmah);
1476 }
1477
1478 static void
1479 nvme_free_dma(nvme_dma_t *dma)
1480 {
1481 nvme_free_dma_common(dma);
1482 kmem_free(dma, sizeof (*dma));
1483 }
1484
1485 static void
1486 nvme_prp_dma_destructor(void *buf, void *private __unused)
1487 {
1488 nvme_dma_t *dma = (nvme_dma_t *)buf;
1489
1490 nvme_free_dma_common(dma);
1491 }
1492
1493 static int
1494 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
1495 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
1496 {
1497 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
1498 &dma->nd_dmah) != DDI_SUCCESS) {
1499 /*
1500 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
1501 * the only other possible error is DDI_DMA_BADATTR, which
1502 * indicates a driver bug and should cause a panic.
1503 */
1504 dev_err(nvme->n_dip, CE_PANIC,
1505 "!failed to get DMA handle, check DMA attributes");
1506 return (DDI_FAILURE);
1507 }
1508
1509 /*
1510 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
1511 * or the flags are conflicting, which isn't the case here.
1512 */
1513 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
1514 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
1515 &dma->nd_len, &dma->nd_acch);
1516
1517 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
1518 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1519 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
1520 dev_err(nvme->n_dip, CE_WARN,
1521 "!failed to bind DMA memory");
1522 NVME_BUMP_STAT(nvme, dma_bind_err);
1523 nvme_free_dma_common(dma);
1524 return (DDI_FAILURE);
1525 }
1526
1527 return (DDI_SUCCESS);
1528 }
1529
1530 static int
1531 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
1532 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
1533 {
1534 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
1535
1536 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
1537 DDI_SUCCESS) {
1538 *ret = NULL;
1539 kmem_free(dma, sizeof (nvme_dma_t));
1540 return (DDI_FAILURE);
1541 }
1542
1543 bzero(dma->nd_memp, dma->nd_len);
1544
1545 *ret = dma;
1546 return (DDI_SUCCESS);
1547 }
1548
1549 static int
1550 nvme_prp_dma_constructor(void *buf, void *private, int flags __unused)
1551 {
1552 nvme_dma_t *dma = (nvme_dma_t *)buf;
1553 nvme_t *nvme = (nvme_t *)private;
1554
1555 dma->nd_dmah = NULL;
1556 dma->nd_acch = NULL;
1557
1558 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
1559 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
1560 return (-1);
1561 }
1562
1563 ASSERT(dma->nd_ncookie == 1);
1564
1565 dma->nd_cached = B_TRUE;
1566
1567 return (0);
1568 }
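
/*
 * The constructor/destructor pair above is meant to back a kmem cache of
 * pre-bound PRP pages (freed back through cmd->nc_prp in nvme_free_cmd()).
 * A sketch of how such a per-controller cache would typically be created
 * during attach (the cache name below is illustrative, not necessarily the
 * one the driver uses):
 *
 *	nvme->n_prp_cache = kmem_cache_create("nvme_prp_cache",
 *	    sizeof (nvme_dma_t), 0, nvme_prp_dma_constructor,
 *	    nvme_prp_dma_destructor, NULL, (void *)nvme, NULL, 0);
 */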
1569
1570 static int
1571 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
1572 uint_t flags, nvme_dma_t **dma)
1573 {
1574 uint32_t len = nentry * qe_len;
1575 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
1576
1577 len = roundup(len, nvme->n_pagesize);
1578
1579 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
1580 != DDI_SUCCESS) {
1581 dev_err(nvme->n_dip, CE_WARN,
1582 "!failed to get DMA memory for queue");
1583 goto fail;
1584 }
1585
1586 if ((*dma)->nd_ncookie != 1) {
1587 dev_err(nvme->n_dip, CE_WARN,
1588 "!got too many cookies for queue DMA");
1589 goto fail;
1590 }
1591
1592 return (DDI_SUCCESS);
1593
1594 fail:
1595 if (*dma) {
1596 nvme_free_dma(*dma);
1597 *dma = NULL;
1598 }
1599
1600 return (DDI_FAILURE);
1601 }
1602
1603 static void
1604 nvme_free_cq(nvme_cq_t *cq)
1605 {
1606 mutex_destroy(&cq->ncq_mutex);
1607
1608 if (cq->ncq_cmd_taskq != NULL)
1609 taskq_destroy(cq->ncq_cmd_taskq);
1610
1611 if (cq->ncq_dma != NULL)
1612 nvme_free_dma(cq->ncq_dma);
1613
1614 kmem_free(cq, sizeof (*cq));
1615 }
1616
1617 static void
1618 nvme_free_qpair(nvme_qpair_t *qp)
1619 {
1620 int i;
1621
1622 mutex_destroy(&qp->nq_mutex);
1623 sema_destroy(&qp->nq_sema);
1624
1625 if (qp->nq_sqdma != NULL)
1626 nvme_free_dma(qp->nq_sqdma);
1627
1628 if (qp->nq_active_cmds > 0)
1629 for (i = 0; i != qp->nq_nentry; i++)
1630 if (qp->nq_cmd[i] != NULL)
1631 nvme_free_cmd(qp->nq_cmd[i]);
1632
1633 if (qp->nq_cmd != NULL)
1634 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
1635
1636 kmem_free(qp, sizeof (nvme_qpair_t));
1637 }
1638
1639 /*
1640 * Destroy the pre-allocated cq array, but only free individual completion
1641 * queues from the given starting index.
1642 */
1643 static void
1644 nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
1645 {
1646 uint_t i;
1647
1648 for (i = start; i < nvme->n_cq_count; i++)
1649 if (nvme->n_cq[i] != NULL)
1650 nvme_free_cq(nvme->n_cq[i]);
1651
1652 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
1653 }
1654
1655 static int
1656 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
1657 uint_t nthr)
1658 {
1659 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
1660 char name[64]; /* large enough for the taskq name */
1661
1662 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
1663 DDI_INTR_PRI(nvme->n_intr_pri));
1664
1665 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
1666 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
1667 goto fail;
1668
1669 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
1670 cq->ncq_nentry = nentry;
1671 cq->ncq_id = idx;
1672 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
1673
1674 /*
1675 * Each completion queue has its own command taskq.
1676 */
1677 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
1678 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
1679
1680 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
1681 TASKQ_PREPOPULATE);
1682
1683 if (cq->ncq_cmd_taskq == NULL) {
1684 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
1685 "taskq for cq %u", idx);
1686 goto fail;
1687 }
1688
1689 *cqp = cq;
1690 return (DDI_SUCCESS);
1691
1692 fail:
1693 nvme_free_cq(cq);
1694 *cqp = NULL;
1695
1696 return (DDI_FAILURE);
1697 }
1698
1699 /*
1700 * Create the n_cq array big enough to hold "ncq" completion queues.
1701 * If the array already exists it will be re-sized (but only larger).
1702 * The admin queue is included in this array, which boosts the
1703 * max number of entries to UINT16_MAX + 1.
1704 */
1705 static int
1706 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
1707 {
1708 nvme_cq_t **cq;
1709 uint_t i, cq_count;
1710
1711 ASSERT3U(ncq, >, nvme->n_cq_count);
1712
1713 cq = nvme->n_cq;
1714 cq_count = nvme->n_cq_count;
1715
1716 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
1717 nvme->n_cq_count = ncq;
1718
1719 for (i = 0; i < cq_count; i++)
1720 nvme->n_cq[i] = cq[i];
1721
1722 for (; i < nvme->n_cq_count; i++)
1723 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
1724 DDI_SUCCESS)
1725 goto fail;
1726
1727 if (cq != NULL)
1728 kmem_free(cq, sizeof (*cq) * cq_count);
1729
1730 return (DDI_SUCCESS);
1731
1732 fail:
1733 nvme_destroy_cq_array(nvme, cq_count);
1734 /*
1735 * Restore the original array
1736 */
1737 nvme->n_cq_count = cq_count;
1738 nvme->n_cq = cq;
1739
1740 return (DDI_FAILURE);
1741 }
1742
1743 static int
1744 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
1745 uint_t idx)
1746 {
1747 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
1748 uint_t cq_idx;
1749
1750 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
1751 DDI_INTR_PRI(nvme->n_intr_pri));
1752
1753 /*
1754 * The NVMe spec defines that a full queue has one empty (unused) slot;
1755 * initialize the semaphore accordingly.
1756 */
1757 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
1758
1759 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
1760 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
1761 goto fail;
1762
1763 /*
1764 * idx == 0 is adminq, those above 0 are shared io completion queues.
1765 */
1766 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
1767 qp->nq_cq = nvme->n_cq[cq_idx];
1768 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
1769 qp->nq_nentry = nentry;
1770
1771 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
1772
1773 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
1774 qp->nq_next_cmd = 0;
1775
1776 *nqp = qp;
1777 return (DDI_SUCCESS);
1778
1779 fail:
1780 nvme_free_qpair(qp);
1781 *nqp = NULL;
1782
1783 return (DDI_FAILURE);
1784 }
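
/*
 * Worked example of the cq_idx mapping in nvme_alloc_qpair() above: assuming
 * n_cq_count == 5 (the admin completion queue plus four I/O completion
 * queues), I/O queue pairs with idx 1, 2, 3, 4, 5, 6, ... map to completion
 * queues 1, 2, 3, 4, 1, 2, ... In other words the I/O submission queues share
 * the I/O completion queues round-robin, while the admin queue pair (idx 0)
 * has completion queue 0 to itself.
 */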
1785
1786 /*
1787 * One might reasonably consider that the nvme_cmd_cache should have a cache
1788 * constructor and destructor that takes care of the mutex/cv init/destroy, and
1789 * that nvme_free_cmd should reset more fields such that allocation becomes
1790 * simpler. This is not currently implemented as:
1791 * - nvme_cmd_cache is a global cache, shared across nvme instances and
1792 * therefore there is no easy access to the corresponding nvme_t in the
1793 * constructor to determine the required interrupt priority.
1794 * - Most fields in nvme_cmd_t would need to be zeroed in nvme_free_cmd while
1795 * preserving the mutex/cv. It is easier to be able to zero the entire
1796 * structure and then init the mutex/cv only in the unlikely event that we
1797 * want an admin command.
1798 */
1799 static nvme_cmd_t *
1800 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
1801 {
1802 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
1803
1804 if (cmd != NULL) {
1805 bzero(cmd, sizeof (nvme_cmd_t));
1806 cmd->nc_nvme = nvme;
1807 }
1808
1809 return (cmd);
1810 }
1811
1812 static nvme_cmd_t *
1813 nvme_alloc_admin_cmd(nvme_t *nvme, int kmflag)
1814 {
1815 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, kmflag);
1816
1817 if (cmd != NULL) {
1818 cmd->nc_flags |= NVME_CMD_F_USELOCK;
1819 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
1820 DDI_INTR_PRI(nvme->n_intr_pri));
1821 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
1822 }
1823
1824 return (cmd);
1825 }
1826
1827 static void
1828 nvme_free_cmd(nvme_cmd_t *cmd)
1829 {
1830 /* Don't free commands on the lost commands list. */
1831 if (list_link_active(&cmd->nc_list))
1832 return;
1833
1834 if (cmd->nc_dma) {
1835 nvme_free_dma(cmd->nc_dma);
1836 cmd->nc_dma = NULL;
1837 }
1838
1839 if (cmd->nc_prp) {
1840 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
1841 cmd->nc_prp = NULL;
1842 }
1843
1844 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
1845 cv_destroy(&cmd->nc_cv);
1846 mutex_destroy(&cmd->nc_mutex);
1847 }
1848
1849 kmem_cache_free(nvme_cmd_cache, cmd);
1850 }
1851
1852 static void
1853 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1854 {
1855 sema_p(&qp->nq_sema);
1856 nvme_submit_cmd_common(qp, cmd, qtimeoutp);
1857 }
1858
1859 static int
1860 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1861 {
1862 if (cmd->nc_nvme->n_dead) {
1863 return (EIO);
1864 }
1865
1866 if (sema_tryp(&qp->nq_sema) == 0)
1867 return (EAGAIN);
1868
1869 nvme_submit_cmd_common(qp, cmd, NULL);
1870 return (0);
1871 }
1872
1873 /*
1874 * Common command submission routine. If `qtimeoutp` is not NULL then it will
1875 * be set to the sum of the timeouts of any active commands ahead of the one
1876 * being submitted.
1877 */
1878 static void
1879 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1880 {
1881 nvme_reg_sqtdbl_t tail = { 0 };
1882
1883 /*
1884 * We don't need to take a lock on cmd since it is not yet enqueued.
1885 */
1886 cmd->nc_submit_ts = gethrtime();
1887 cmd->nc_state = NVME_CMD_SUBMITTED;
1888
1889 mutex_enter(&qp->nq_mutex);
1890
1891 /*
1892 * Now that we hold the queue pair lock, we must check whether or not
1893 * the controller has been listed as dead (e.g. was removed due to
1894 * hotplug). This is necessary as otherwise we could race with
1895 * nvme_remove_callback(). Because this has not been enqueued, we don't
1896 * call nvme_unqueue_cmd(), which is why we must manually decrement the
1897 * semaphore.
1898 */
1899 if (cmd->nc_nvme->n_dead) {
1900 cmd->nc_queue_ts = gethrtime();
1901 cmd->nc_state = NVME_CMD_QUEUED;
1902 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
1903 cmd, TQ_NOSLEEP, &cmd->nc_tqent);
1904 sema_v(&qp->nq_sema);
1905 mutex_exit(&qp->nq_mutex);
1906 return;
1907 }
1908
1909 /*
1910 * Try to insert the cmd into the active cmd array at the nq_next_cmd
1911 * slot. If the slot is already occupied advance to the next slot and
1912 * try again. This can happen for long running commands like async event
1913 * requests.
1914 */
1915 while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
1916 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1917 qp->nq_cmd[qp->nq_next_cmd] = cmd;
1918
1919 /*
1920 * We keep track of the number of active commands in this queue, and
1921 * the sum of the timeouts for those active commands.
1922 */
1923 qp->nq_active_cmds++;
1924 if (qtimeoutp != NULL)
1925 *qtimeoutp = qp->nq_active_timeout;
1926 qp->nq_active_timeout += cmd->nc_timeout;
1927
1928 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
1929 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
1930 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
1931 sizeof (nvme_sqe_t) * qp->nq_sqtail,
1932 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
1933 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1934
1935 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
1936 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
1937
1938 mutex_exit(&qp->nq_mutex);
1939 }
1940
1941 static nvme_cmd_t *
1942 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
1943 {
1944 nvme_cmd_t *cmd;
1945
1946 ASSERT(mutex_owned(&qp->nq_mutex));
1947 ASSERT3S(cid, <, qp->nq_nentry);
1948
1949 cmd = qp->nq_cmd[cid];
1950 /*
1951 * Some controllers will erroneously add things to the completion queue
1952 * for which there is no matching outstanding command. If this happens,
1953 * it is almost certainly a controller firmware bug since nq_mutex
1954 * is held across command submission and ringing the queue doorbell,
1955 * and is also held in this function.
1956 *
1957 * If we see such an unexpected command, there is not much we can do.
1958 * These will be logged and counted in nvme_get_completed(), but
1959 * otherwise ignored.
1960 */
1961 if (cmd == NULL)
1962 return (NULL);
1963 qp->nq_cmd[cid] = NULL;
1964 ASSERT3U(qp->nq_active_cmds, >, 0);
1965 qp->nq_active_cmds--;
1966 ASSERT3U(qp->nq_active_timeout, >=, cmd->nc_timeout);
1967 qp->nq_active_timeout -= cmd->nc_timeout;
1968 sema_v(&qp->nq_sema);
1969
1970 ASSERT3P(cmd, !=, NULL);
1971 ASSERT3P(cmd->nc_nvme, ==, nvme);
1972 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
1973
1974 return (cmd);
1975 }
1976
1977 /*
1978 * This is called when an admin abort has failed to complete, once for the
1979 * original command and once for the abort itself. At this point the controller
1980 * has been marked dead. The commands are considered lost, de-queued if
1981 * possible, and placed on a global lost commands list so that they cannot be
1982 * freed and so that any DMA memory they have is not re-used.
1983 */
1984 static void
1985 nvme_lost_cmd(nvme_t *nvme, nvme_cmd_t *cmd)
1986 {
1987 ASSERT(mutex_owned(&cmd->nc_mutex));
1988
1989 switch (cmd->nc_state) {
1990 case NVME_CMD_SUBMITTED: {
1991 nvme_qpair_t *qp = nvme->n_ioq[cmd->nc_sqid];
1992
1993 /*
1994 * The command is still in the submitted state, meaning that we
1995 * have not processed a completion queue entry for it. De-queue
1996 * should be successful and if the hardware does later report
1997 * completion we'll skip it as a command for which we aren't
1998 * expecting a response (see nvme_unqueue_cmd()).
1999 */
2000 mutex_enter(&qp->nq_mutex);
2001 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
2002 mutex_exit(&qp->nq_mutex);
2003 }
2004 case NVME_CMD_ALLOCATED:
2005 case NVME_CMD_COMPLETED:
2006 /*
2007 * If the command has not been submitted, or has completed,
2008 * there is nothing to do here. In the event of an abort
2009 * command timeout, we can end up here in the process of
2010 * "losing" the original command. It's possible that command
2011 * has actually completed (or been queued on the taskq) in the
2012 * interim.
2013 */
2014 break;
2015 case NVME_CMD_QUEUED:
2016 /*
2017 * The command is on the taskq, awaiting callback. This should
2018 * be fairly rapid so wait for completion.
2019 */
2020 while (cmd->nc_state != NVME_CMD_COMPLETED)
2021 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2022 break;
2023 case NVME_CMD_LOST:
2024 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2025 "%s: command %p already lost", __func__, (void *)cmd);
2026 break;
2027 }
2028
2029 cmd->nc_state = NVME_CMD_LOST;
2030
2031 mutex_enter(&nvme_lc_mutex);
2032 list_insert_head(&nvme_lost_cmds, cmd);
2033 mutex_exit(&nvme_lc_mutex);
2034 }
2035
2036 /*
2037 * Get the command tied to the next completed cqe and advance the completion
2038 * queue head counter.
2039 */
2040 static nvme_cmd_t *
2041 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
2042 {
2043 nvme_qpair_t *qp;
2044 nvme_cqe_t *cqe;
2045 nvme_cmd_t *cmd;
2046
2047 ASSERT(mutex_owned(&cq->ncq_mutex));
2048
2049 retry:
2050 cqe = &cq->ncq_cq[cq->ncq_head];
2051
2052 /* Check phase tag of CQE. Hardware inverts it for new entries. */
2053 if (cqe->cqe_sf.sf_p == cq->ncq_phase)
2054 return (NULL);
2055
2056 qp = nvme->n_ioq[cqe->cqe_sqid];
2057
2058 mutex_enter(&qp->nq_mutex);
2059 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
2060 mutex_exit(&qp->nq_mutex);
2061
2062 qp->nq_sqhead = cqe->cqe_sqhd;
2063 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
2064
2065 /* Toggle phase on wrap-around. */
2066 if (cq->ncq_head == 0)
2067 cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1;
2068
2069 if (cmd == NULL) {
2070 dev_err(nvme->n_dip, CE_WARN,
2071 "!received completion for unknown cid 0x%x", cqe->cqe_cid);
2072 NVME_BUMP_STAT(nvme, unknown_cid);
2073 /*
2074 * We want to ignore this unexpected completion entry as it
2075 * is most likely a result of a bug in the controller firmware.
2076 * However, if we return NULL, then callers will assume there
2077 * are no more pending commands for this wakeup. Retry to keep
2078 * enumerating commands until the phase tag indicates there are
2079 * no more and we are really done.
2080 */
2081 goto retry;
2082 }
2083
2084 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid);
2085 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
2086
2087 return (cmd);
2088 }
2089
2090 /*
2091 * Process all completed commands on the io completion queue.
2092 */
2093 static uint_t
2094 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
2095 {
2096 nvme_reg_cqhdbl_t head = { 0 };
2097 nvme_cmd_t *cmd;
2098 uint_t completed = 0;
2099
2100 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
2101 DDI_SUCCESS)
2102 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
2103 __func__);
2104
2105 mutex_enter(&cq->ncq_mutex);
2106
2107 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
2108 /*
2109 * NVME_CMD_F_USELOCK is applied to all commands which are
2110 * going to be waited for by another thread in nvme_wait_cmd
2111 * and indicates that the lock should be taken before modifying
2112 * protected fields, and that the mutex has been initialised.
2113 * Commands which do not require the mutex to be held have not
2114 * initialised it (to reduce overhead).
2115 */
2116 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
2117 mutex_enter(&cmd->nc_mutex);
2118 /*
2119 * The command could have been de-queued as lost while
2120 * we waited on the lock, in which case we drop it.
2121 */
2122 if (cmd->nc_state == NVME_CMD_LOST) {
2123 mutex_exit(&cmd->nc_mutex);
2124 completed++;
2125 continue;
2126 }
2127 }
2128 cmd->nc_queue_ts = gethrtime();
2129 cmd->nc_state = NVME_CMD_QUEUED;
2130 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0)
2131 mutex_exit(&cmd->nc_mutex);
2132 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
2133 TQ_NOSLEEP, &cmd->nc_tqent);
2134
2135 completed++;
2136 }
2137
2138 if (completed > 0) {
2139 /*
2140 * Update the completion queue head doorbell.
2141 */
2142 head.b.cqhdbl_cqh = cq->ncq_head;
2143 nvme_put32(nvme, cq->ncq_hdbl, head.r);
2144 }
2145
2146 mutex_exit(&cq->ncq_mutex);
2147
2148 return (completed);
2149 }
2150
2151 static nvme_cmd_t *
2152 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
2153 {
2154 nvme_cq_t *cq = qp->nq_cq;
2155 nvme_reg_cqhdbl_t head = { 0 };
2156 nvme_cmd_t *cmd;
2157
2158 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
2159 DDI_SUCCESS)
2160 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
2161 __func__);
2162
2163 mutex_enter(&cq->ncq_mutex);
2164
2165 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
2166 head.b.cqhdbl_cqh = cq->ncq_head;
2167 nvme_put32(nvme, cq->ncq_hdbl, head.r);
2168 }
2169
2170 mutex_exit(&cq->ncq_mutex);
2171
2172 return (cmd);
2173 }
2174
2175 static int
2176 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
2177 {
2178 nvme_cqe_t *cqe = &cmd->nc_cqe;
2179
2180 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2181 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2182 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2183 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2184 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2185
2186 if (cmd->nc_xfer != NULL)
2187 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2188
2189 /*
2190 * User commands should never cause us to mark the controller dead.
2191 * Though whether we should ever mark it dead, given that there currently
2192 * isn't a useful recovery path, is another question.
2193 */
2194 if (((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) &&
2195 cmd->nc_nvme->n_strict_version) {
2196 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2197 }
2198
2199 return (EIO);
2200 }
2201
2202 static int
2203 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
2204 {
2205 nvme_cqe_t *cqe = &cmd->nc_cqe;
2206
2207 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2208 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2209 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2210 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2211 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2212 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
2213 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2214 }
2215
2216 return (EIO);
2217 }
2218
2219 static int
2220 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
2221 {
2222 nvme_cqe_t *cqe = &cmd->nc_cqe;
2223
2224 switch (cqe->cqe_sf.sf_sc) {
2225 case NVME_CQE_SC_INT_NVM_WRITE:
2226 /* write fail */
2227 /* TODO: post ereport */
2228 if (cmd->nc_xfer != NULL)
2229 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2230 return (EIO);
2231
2232 case NVME_CQE_SC_INT_NVM_READ:
2233 /* read fail */
2234 /* TODO: post ereport */
2235 if (cmd->nc_xfer != NULL)
2236 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2237 return (EIO);
2238
2239 default:
2240 return (nvme_check_unknown_cmd_status(cmd));
2241 }
2242 }
2243
2244 static int
2245 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
2246 {
2247 nvme_cqe_t *cqe = &cmd->nc_cqe;
2248
2249 switch (cqe->cqe_sf.sf_sc) {
2250 case NVME_CQE_SC_GEN_SUCCESS:
2251 return (0);
2252
2253 /*
2254 * Errors indicating a bug in the driver should cause a panic.
2255 */
2256 case NVME_CQE_SC_GEN_INV_OPC:
2257 /* Invalid Command Opcode */
2258 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmd_err);
2259 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2260 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2261 "programming error: invalid opcode in cmd %p",
2262 (void *)cmd);
2263 }
2264 return (EINVAL);
2265
2266 case NVME_CQE_SC_GEN_INV_FLD:
2267 /* Invalid Field in Command */
2268 NVME_BUMP_STAT(cmd->nc_nvme, inv_field_err);
2269 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2270 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2271 "programming error: invalid field in cmd %p",
2272 (void *)cmd);
2273 }
2274 return (EIO);
2275
2276 case NVME_CQE_SC_GEN_ID_CNFL:
2277 /* Command ID Conflict */
2278 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2279 "cmd ID conflict in cmd %p", (void *)cmd);
2280 return (0);
2281
2282 case NVME_CQE_SC_GEN_INV_NS:
2283 /* Invalid Namespace or Format */
2284 NVME_BUMP_STAT(cmd->nc_nvme, inv_nsfmt_err);
2285 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2286 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2287 "programming error: invalid NS/format in cmd %p",
2288 (void *)cmd);
2289 }
2290 return (EINVAL);
2291
2292 case NVME_CQE_SC_GEN_CMD_SEQ_ERR:
2293 /*
2294 * Command Sequence Error
2295 *
2296 * This can be generated normally by user log page requests that
2297 * come out of order (e.g. getting the persistent event log
2298 * without establishing the context). If the kernel manages this
2299 * on its own then that's problematic.
2300 */
2301 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmdseq_err);
2302 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2303 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2304 "programming error: command sequencing error %p",
2305 (void *)cmd);
2306 }
2307 return (EINVAL);
2308
2309 case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
2310 /* LBA Out Of Range */
2311 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2312 "LBA out of range in cmd %p", (void *)cmd);
2313 return (0);
2314
2315 /*
2316 * Non-fatal errors, handle gracefully.
2317 */
2318 case NVME_CQE_SC_GEN_DATA_XFR_ERR:
2319 /* Data Transfer Error (DMA) */
2320 /* TODO: post ereport */
2321 NVME_BUMP_STAT(cmd->nc_nvme, data_xfr_err);
2322 if (cmd->nc_xfer != NULL)
2323 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2324 return (EIO);
2325
2326 case NVME_CQE_SC_GEN_INTERNAL_ERR:
2327 /*
2328 * Internal Error. The spec (v1.0, section 4.5.1.2) says
2329 * detailed error information is returned as an async event,
2330 * so we pretty much ignore the error here and handle it
2331 * in the async event handler.
2332 */
2333 NVME_BUMP_STAT(cmd->nc_nvme, internal_err);
2334 if (cmd->nc_xfer != NULL)
2335 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2336 return (EIO);
2337
2338 case NVME_CQE_SC_GEN_ABORT_REQUEST:
2339 /*
2340 * Command Abort Requested. This normally happens only when a
2341 * command times out.
2342 */
2343 /* TODO: post ereport or change blkdev to handle this? */
2344 NVME_BUMP_STAT(cmd->nc_nvme, abort_rq_err);
2345 return (ECANCELED);
2346
2347 case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
2348 /* Command Aborted due to Power Loss Notification */
2349 NVME_BUMP_STAT(cmd->nc_nvme, abort_pwrloss_err);
2350 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2351 return (EIO);
2352
2353 case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
2354 /* Command Aborted due to SQ Deletion */
2355 NVME_BUMP_STAT(cmd->nc_nvme, abort_sq_del);
2356 return (EIO);
2357
2358 case NVME_CQE_SC_GEN_NVM_CAP_EXC:
2359 /* Capacity Exceeded */
2360 NVME_BUMP_STAT(cmd->nc_nvme, nvm_cap_exc);
2361 if (cmd->nc_xfer != NULL)
2362 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2363 return (EIO);
2364
2365 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
2366 /* Namespace Not Ready */
2367 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_notrdy);
2368 if (cmd->nc_xfer != NULL)
2369 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2370 return (EIO);
2371
2372 case NVME_CQE_SC_GEN_NVM_FORMATTING:
2373 /* Format in progress (1.2) */
2374 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2))
2375 return (nvme_check_unknown_cmd_status(cmd));
2376 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_formatting);
2377 if (cmd->nc_xfer != NULL)
2378 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2379 return (EIO);
2380
2381 default:
2382 return (nvme_check_unknown_cmd_status(cmd));
2383 }
2384 }
2385
2386 static int
2387 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
2388 {
2389 nvme_cqe_t *cqe = &cmd->nc_cqe;
2390
2391 switch (cqe->cqe_sf.sf_sc) {
2392 case NVME_CQE_SC_SPC_INV_CQ:
2393 /* Completion Queue Invalid */
2394 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
2395 NVME_BUMP_STAT(cmd->nc_nvme, inv_cq_err);
2396 return (EINVAL);
2397
2398 case NVME_CQE_SC_SPC_INV_QID:
2399 /* Invalid Queue Identifier */
2400 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2401 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
2402 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
2403 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2404 NVME_BUMP_STAT(cmd->nc_nvme, inv_qid_err);
2405 return (EINVAL);
2406
2407 case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
2408 /* Max Queue Size Exceeded */
2409 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2410 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2411 NVME_BUMP_STAT(cmd->nc_nvme, max_qsz_exc);
2412 return (EINVAL);
2413
2414 case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
2415 /* Abort Command Limit Exceeded */
2416 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
2417 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2418 "abort command limit exceeded in cmd %p", (void *)cmd);
2419 return (0);
2420
2421 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
2422 /* Async Event Request Limit Exceeded */
2423 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
2424 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2425 "async event request limit exceeded in cmd %p",
2426 (void *)cmd);
2427 return (0);
2428
2429 case NVME_CQE_SC_SPC_INV_INT_VECT:
2430 /* Invalid Interrupt Vector */
2431 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2432 NVME_BUMP_STAT(cmd->nc_nvme, inv_int_vect);
2433 return (EINVAL);
2434
2435 case NVME_CQE_SC_SPC_INV_LOG_PAGE:
2436 /* Invalid Log Page */
2437 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
2438 NVME_BUMP_STAT(cmd->nc_nvme, inv_log_page);
2439 return (EINVAL);
2440
2441 case NVME_CQE_SC_SPC_INV_FORMAT:
2442 /* Invalid Format */
2443 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT ||
2444 cmd->nc_sqe.sqe_opc == NVME_OPC_NS_MGMT);
2445 NVME_BUMP_STAT(cmd->nc_nvme, inv_format);
2446 if (cmd->nc_xfer != NULL)
2447 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2448 return (EINVAL);
2449
2450 case NVME_CQE_SC_SPC_INV_Q_DEL:
2451 /* Invalid Queue Deletion */
2452 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2453 NVME_BUMP_STAT(cmd->nc_nvme, inv_q_del);
2454 return (EINVAL);
2455
2456 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
2457 /* Conflicting Attributes */
2458 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
2459 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2460 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2461 NVME_BUMP_STAT(cmd->nc_nvme, cnfl_attr);
2462 if (cmd->nc_xfer != NULL)
2463 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2464 return (EINVAL);
2465
2466 case NVME_CQE_SC_SPC_NVM_INV_PROT:
2467 /* Invalid Protection Information */
2468 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
2469 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2470 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2471 NVME_BUMP_STAT(cmd->nc_nvme, inv_prot);
2472 if (cmd->nc_xfer != NULL)
2473 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2474 return (EINVAL);
2475
2476 case NVME_CQE_SC_SPC_NVM_READONLY:
2477 /* Write to Read Only Range */
2478 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2479 NVME_BUMP_STAT(cmd->nc_nvme, readonly);
2480 if (cmd->nc_xfer != NULL)
2481 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2482 return (EROFS);
2483
2484 case NVME_CQE_SC_SPC_INV_FW_SLOT:
2485 /* Invalid Firmware Slot */
2486 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwslot);
2487 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2488 return (EINVAL);
2489
2490 case NVME_CQE_SC_SPC_INV_FW_IMG:
2491 /* Invalid Firmware Image */
2492 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwimg);
2493 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2494 return (EINVAL);
2495
2496 case NVME_CQE_SC_SPC_FW_RESET:
2497 /* Conventional Reset Required */
2498 NVME_BUMP_STAT(cmd->nc_nvme, fwact_creset);
2499 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2500 return (0);
2501
2502 case NVME_CQE_SC_SPC_FW_NSSR:
2503 /* NVMe Subsystem Reset Required */
2504 NVME_BUMP_STAT(cmd->nc_nvme, fwact_nssr);
2505 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2506 return (0);
2507
2508 case NVME_CQE_SC_SPC_FW_NEXT_RESET:
2509 /* Activation Requires Reset */
2510 NVME_BUMP_STAT(cmd->nc_nvme, fwact_reset);
2511 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2512 return (0);
2513
2514 case NVME_CQE_SC_SPC_FW_MTFA:
2515 /* Activation Requires Maximum Time Violation */
2516 NVME_BUMP_STAT(cmd->nc_nvme, fwact_mtfa);
2517 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2518 return (EAGAIN);
2519
2520 case NVME_CQE_SC_SPC_FW_PROHIBITED:
2521 /* Activation Prohibited */
2522 NVME_BUMP_STAT(cmd->nc_nvme, fwact_prohibited);
2523 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2524 return (EINVAL);
2525
2526 case NVME_CQE_SC_SPC_FW_OVERLAP:
2527 /* Overlapping Firmware Ranges */
2528 NVME_BUMP_STAT(cmd->nc_nvme, fw_overlap);
2529 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD ||
2530 cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2531 return (EINVAL);
2532
2533 case NVME_CQE_SC_SPC_NS_ATTACHED:
2534 /* Namespace Already Attached */
2535 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2536 NVME_BUMP_STAT(cmd->nc_nvme, ns_attached);
2537 return (EEXIST);
2538
2539 case NVME_CQE_SC_SPC_NS_PRIV:
2540 /* Namespace Is Private */
2541 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2542 NVME_BUMP_STAT(cmd->nc_nvme, ns_priv);
2543 return (EACCES);
2544
2545 case NVME_CQE_SC_SPC_NS_NOT_ATTACH:
2546 /* Namespace Not Attached */
2547 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2548 NVME_BUMP_STAT(cmd->nc_nvme, ns_not_attached);
2549 return (ENOENT);
2550
2551 case NVME_CQE_SC_SPC_INV_CTRL_LIST:
2552 /* Controller List Invalid */
2553 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2554 NVME_BUMP_STAT(cmd->nc_nvme, ana_attach);
2555 return (EINVAL);
2556
2557 case NVME_CQE_SC_SPC_ANA_ATTACH:
2558 /* ANA Attach Failed */
2559 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2560 NVME_BUMP_STAT(cmd->nc_nvme, ana_attach);
2561 return (EIO);
2562
2563 case NVME_CQE_SC_SPC_NS_ATTACH_LIM:
2564 /* Namespace Attachment Limit Exceeded */
2565 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2566 NVME_BUMP_STAT(cmd->nc_nvme, ns_attach_lim);
2567 return (EOVERFLOW);
2568
2569 default:
2570 return (nvme_check_unknown_cmd_status(cmd));
2571 }
2572 }
2573
2574 static inline int
2575 nvme_check_cmd_status(nvme_cmd_t *cmd)
2576 {
2577 nvme_cqe_t *cqe = &cmd->nc_cqe;
2578
2579 /*
2580 * Take a shortcut if the controller is dead, or if
2581 * command status indicates no error.
2582 */
2583 if (cmd->nc_nvme->n_dead)
2584 return (EIO);
2585
2586 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2587 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2588 return (0);
2589
2590 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
2591 return (nvme_check_generic_cmd_status(cmd));
2592 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
2593 return (nvme_check_specific_cmd_status(cmd));
2594 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
2595 return (nvme_check_integrity_cmd_status(cmd));
2596 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
2597 return (nvme_check_vendor_cmd_status(cmd));
2598
2599 return (nvme_check_unknown_cmd_status(cmd));
2600 }
2601
2602 /*
2603 * Check the command status as used by an ioctl path and do not convert it to an
2604 * errno. We still allow all the command status checking to occur, but otherwise
2605 * will pass back the controller error as is.
2606 */
2607 static boolean_t
2608 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc)
2609 {
2610 nvme_cqe_t *cqe = &cmd->nc_cqe;
2611 nvme_t *nvme = cmd->nc_nvme;
2612
2613 if (nvme->n_dead) {
2614 return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0));
2615 }
2616
2617 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2618 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2619 return (B_TRUE);
2620
2621 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) {
2622 (void) nvme_check_generic_cmd_status(cmd);
2623 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) {
2624 (void) nvme_check_specific_cmd_status(cmd);
2625 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) {
2626 (void) nvme_check_integrity_cmd_status(cmd);
2627 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) {
2628 (void) nvme_check_vendor_cmd_status(cmd);
2629 } else {
2630 (void) nvme_check_unknown_cmd_status(cmd);
2631 }
2632
2633 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR,
2634 cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc));
2635 }
2636
2637 static int
2638 nvme_abort_cmd(nvme_cmd_t *cmd, const uint32_t sec)
2639 {
2640 nvme_t *nvme = cmd->nc_nvme;
2641 nvme_cmd_t *abort_cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2642 nvme_abort_cmd_t ac = { 0 };
2643 int ret = 0;
2644
2645 sema_p(&nvme->n_abort_sema);
2646
2647 ac.b.ac_cid = cmd->nc_sqe.sqe_cid;
2648 ac.b.ac_sqid = cmd->nc_sqid;
2649
2650 abort_cmd->nc_sqid = 0;
2651 abort_cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
2652 abort_cmd->nc_callback = nvme_wakeup_cmd;
2653 abort_cmd->nc_sqe.sqe_cdw10 = ac.r;
2654
2655 /*
2656 * Send the ABORT to the hardware. The ABORT command will return _after_
2657 * the aborted command has completed (aborted or otherwise) so we must
2658 * drop the aborted command's lock to allow it to complete.
2659 * We want to allow at least `nvme_abort_cmd_timeout` seconds for the
2660 * abort to be processed, but more if we are aborting a long-running
2661 * command to give that time to complete/abort too.
2662 */
2663 mutex_exit(&cmd->nc_mutex);
2664 nvme_admin_cmd(abort_cmd, MAX(nvme_abort_cmd_timeout, sec));
2665 mutex_enter(&cmd->nc_mutex);
2666
2667 sema_v(&nvme->n_abort_sema);
2668
2669 /* BEGIN CSTYLED */
2670 /*
2671 * If the abort command itself has timed out, it will have been
2672 * de-queued so that its callback will not be called after this point,
2673 * and its state will be NVME_CMD_LOST.
2674 *
2675 * nvme_admin_cmd(abort_cmd)
2676 * -> nvme_wait_cmd(abort_cmd)
2677 * -> nvme_cmd(abort_cmd)
2678 * | -> nvme_admin_cmd(cmd)
2679 * | -> nvme_wait_cmd(cmd)
2680 * | -> nvme_ctrl_mark_dead()
2681 * | -> nvme_lost_cmd(cmd)
2682 * |            -> cmd->nc_state = NVME_CMD_LOST
2683 * and here we are.
2684 */
2685 /* END CSTYLED */
2686 if (abort_cmd->nc_state == NVME_CMD_LOST) {
2687 dev_err(nvme->n_dip, CE_WARN,
2688 "!ABORT of command %d/%d timed out",
2689 cmd->nc_sqe.sqe_cid, cmd->nc_sqid);
2690 NVME_BUMP_STAT(nvme, abort_timeout);
2691 ret = EIO;
2692 } else if ((ret = nvme_check_cmd_status(abort_cmd)) != 0) {
2693 dev_err(nvme->n_dip, CE_WARN,
2694 "!ABORT of command %d/%d "
2695 "failed with sct = %x, sc = %x",
2696 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2697 abort_cmd->nc_cqe.cqe_sf.sf_sct,
2698 abort_cmd->nc_cqe.cqe_sf.sf_sc);
2699 NVME_BUMP_STAT(nvme, abort_failed);
2700 } else {
2701 boolean_t success = ((abort_cmd->nc_cqe.cqe_dw0 & 1) == 0);
2702
2703 dev_err(nvme->n_dip, CE_WARN,
2704 "!ABORT of command %d/%d %ssuccessful",
2705 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2706 success ? "" : "un");
2707
2708 if (success) {
2709 NVME_BUMP_STAT(nvme, abort_successful);
2710 } else {
2711 NVME_BUMP_STAT(nvme, abort_unsuccessful);
2712 }
2713 }
2714
2715 /*
2716 * The abort command (abort_cmd) has either completed or been de-queued as
2717 * lost in nvme_wait_cmd. Either way it's safe to free it here.
2718 */
2719 nvme_free_cmd(abort_cmd);
2720
2721 return (ret);
2722 }
2723
2724 /*
2725 * nvme_wait_cmd -- wait for command completion or timeout
2726 *
2727 * In case of a serious error or a timeout of the abort command the hardware
2728 * will be declared dead and FMA will be notified.
2729 */
2730 static void
2731 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec)
2732 {
2733 nvme_t *nvme = cmd->nc_nvme;
2734 nvme_reg_csts_t csts;
2735
2736 ASSERT(mutex_owned(&cmd->nc_mutex));
2737
2738 while (cmd->nc_state != NVME_CMD_COMPLETED) {
2739 clock_t timeout = ddi_get_lbolt() +
2740 drv_usectohz((long)sec * MICROSEC);
2741
2742 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) {
2743 /*
2744 * If this command is on the task queue then we don't
2745 * consider it to have timed out. We are waiting for
2746 * the callback to be invoked, the timing of which can
2747 * be affected by system load and should not count
2748 * against the device; continue to wait.
2749 * While this doesn't help deal with the possibility of
2750 * a command timing out between being placed on the CQ
2751 * and arriving on the taskq, we expect interrupts to
2752 * run fairly promptly making this a small window.
2753 */
2754 if (cmd->nc_state != NVME_CMD_QUEUED)
2755 break;
2756 }
2757 }
2758
2759 if (cmd->nc_state == NVME_CMD_COMPLETED) {
2760 DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *, cmd);
2761 nvme_admin_stat_cmd(nvme, cmd);
2762 return;
2763 }
2764
2765 /*
2766 * The command timed out.
2767 */
2768
2769 DTRACE_PROBE1(nvme_admin_cmd_timeout, nvme_cmd_t *, cmd);
2770 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2771 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
2772 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2773 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
2774 NVME_BUMP_STAT(nvme, cmd_timeout);
2775
2776 /*
2777 * Check controller for fatal status, any errors associated with the
2778 * register or DMA handle, or for a double timeout (abort command timed
2779 * out). If necessary log a warning and call FMA.
2780 */
2781 if (csts.b.csts_cfs ||
2782 nvme_check_regs_hdl(nvme) ||
2783 nvme_check_dma_hdl(cmd->nc_dma) ||
2784 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
2785 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2786 nvme_lost_cmd(nvme, cmd);
2787 return;
2788 }
2789
2790 /* Issue an abort for the command that has timed out */
2791 if (nvme_abort_cmd(cmd, sec) == 0) {
2792 /*
2793 * If the abort completed, whether or not it was
2794 * successful in aborting the command, that command
2795 * will also have completed with an appropriate
2796 * status.
2797 */
2798 while (cmd->nc_state != NVME_CMD_COMPLETED)
2799 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2800 return;
2801 }
2802
2803 /*
2804 * Otherwise, the abort has also timed out or failed, which
2805 * will have marked the controller dead. De-queue the original command
2806 * and add it to the lost commands list.
2807 */
2808 VERIFY(cmd->nc_nvme->n_dead);
2809 nvme_lost_cmd(nvme, cmd);
2810 }
2811
2812 static void
2813 nvme_wakeup_cmd(void *arg)
2814 {
2815 nvme_cmd_t *cmd = arg;
2816
2817 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
2818
2819 mutex_enter(&cmd->nc_mutex);
2820 cmd->nc_state = NVME_CMD_COMPLETED;
2821 cv_signal(&cmd->nc_cv);
2822 mutex_exit(&cmd->nc_mutex);
2823 }
2824
2825 static void
2826 nvme_async_event_task(void *arg)
2827 {
2828 nvme_cmd_t *cmd = arg;
2829 nvme_t *nvme = cmd->nc_nvme;
2830 nvme_error_log_entry_t *error_log = NULL;
2831 nvme_health_log_t *health_log = NULL;
2832 nvme_nschange_list_t *nslist = NULL;
2833 size_t logsize = 0;
2834 nvme_async_event_t event;
2835
2836 /*
2837 * Check for errors associated with the async request itself. The only
2838 * command-specific error is "async event limit exceeded", which
2839 * indicates a programming error in the driver and causes a panic in
2840 * nvme_check_cmd_status().
2841 *
2842 * Other possible errors are various scenarios where the async request
2843 * was aborted, or internal errors in the device. Internal errors are
2844 * reported to FMA, the command aborts need no special handling here.
2845 *
2846 * And finally, at least qemu nvme does not support async events,
2847 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we
2848 * will avoid posting async events.
2849 */
2850
2851 if (nvme_check_cmd_status(cmd) != 0) {
2852 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2853 "!async event request returned failure, sct = 0x%x, "
2854 "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
2855 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
2856 cmd->nc_cqe.cqe_sf.sf_m);
2857
2858 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2859 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
2860 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2861 }
2862
2863 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2864 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
2865 cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
2866 nvme->n_async_event_supported = B_FALSE;
2867 }
2868
2869 nvme_free_cmd(cmd);
2870 return;
2871 }
2872
2873 event.r = cmd->nc_cqe.cqe_dw0;
2874
2875 /* Clear CQE and re-submit the async request. */
2876 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
2877 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
2878 cmd = NULL; /* cmd can no longer be used after resubmission */
2879
2880 switch (event.b.ae_type) {
2881 case NVME_ASYNC_TYPE_ERROR:
2882 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
2883 if (!nvme_get_logpage_int(nvme, B_FALSE,
2884 (void **)&error_log, &logsize,
2885 NVME_LOGPAGE_ERROR)) {
2886 return;
2887 }
2888 } else {
2889 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2890 "async event reply: type=0x%x logpage=0x%x",
2891 event.b.ae_type, event.b.ae_logpage);
2892 NVME_BUMP_STAT(nvme, wrong_logpage);
2893 return;
2894 }
2895
2896 switch (event.b.ae_info) {
2897 case NVME_ASYNC_ERROR_INV_SQ:
2898 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2899 "invalid submission queue");
2900 return;
2901
2902 case NVME_ASYNC_ERROR_INV_DBL:
2903 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2904 "invalid doorbell write value");
2905 return;
2906
2907 case NVME_ASYNC_ERROR_DIAGFAIL:
2908 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
2909 nvme_ctrl_mark_dead(nvme, B_FALSE);
2910 NVME_BUMP_STAT(nvme, diagfail_event);
2911 break;
2912
2913 case NVME_ASYNC_ERROR_PERSISTENT:
2914 dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
2915 "device error");
2916 nvme_ctrl_mark_dead(nvme, B_FALSE);
2917 NVME_BUMP_STAT(nvme, persistent_event);
2918 break;
2919
2920 case NVME_ASYNC_ERROR_TRANSIENT:
2921 dev_err(nvme->n_dip, CE_WARN, "!transient internal "
2922 "device error");
2923 /* TODO: send ereport */
2924 NVME_BUMP_STAT(nvme, transient_event);
2925 break;
2926
2927 case NVME_ASYNC_ERROR_FW_LOAD:
2928 dev_err(nvme->n_dip, CE_WARN,
2929 "!firmware image load error");
2930 NVME_BUMP_STAT(nvme, fw_load_event);
2931 break;
2932 }
2933 break;
2934
2935 case NVME_ASYNC_TYPE_HEALTH:
2936 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
2937 if (!nvme_get_logpage_int(nvme, B_FALSE,
2938 (void **)&health_log, &logsize,
2939 NVME_LOGPAGE_HEALTH)) {
2940 return;
2941 }
2942 } else {
2943 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2944 "type=0x%x logpage=0x%x", event.b.ae_type,
2945 event.b.ae_logpage);
2946 NVME_BUMP_STAT(nvme, wrong_logpage);
2947 return;
2948 }
2949
2950 switch (event.b.ae_info) {
2951 case NVME_ASYNC_HEALTH_RELIABILITY:
2952 dev_err(nvme->n_dip, CE_WARN,
2953 "!device reliability compromised");
2954 /* TODO: send ereport */
2955 NVME_BUMP_STAT(nvme, reliability_event);
2956 break;
2957
2958 case NVME_ASYNC_HEALTH_TEMPERATURE:
2959 dev_err(nvme->n_dip, CE_WARN,
2960 "!temperature above threshold");
2961 /* TODO: send ereport */
2962 NVME_BUMP_STAT(nvme, temperature_event);
2963 break;
2964
2965 case NVME_ASYNC_HEALTH_SPARE:
2966 dev_err(nvme->n_dip, CE_WARN,
2967 "!spare space below threshold");
2968 /* TODO: send ereport */
2969 NVME_BUMP_STAT(nvme, spare_event);
2970 break;
2971 }
2972 break;
2973
2974 case NVME_ASYNC_TYPE_NOTICE:
2975 switch (event.b.ae_info) {
2976 case NVME_ASYNC_NOTICE_NS_CHANGE:
2977 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) {
2978 dev_err(nvme->n_dip, CE_WARN,
2979 "!wrong logpage in async event reply: "
2980 "type=0x%x logpage=0x%x",
2981 event.b.ae_type, event.b.ae_logpage);
2982 NVME_BUMP_STAT(nvme, wrong_logpage);
2983 break;
2984 }
2985
2986 dev_err(nvme->n_dip, CE_NOTE,
2987 "namespace attribute change event, "
2988 "logpage = 0x%x", event.b.ae_logpage);
2989 NVME_BUMP_STAT(nvme, notice_event);
2990
2991 if (!nvme_get_logpage_int(nvme, B_FALSE,
2992 (void **)&nslist, &logsize,
2993 NVME_LOGPAGE_NSCHANGE)) {
2994 break;
2995 }
2996
2997 if (nslist->nscl_ns[0] == UINT32_MAX) {
2998 dev_err(nvme->n_dip, CE_CONT,
2999 "more than %u namespaces have changed.\n",
3000 NVME_NSCHANGE_LIST_SIZE);
3001 break;
3002 }
3003
3004 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
3005 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
3006 uint32_t nsid = nslist->nscl_ns[i];
3007 nvme_namespace_t *ns;
3008
3009 if (nsid == 0) /* end of list */
3010 break;
3011
3012 dev_err(nvme->n_dip, CE_NOTE,
3013 "!namespace nvme%d/%u has changed.",
3014 ddi_get_instance(nvme->n_dip), nsid);
3015
3016 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
3017 continue;
3018
3019 ns = nvme_nsid2ns(nvme, nsid);
3020 if (ns->ns_state <= NVME_NS_STATE_NOT_IGNORED)
3021 continue;
3022
3023 nvme_mgmt_bd_start(nvme);
3024 bd_state_change(ns->ns_bd_hdl);
3025 nvme_mgmt_bd_end(nvme);
3026 }
3027 nvme_mgmt_unlock(nvme);
3028
3029 break;
3030
3031 case NVME_ASYNC_NOTICE_FW_ACTIVATE:
3032 dev_err(nvme->n_dip, CE_NOTE,
3033 "firmware activation starting, "
3034 "logpage = 0x%x", event.b.ae_logpage);
3035 NVME_BUMP_STAT(nvme, notice_event);
3036 break;
3037
3038 case NVME_ASYNC_NOTICE_TELEMETRY:
3039 dev_err(nvme->n_dip, CE_NOTE,
3040 "telemetry log changed, "
3041 "logpage = 0x%x", event.b.ae_logpage);
3042 NVME_BUMP_STAT(nvme, notice_event);
3043 break;
3044
3045 case NVME_ASYNC_NOTICE_NS_ASYMM:
3046 dev_err(nvme->n_dip, CE_NOTE,
3047 "asymmetric namespace access change, "
3048 "logpage = 0x%x", event.b.ae_logpage);
3049 NVME_BUMP_STAT(nvme, notice_event);
3050 break;
3051
3052 case NVME_ASYNC_NOTICE_LATENCYLOG:
3053 dev_err(nvme->n_dip, CE_NOTE,
3054 "predictable latency event aggregate log change, "
3055 "logpage = 0x%x", event.b.ae_logpage);
3056 NVME_BUMP_STAT(nvme, notice_event);
3057 break;
3058
3059 case NVME_ASYNC_NOTICE_LBASTATUS:
3060 dev_err(nvme->n_dip, CE_NOTE,
3061 "LBA status information alert, "
3062 "logpage = 0x%x", event.b.ae_logpage);
3063 NVME_BUMP_STAT(nvme, notice_event);
3064 break;
3065
3066 case NVME_ASYNC_NOTICE_ENDURANCELOG:
3067 dev_err(nvme->n_dip, CE_NOTE,
3068 "endurance group event aggregate log page change, "
3069 "logpage = 0x%x", event.b.ae_logpage);
3070 NVME_BUMP_STAT(nvme, notice_event);
3071 break;
3072
3073 default:
3074 dev_err(nvme->n_dip, CE_WARN,
3075 "!unknown notice async event received, "
3076 "info = 0x%x, logpage = 0x%x", event.b.ae_info,
3077 event.b.ae_logpage);
3078 NVME_BUMP_STAT(nvme, unknown_event);
3079 break;
3080 }
3081 break;
3082
3083 case NVME_ASYNC_TYPE_VENDOR:
3084 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
3085 "received, info = 0x%x, logpage = 0x%x", event.b.ae_info,
3086 event.b.ae_logpage);
3087 NVME_BUMP_STAT(nvme, vendor_event);
3088 break;
3089
3090 default:
3091 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
3092 "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type,
3093 event.b.ae_info, event.b.ae_logpage);
3094 NVME_BUMP_STAT(nvme, unknown_event);
3095 break;
3096 }
3097
3098 if (error_log != NULL)
3099 kmem_free(error_log, logsize);
3100
3101 if (health_log != NULL)
3102 kmem_free(health_log, logsize);
3103
3104 if (nslist != NULL)
3105 kmem_free(nslist, logsize);
3106 }
3107
3108 static void
3109 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec)
3110 {
3111 uint32_t qtimeout;
3112
3113 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
3114
3115 mutex_enter(&cmd->nc_mutex);
3116 cmd->nc_timeout = sec;
3117 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd, &qtimeout);
3118 /*
3119 * We will wait for a total of this command's specified timeout plus
3120 * the sum of the timeouts of any commands queued ahead of this one. If
3121 * we aren't first in the queue, this will inflate the timeout somewhat,
3122 * but these times are not critical, and it means that if we get stuck
3123 * behind a long-running command such as a namespace format then we
3124 * won't time out and trigger an abort.
3125 */
3126 nvme_wait_cmd(cmd, sec + qtimeout);
3127 mutex_exit(&cmd->nc_mutex);
3128 }
3129
3130 static void
3131 nvme_async_event(nvme_t *nvme)
3132 {
3133 nvme_cmd_t *cmd;
3134
3135 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3136 cmd->nc_sqid = 0;
3137 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
3138 cmd->nc_callback = nvme_async_event_task;
3139 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3140
3141 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
3142 }
3143
3144 /*
3145 * Some commands, such as format or vendor-unique commands, will manipulate
3146 * or destroy the data in a namespace. Before issuing them we make sure that
3147 * none of the namespaces that will be impacted are actually attached.
3148 */
3149 static boolean_t
3150 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid)
3151 {
3152 ASSERT(nvme_mgmt_lock_held(nvme));
3153 ASSERT3U(nsid, !=, 0);
3154
3155 if (nsid != NVME_NSID_BCAST) {
3156 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3157 return (ns->ns_state < NVME_NS_STATE_ATTACHED);
3158 }
3159
3160 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
3161 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
3162
3163 if (ns->ns_state >= NVME_NS_STATE_ATTACHED) {
3164 return (B_FALSE);
3165 }
3166 }
3167
3168 return (B_TRUE);
3169 }
3170
3171 static boolean_t
3172 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc)
3173 {
3174 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3175 nvme_format_nvm_t format_nvm = { 0 };
3176 boolean_t ret;
3177
3178 format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0);
3179 format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0);
3180
3181 cmd->nc_sqid = 0;
3182 cmd->nc_callback = nvme_wakeup_cmd;
3183 cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid;
3184 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
3185 cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
3186
3187 /*
3188 * We don't want to panic on any format commands. There are two reasons
3189 * for this:
3190 *
3191 * 1) All format commands are initiated by users. We don't want to panic
3192 * on user commands.
3193 *
3194 * 2) Several devices like the Samsung SM951 don't allow formatting of
3195 * all namespaces in one command and we'd prefer to handle that
3196 * gracefully.
3197 */
3198 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3199
3200 nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
3201
3202 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common)) {
3203 dev_err(nvme->n_dip, CE_WARN,
3204 "!FORMAT failed with sct = %x, sc = %x",
3205 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3206 ret = B_FALSE;
3207 goto fail;
3208 }
3209
3210 ret = B_TRUE;
3211 fail:
3212 nvme_free_cmd(cmd);
3213 return (ret);
3214 }
3215
3216 /*
3217 * Retrieve a specific log page. The contents of the log page request should
3218 * have already been validated by the system.
3219 */
3220 static boolean_t
3221 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log,
3222 void **buf)
3223 {
3224 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3225 nvme_getlogpage_dw10_t dw10;
3226 uint32_t offlo, offhi;
3227 nvme_getlogpage_dw11_t dw11;
3228 nvme_getlogpage_dw14_t dw14;
3229 uint32_t ndw;
3230 boolean_t ret = B_FALSE;
3231
3232 bzero(&dw10, sizeof (dw10));
3233 bzero(&dw11, sizeof (dw11));
3234 bzero(&dw14, sizeof (dw14));
3235
3236 cmd->nc_sqid = 0;
3237 cmd->nc_callback = nvme_wakeup_cmd;
3238 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
3239 cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid;
3240
3241 if (user)
3242 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3243
3244 /*
3245 * The size field is the number of double words, but it is a zero-based
3246 * value, so we need to store our actual value minus one.
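* For example, a 512 byte log page is 128 dwords and is therefore
* encoded as 127 (NUMDL = 127, NUMDU = 0).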
3247 */
3248 ndw = (uint32_t)(log->nigl_len / 4);
3249 ASSERT3U(ndw, >, 0);
3250 ndw--;
3251
3252 dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0);
3253 dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0);
3254 dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0);
3255 dw10.b.lp_lnumdl = bitx32(ndw, 15, 0);
3256
3257 dw11.b.lp_numdu = bitx32(ndw, 31, 16);
3258 dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0);
3259
3260 offlo = bitx64(log->nigl_offset, 31, 0);
3261 offhi = bitx64(log->nigl_offset, 63, 32);
3262
3263 dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0);
3264
3265 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3266 cmd->nc_sqe.sqe_cdw11 = dw11.r;
3267 cmd->nc_sqe.sqe_cdw12 = offlo;
3268 cmd->nc_sqe.sqe_cdw13 = offhi;
3269 cmd->nc_sqe.sqe_cdw14 = dw14.r;
3270
3271 if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ,
3272 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3273 dev_err(nvme->n_dip, CE_WARN,
3274 "!nvme_zalloc_dma failed for GET LOG PAGE");
3275 ret = nvme_ioctl_error(&log->nigl_common,
3276 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3277 goto fail;
3278 }
3279
3280 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
3281 ret = nvme_ioctl_error(&log->nigl_common,
3282 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3283 goto fail;
3284 }
3285 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3286
3287 if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) {
3288 if (!user) {
3289 dev_err(nvme->n_dip, CE_WARN,
3290 "!GET LOG PAGE failed with sct = %x, sc = %x",
3291 cmd->nc_cqe.cqe_sf.sf_sct,
3292 cmd->nc_cqe.cqe_sf.sf_sc);
3293 }
3294 ret = B_FALSE;
3295 goto fail;
3296 }
3297
3298 *buf = kmem_alloc(log->nigl_len, KM_SLEEP);
3299 bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len);
3300
3301 ret = B_TRUE;
3302 fail:
3303 nvme_free_cmd(cmd);
3304
3305 return (ret);
3306 }
3307
3308 /*
3309 * This is an internal wrapper for when the kernel wants to get a log page.
3310 * Currently this assumes that the only thing that is required is the log page
3311 * ID. If more information is required, we'll be better served to just use the
3312 * general ioctl interface.
3313 */
3314 static boolean_t
3315 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
3316 uint8_t lid)
3317 {
3318 const nvme_log_page_info_t *info = NULL;
3319 nvme_ioctl_get_logpage_t log;
3320 nvme_valid_ctrl_data_t data;
3321 boolean_t bret;
3322 bool var;
3323
3324 for (size_t i = 0; i < nvme_std_log_npages; i++) {
3325 if (nvme_std_log_pages[i].nlpi_lid == lid &&
3326 nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) {
3327 info = &nvme_std_log_pages[i];
3328 break;
3329 }
3330 }
3331
3332 if (info == NULL) {
3333 return (B_FALSE);
3334 }
3335
3336 data.vcd_vers = &nvme->n_version;
3337 data.vcd_id = nvme->n_idctl;
3338 bzero(&log, sizeof (log));
3339 log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
3340 log.nigl_csi = info->nlpi_csi;
3341 log.nigl_lid = info->nlpi_lid;
3342 log.nigl_len = nvme_log_page_info_size(info, &data, &var);
3343
3344 /*
3345 * We only support getting standard fixed-length log pages through the
3346 * kernel interface at this time. If a log page either has an unknown
3347 * size or has a variable length, then we cannot get it.
3348 */
3349 if (log.nigl_len == 0 || var) {
3350 return (B_FALSE);
3351 }
3352
3353 bret = nvme_get_logpage(nvme, user, &log, buf);
3354 if (!bret) {
3355 return (B_FALSE);
3356 }
3357
3358 *bufsize = log.nigl_len;
3359 return (B_TRUE);
3360 }
3361
3362 static boolean_t
3363 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc,
3364 void **buf)
3365 {
3366 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3367 boolean_t ret = B_FALSE;
3368 nvme_identify_dw10_t dw10;
3369
3370 ASSERT3P(buf, !=, NULL);
3371
3372 bzero(&dw10, sizeof (dw10));
3373
3374 cmd->nc_sqid = 0;
3375 cmd->nc_callback = nvme_wakeup_cmd;
3376 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
3377 cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid;
3378
3379 dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0);
3380 dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0);
3381
3382 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3383
3384 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
3385 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3386 dev_err(nvme->n_dip, CE_WARN,
3387 "!nvme_zalloc_dma failed for IDENTIFY");
3388 ret = nvme_ioctl_error(&ioc->nid_common,
3389 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3390 goto fail;
3391 }
3392
3393 if (cmd->nc_dma->nd_ncookie > 2) {
3394 dev_err(nvme->n_dip, CE_WARN,
3395 "!too many DMA cookies for IDENTIFY");
3396 NVME_BUMP_STAT(nvme, too_many_cookies);
3397 ret = nvme_ioctl_error(&ioc->nid_common,
3398 NVME_IOCTL_E_BAD_PRP, 0, 0);
3399 goto fail;
3400 }
3401
3402 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
3403 if (cmd->nc_dma->nd_ncookie > 1) {
3404 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
3405 &cmd->nc_dma->nd_cookie);
3406 cmd->nc_sqe.sqe_dptr.d_prp[1] =
3407 cmd->nc_dma->nd_cookie.dmac_laddress;
3408 }
3409
3410 if (user)
3411 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3412
3413 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3414
3415 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) {
3416 dev_err(nvme->n_dip, CE_WARN,
3417 "!IDENTIFY failed with sct = %x, sc = %x",
3418 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3419 ret = B_FALSE;
3420 goto fail;
3421 }
3422
3423 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
3424 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
3425 ret = B_TRUE;
3426
3427 fail:
3428 nvme_free_cmd(cmd);
3429
3430 return (ret);
3431 }
3432
3433 static boolean_t
3434 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf)
3435 {
3436 nvme_ioctl_identify_t id;
3437
3438 bzero(&id, sizeof (nvme_ioctl_identify_t));
3439 id.nid_common.nioc_nsid = nsid;
3440 id.nid_cns = cns;
3441
3442 return (nvme_identify(nvme, B_FALSE, &id, buf));
3443 }
3444
3445 static int
3446 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
3447 uint32_t val, uint32_t *res)
3448 {
3449 _NOTE(ARGUNUSED(nsid));
3450 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3451 int ret = EINVAL;
3452
3453 ASSERT(res != NULL);
3454
3455 cmd->nc_sqid = 0;
3456 cmd->nc_callback = nvme_wakeup_cmd;
3457 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
3458 cmd->nc_sqe.sqe_cdw10 = feature;
3459 cmd->nc_sqe.sqe_cdw11 = val;
3460
3461 if (user)
3462 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3463
3464 switch (feature) {
3465 case NVME_FEAT_WRITE_CACHE:
3466 if (!nvme->n_write_cache_present)
3467 goto fail;
3468 break;
3469
3470 case NVME_FEAT_NQUEUES:
3471 break;
3472
3473 default:
3474 goto fail;
3475 }
3476
3477 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3478
3479 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3480 dev_err(nvme->n_dip, CE_WARN,
3481 "!SET FEATURES %d failed with sct = %x, sc = %x",
3482 feature, cmd->nc_cqe.cqe_sf.sf_sct,
3483 cmd->nc_cqe.cqe_sf.sf_sc);
3484 goto fail;
3485 }
3486
3487 *res = cmd->nc_cqe.cqe_dw0;
3488
3489 fail:
3490 nvme_free_cmd(cmd);
3491 return (ret);
3492 }
3493
3494 static int
3495 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
3496 {
3497 nvme_write_cache_t nwc = { 0 };
3498
3499 if (enable)
3500 nwc.b.wc_wce = 1;
3501
3502 /*
3503 * We've seen some cases where this fails due to us being told we've
3504 * specified an invalid namespace when operating against the Xen xcp-ng
3505 * qemu NVMe virtual device. As such, we generally ensure that trying to
3506 * enable this doesn't lead us to panic. It's not completely clear why
3507 * specifying namespace zero here fails, but not when we're setting the
3508 * number of queues below.
3509 */
3510 return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE,
3511 nwc.r, &nwc.r));
3512 }
3513
3514 static int
3515 nvme_set_nqueues(nvme_t *nvme)
3516 {
3517 nvme_nqueues_t nq = { 0 };
3518 int ret;
3519
3520 /*
3521 * The default is to allocate one completion queue per vector.
3522 */
3523 if (nvme->n_completion_queues == -1)
3524 nvme->n_completion_queues = nvme->n_intr_cnt;
3525
3526 /*
3527 * There is no point in having more completion queues than
3528 * interrupt vectors.
3529 */
3530 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3531 nvme->n_intr_cnt);
3532
3533 /*
3534 * The default is to use one submission queue per completion queue.
3535 */
3536 if (nvme->n_submission_queues == -1)
3537 nvme->n_submission_queues = nvme->n_completion_queues;
3538
3539 /*
3540 * There is no point in having more completion queues than
3541 * submission queues.
3542 */
3543 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3544 nvme->n_submission_queues);
3545
3546 ASSERT(nvme->n_submission_queues > 0);
3547 ASSERT(nvme->n_completion_queues > 0);
3548
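/*
* Both queue counts are zero-based in the feature: e.g. requesting 8
* submission and 8 completion queues encodes NSQR = NCQR = 7, and the
* controller returns the number of queues it actually allocated (also
* zero-based) in completion dword 0.
*/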
3549 nq.b.nq_nsq = nvme->n_submission_queues - 1;
3550 nq.b.nq_ncq = nvme->n_completion_queues - 1;
3551
3552 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
3553 &nq.r);
3554
3555 if (ret == 0) {
3556 /*
3557 * Never use more than the requested number of queues.
3558 */
3559 nvme->n_submission_queues = MIN(nvme->n_submission_queues,
3560 nq.b.nq_nsq + 1);
3561 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3562 nq.b.nq_ncq + 1);
3563 }
3564
3565 return (ret);
3566 }
3567
3568 static int
3569 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
3570 {
3571 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3572 nvme_create_queue_dw10_t dw10 = { 0 };
3573 nvme_create_cq_dw11_t c_dw11 = { 0 };
3574 int ret;
3575
3576 dw10.b.q_qid = cq->ncq_id;
3577 dw10.b.q_qsize = cq->ncq_nentry - 1;
3578
3579 c_dw11.b.cq_pc = 1;
3580 c_dw11.b.cq_ien = 1;
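/*
* Spread completion queues across the available interrupt vectors;
* nvme_intr() walks the queues assigned to a vector in steps of
* n_intr_cnt using this same mapping.
*/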
3581 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
3582
3583 cmd->nc_sqid = 0;
3584 cmd->nc_callback = nvme_wakeup_cmd;
3585 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
3586 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3587 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
3588 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
3589
3590 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3591
3592 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3593 dev_err(nvme->n_dip, CE_WARN,
3594 "!CREATE CQUEUE failed with sct = %x, sc = %x",
3595 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3596 }
3597
3598 nvme_free_cmd(cmd);
3599
3600 return (ret);
3601 }
3602
3603 static int
3604 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
3605 {
3606 nvme_cq_t *cq = qp->nq_cq;
3607 nvme_cmd_t *cmd;
3608 nvme_create_queue_dw10_t dw10 = { 0 };
3609 nvme_create_sq_dw11_t s_dw11 = { 0 };
3610 int ret;
3611
3612 /*
3613 * It is possible to have more qpairs than completion queues,
3614 * and when the idx > ncq_id, that completion queue is shared
3615 * and has already been created.
3616 */
3617 if (idx <= cq->ncq_id &&
3618 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
3619 return (DDI_FAILURE);
3620
3621 dw10.b.q_qid = idx;
3622 dw10.b.q_qsize = qp->nq_nentry - 1;
3623
3624 s_dw11.b.sq_pc = 1;
3625 s_dw11.b.sq_cqid = cq->ncq_id;
3626
3627 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3628 cmd->nc_sqid = 0;
3629 cmd->nc_callback = nvme_wakeup_cmd;
3630 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
3631 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3632 cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
3633 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
3634
3635 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3636
3637 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3638 dev_err(nvme->n_dip, CE_WARN,
3639 "!CREATE SQUEUE failed with sct = %x, sc = %x",
3640 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3641 }
3642
3643 nvme_free_cmd(cmd);
3644
3645 return (ret);
3646 }
3647
3648 static boolean_t
3649 nvme_reset(nvme_t *nvme, boolean_t quiesce)
3650 {
3651 nvme_reg_csts_t csts;
3652 int i;
3653
3654 /*
3655 * If the device is gone, do not try to interact with it. We define
3656 * that resetting such a device is impossible, and always fails.
3657 */
3658 if (nvme_ctrl_is_gone(nvme)) {
3659 return (B_FALSE);
3660 }
3661
3662 nvme_put32(nvme, NVME_REG_CC, 0);
3663
3664 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3665 if (csts.b.csts_rdy == 1) {
3666 nvme_put32(nvme, NVME_REG_CC, 0);
3667
3668 /*
3669 * The timeout value is from the Controller Capabilities
3670 * register (CAP.TO, section 3.1.1). This is the worst case
3671 * time to wait for CSTS.RDY to transition from 1 to 0 after
3672 * CC.EN transitions from 1 to 0.
3673 *
3674 * The timeout units are in 500 ms units, and we are delaying
3675 * in 50ms chunks, hence counting to n_timeout * 10.
3676 */
3677 for (i = 0; i < nvme->n_timeout * 10; i++) {
3678 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3679 if (csts.b.csts_rdy == 0)
3680 break;
3681
3682 /*
3683 * Quiescing drivers should not use locks or timeouts,
3684 * so if this is the quiesce path, use a quiesce-safe
3685 * delay.
3686 */
3687 if (quiesce) {
3688 drv_usecwait(50000);
3689 } else {
3690 delay(drv_usectohz(50000));
3691 }
3692 }
3693 }
3694
3695 nvme_put32(nvme, NVME_REG_AQA, 0);
3696 nvme_put32(nvme, NVME_REG_ASQ, 0);
3697 nvme_put32(nvme, NVME_REG_ACQ, 0);
3698
3699 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3700 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
3701 }
3702
3703 static void
3704 nvme_shutdown(nvme_t *nvme, boolean_t quiesce)
3705 {
3706 nvme_reg_cc_t cc;
3707 nvme_reg_csts_t csts;
3708 int i;
3709
3710 /*
3711 * Do not try to interact with the device if it is gone. Since it is
3712 * not there, in some sense it must already be shut down anyway.
3713 */
3714 if (nvme_ctrl_is_gone(nvme)) {
3715 return;
3716 }
3717
3718 cc.r = nvme_get32(nvme, NVME_REG_CC);
3719 cc.b.cc_shn = NVME_CC_SHN_NORMAL;
3720 nvme_put32(nvme, NVME_REG_CC, cc.r);
3721
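/*
* Wait up to one second (10 iterations of 100ms) for the shutdown
* processing to complete.
*/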
3722 for (i = 0; i < 10; i++) {
3723 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3724 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
3725 break;
3726
3727 if (quiesce) {
3728 drv_usecwait(100000);
3729 } else {
3730 delay(drv_usectohz(100000));
3731 }
3732 }
3733 }
3734
3735 /*
3736 * Return length of string without trailing spaces.
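* Assumes the string contains at least one non-space character; an
* all-space string would make the loop below underflow len.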
3737 */
3738 static size_t
3739 nvme_strlen(const char *str, size_t len)
3740 {
3741 if (len <= 0)
3742 return (0);
3743
3744 while (str[--len] == ' ')
3745 ;
3746
3747 return (++len);
3748 }
3749
3750 static void
3751 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
3752 {
3753 ulong_t bsize = 0;
3754 char *msg = "";
3755
3756 if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
3757 goto err;
3758
3759 if (!ISP2(bsize)) {
3760 msg = ": not a power of 2";
3761 goto err;
3762 }
3763
3764 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
3765 msg = ": too low";
3766 goto err;
3767 }
3768
3769 nvme->n_min_block_size = bsize;
3770 return;
3771
3772 err:
3773 dev_err(nvme->n_dip, CE_WARN,
3774 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
3775 "for model '%s'%s", val, model, msg);
3776
3777 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3778 }
3779
3780 static void
3781 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
3782 boolean_t *b)
3783 {
3784 if (strcmp(val, "on") == 0 ||
3785 strcmp(val, "true") == 0)
3786 *b = B_TRUE;
3787 else if (strcmp(val, "off") == 0 ||
3788 strcmp(val, "false") == 0)
3789 *b = B_FALSE;
3790 else
3791 dev_err(nvme->n_dip, CE_WARN,
3792 "!nvme-config-list: invalid value for %s '%s'"
3793 " for model '%s', ignoring", name, val, model);
3794 }
3795
3796 static void
3797 nvme_config_list(nvme_t *nvme)
3798 {
3799 char **config_list;
3800 uint_t nelem;
3801 int rv;
3802
3803 /*
3804 * We're following the pattern of 'sd-config-list' here, but extend it.
3805 * Instead of two we have three separate strings for "model", "fwrev",
3806 * and "name-value-list".
3807 */
3808 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
3809 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
3810
3811 if (rv != DDI_PROP_SUCCESS) {
3812 if (rv == DDI_PROP_CANNOT_DECODE) {
3813 dev_err(nvme->n_dip, CE_WARN,
3814 "!nvme-config-list: cannot be decoded");
3815 }
3816
3817 return;
3818 }
3819
3820 if ((nelem % 3) != 0) {
3821 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
3822 "triplets of <model>/<fwrev>/<name-value-list> strings ");
3823 goto out;
3824 }
3825
3826 for (uint_t i = 0; i < nelem; i += 3) {
3827 char *model = config_list[i];
3828 char *fwrev = config_list[i + 1];
3829 char *nvp, *save_nv;
3830 size_t id_model_len, id_fwrev_len;
3831
3832 id_model_len = nvme_strlen(nvme->n_idctl->id_model,
3833 sizeof (nvme->n_idctl->id_model));
3834
3835 if (strlen(model) != id_model_len)
3836 continue;
3837
3838 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
3839 continue;
3840
3841 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
3842 sizeof (nvme->n_idctl->id_fwrev));
3843
3844 if (strlen(fwrev) != 0) {
3845 boolean_t match = B_FALSE;
3846 char *fwr, *last_fw;
3847
3848 for (fwr = strtok_r(fwrev, ",", &last_fw);
3849 fwr != NULL;
3850 fwr = strtok_r(NULL, ",", &last_fw)) {
3851 if (strlen(fwr) != id_fwrev_len)
3852 continue;
3853
3854 if (strncmp(fwr, nvme->n_idctl->id_fwrev,
3855 id_fwrev_len) == 0)
3856 match = B_TRUE;
3857 }
3858
3859 if (!match)
3860 continue;
3861 }
3862
3863 /*
3864 * We should now have a comma-separated list of name:value
3865 * pairs.
3866 */
3867 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
3868 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
3869 char *name = nvp;
3870 char *val = strchr(nvp, ':');
3871
3872 if (val == NULL || name == val) {
3873 dev_err(nvme->n_dip, CE_WARN,
3874 "!nvme-config-list: <name-value-list> "
3875 "for model '%s' is malformed", model);
3876 goto out;
3877 }
3878
3879 /*
3880 * Null-terminate 'name', move 'val' past ':' sep.
3881 */
3882 *val++ = '\0';
3883
3884 /*
3885 * Process the name:val pairs that we know about.
3886 */
3887 if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
3888 nvme_config_boolean(nvme, model, name, val,
3889 &nvme->n_ignore_unknown_vendor_status);
3890 } else if (strcmp(name, "min-phys-block-size") == 0) {
3891 nvme_config_min_block_size(nvme, model, val);
3892 } else if (strcmp(name, "volatile-write-cache") == 0) {
3893 nvme_config_boolean(nvme, model, name, val,
3894 &nvme->n_write_cache_enabled);
3895 } else {
3896 /*
3897 * Unknown 'name'.
3898 */
3899 dev_err(nvme->n_dip, CE_WARN,
3900 "!nvme-config-list: unknown config '%s' "
3901 "for model '%s', ignoring", name, model);
3902 }
3903 }
3904 }
3905
3906 out:
3907 ddi_prop_free(config_list);
3908 }
3909
3910 static void
3911 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
3912 {
3913 /*
3914 * Section 7.7 of the spec describes how to get a unique ID for
3915 * the controller: the vendor ID, the model name and the serial
3916 * number shall be unique when combined.
3917 *
3918 * If a namespace has no EUI64 we use the above and add the hex
3919 * namespace ID to get a unique ID for the namespace.
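*
* The resulting devid has the form <vid>-<model>-<serial>-<nsid>, with
* the vendor ID and namespace ID rendered in hex.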
3920 */
3921 char model[sizeof (nvme->n_idctl->id_model) + 1];
3922 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
3923
3924 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
3925 bcopy(nvme->n_idctl->id_serial, serial,
3926 sizeof (nvme->n_idctl->id_serial));
3927
3928 model[sizeof (nvme->n_idctl->id_model)] = '\0';
3929 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
3930
3931 nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
3932 nvme->n_idctl->id_vid, model, serial, nsid);
3933 }
3934
3935 static nvme_identify_nsid_list_t *
3936 nvme_update_nsid_list(nvme_t *nvme, int cns)
3937 {
3938 nvme_identify_nsid_list_t *nslist;
3939
3940 /*
3941 * We currently don't handle cases where there are more than
3942 * 1024 active namespaces, requiring several IDENTIFY commands.
3943 */
3944 if (nvme_identify_int(nvme, 0, cns, (void **)&nslist))
3945 return (nslist);
3946
3947 return (NULL);
3948 }
3949
3950 nvme_namespace_t *
3951 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid)
3952 {
3953 ASSERT3U(nsid, !=, 0);
3954 ASSERT3U(nsid, <=, nvme->n_namespace_count);
3955 return (&nvme->n_ns[nsid - 1]);
3956 }
3957
3958 static boolean_t
3959 nvme_allocated_ns(nvme_namespace_t *ns)
3960 {
3961 nvme_t *nvme = ns->ns_nvme;
3962 uint32_t i;
3963
3964 ASSERT(nvme_mgmt_lock_held(nvme));
3965
3966 /*
3967 * If supported, update the list of allocated namespace IDs.
3968 */
3969 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) &&
3970 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
3971 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3972 NVME_IDENTIFY_NSID_ALLOC_LIST);
3973 boolean_t found = B_FALSE;
3974
3975 /*
3976 * When namespace management is supported, this really shouldn't
3977 * be NULL. Treat all namespaces as allocated if it is.
3978 */
3979 if (nslist == NULL)
3980 return (B_TRUE);
3981
3982 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3983 if (ns->ns_id == 0)
3984 break;
3985
3986 if (ns->ns_id == nslist->nl_nsid[i])
3987 found = B_TRUE;
3988 }
3989
3990 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3991 return (found);
3992 } else {
3993 /*
3994 * If namespace management isn't supported, report all
3995 * namespaces as allocated.
3996 */
3997 return (B_TRUE);
3998 }
3999 }
4000
4001 static boolean_t
4002 nvme_active_ns(nvme_namespace_t *ns)
4003 {
4004 nvme_t *nvme = ns->ns_nvme;
4005 uint64_t *ptr;
4006 uint32_t i;
4007
4008 ASSERT(nvme_mgmt_lock_held(nvme));
4009
4010 /*
4011 * If supported, update the list of active namespace IDs.
4012 */
4013 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) {
4014 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
4015 NVME_IDENTIFY_NSID_LIST);
4016 boolean_t found = B_FALSE;
4017
4018 /*
4019 * This really shouldn't be NULL for a controller that supports the
4020 * active namespace ID list. Treat all namespaces as active if it is.
4021 */
4022 if (nslist == NULL)
4023 return (B_TRUE);
4024
4025 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
4026 if (ns->ns_id == 0)
4027 break;
4028
4029 if (ns->ns_id == nslist->nl_nsid[i])
4030 found = B_TRUE;
4031 }
4032
4033 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
4034 return (found);
4035 }
4036
4037 /*
4038 * Workaround for revision 1.0:
4039 * Check whether the IDENTIFY NAMESPACE data is zero-filled.
4040 */
4041 for (ptr = (uint64_t *)ns->ns_idns;
4042 ptr != (uint64_t *)(ns->ns_idns + 1);
4043 ptr++) {
4044 if (*ptr != 0) {
4045 return (B_TRUE);
4046 }
4047 }
4048
4049 return (B_FALSE);
4050 }
4051
4052 static int
4053 nvme_init_ns(nvme_t *nvme, uint32_t nsid)
4054 {
4055 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
4056 nvme_identify_nsid_t *idns;
4057 nvme_ns_state_t orig_state;
4058
4059 ns->ns_nvme = nvme;
4060
4061 ASSERT(nvme_mgmt_lock_held(nvme));
4062
4063 /*
4064 * Because we might rescan a namespace after boot, a failure to
4065 * identify it here would leave us in a bad spot. We need to do
4066 * something about this longer term, but it's not clear how exactly we
4067 * would recover right now.
4068 */
4069 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4070 (void **)&idns)) {
4071 dev_err(nvme->n_dip, CE_WARN,
4072 "!failed to identify namespace %d", nsid);
4073 return (DDI_FAILURE);
4074 }
4075
4076 if (ns->ns_idns != NULL)
4077 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
4078
4079 ns->ns_idns = idns;
4080 ns->ns_id = nsid;
4081
4082 /*
4083 * Save the current state so we can tell what changed. Look at the
4084 * current state of the device. We will flag active devices that should
4085 * be ignored after this.
4086 */
4087 orig_state = ns->ns_state;
4088 if (nvme_active_ns(ns)) {
4089 /*
4090 * If the device previously had blkdev active, then that is its
4091 * current state. Otherwise, we consider this an upgrade and
4092 * just set it to not ignored.
4093 */
4094 if (orig_state == NVME_NS_STATE_ATTACHED) {
4095 ns->ns_state = NVME_NS_STATE_ATTACHED;
4096 } else {
4097 ns->ns_state = NVME_NS_STATE_NOT_IGNORED;
4098 }
4099 } else if (nvme_allocated_ns(ns)) {
4100 ns->ns_state = NVME_NS_STATE_ALLOCATED;
4101 } else {
4102 ns->ns_state = NVME_NS_STATE_UNALLOCATED;
4103 }
4104
4105 ns->ns_block_count = idns->id_nsize;
4106 ns->ns_block_size =
4107 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
4108 ns->ns_best_block_size = ns->ns_block_size;
4109
4110 /*
4111 * Get the EUI64 if present.
4112 */
4113 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
4114 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
4115
4116 /*
4117 * Get the NGUID if present.
4118 */
4119 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
4120 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
4121
4122 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
4123 if (*(uint64_t *)ns->ns_eui64 == 0)
4124 nvme_prepare_devid(nvme, ns->ns_id);
4125
4126 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id);
4127
4128 /*
4129 * Find the LBA format with no metadata and the best relative
4130 * performance. A value of 3 means "degraded", 0 is best.
4131 */
4132 for (uint32_t j = 0, last_rp = 3; j <= idns->id_nlbaf; j++) {
4133 if (idns->id_lbaf[j].lbaf_lbads == 0)
4134 break;
4135 if (idns->id_lbaf[j].lbaf_ms != 0)
4136 continue;
4137 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
4138 continue;
4139 last_rp = idns->id_lbaf[j].lbaf_rp;
4140 ns->ns_best_block_size =
4141 1 << idns->id_lbaf[j].lbaf_lbads;
4142 }
4143
4144 if (ns->ns_best_block_size < nvme->n_min_block_size)
4145 ns->ns_best_block_size = nvme->n_min_block_size;
4146
4147 /*
4148 * We currently don't support namespaces that are inactive, or use
4149 * either:
4150 * - protection information
4151 * - illegal block size (< 512)
4152 */
4153 if (ns->ns_state >= NVME_NS_STATE_NOT_IGNORED) {
4154 if (idns->id_dps.dp_pinfo) {
4155 dev_err(nvme->n_dip, CE_WARN,
4156 "!ignoring namespace %d, unsupported feature: "
4157 "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
4158 ns->ns_state = NVME_NS_STATE_ACTIVE;
4159 }
4160
4161 if (ns->ns_block_size < 512) {
4162 dev_err(nvme->n_dip, CE_WARN,
4163 "!ignoring namespace %d, unsupported block size "
4164 "%"PRIu64, nsid, (uint64_t)ns->ns_block_size);
4165 ns->ns_state = NVME_NS_STATE_ACTIVE;
4166 }
4167 }
4168
4169 /*
4170 * If we were previously in a state where blkdev was active and suddenly
4171 * we think it should not be because ignore is set, then something has
4172 * gone behind our backs and this is not going to be recoverable.
4173 */
4174 if (orig_state == NVME_NS_STATE_ATTACHED &&
4175 ns->ns_state != NVME_NS_STATE_ATTACHED) {
4176 dev_err(nvme->n_dip, CE_PANIC, "namespace %u state "
4177 "unexpectedly changed and removed blkdev support!", nsid);
4178 }
4179
4180 /*
4181 * Keep a count of namespaces which are attachable.
4182 * See comments in nvme_bd_driveinfo() to understand its effect.
4183 */
4184 if (orig_state > NVME_NS_STATE_ACTIVE) {
4185 /*
4186 * Was attachable previously, but no longer is.
4187 * Discount it.
4188 */
4189 if (ns->ns_state < NVME_NS_STATE_NOT_IGNORED)
4190 nvme->n_namespaces_attachable--;
4191 } else if (ns->ns_state >= NVME_NS_STATE_NOT_IGNORED) {
4192 /*
4193 * Previously ignored, but now not. Count it.
4194 */
4195 nvme->n_namespaces_attachable++;
4196 }
4197
4198 return (DDI_SUCCESS);
4199 }
4200
4201 static boolean_t
4202 nvme_bd_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
4203 {
4204 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
4205 int ret;
4206
4207 ASSERT(nvme_mgmt_lock_held(nvme));
4208
4209 if (!nvme_ns_state_check(ns, com, nvme_bd_attach_states)) {
4210 return (B_FALSE);
4211 }
4212
4213 if (ns->ns_bd_hdl == NULL) {
4214 bd_ops_t ops = nvme_bd_ops;
4215
4216 if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
4217 ops.o_free_space = NULL;
4218
4219 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
4220 KM_SLEEP);
4221
4222 if (ns->ns_bd_hdl == NULL) {
4223 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
4224 "handle for namespace id %u", com->nioc_nsid);
4225 return (nvme_ioctl_error(com,
4226 NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0));
4227 }
4228 }
4229
4230 nvme_mgmt_bd_start(nvme);
4231 ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
4232 nvme_mgmt_bd_end(nvme);
4233 if (ret != DDI_SUCCESS) {
4234 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH,
4235 0, 0));
4236 }
4237
4238 ns->ns_state = NVME_NS_STATE_ATTACHED;
4239
4240 return (B_TRUE);
4241 }
4242
4243 static boolean_t
4244 nvme_bd_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
4245 {
4246 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
4247 int ret;
4248
4249 ASSERT(nvme_mgmt_lock_held(nvme));
4250
4251 if (!nvme_ns_state_check(ns, com, nvme_bd_detach_states)) {
4252 return (B_FALSE);
4253 }
4254
4255 nvme_mgmt_bd_start(nvme);
4256 ASSERT3P(ns->ns_bd_hdl, !=, NULL);
4257 ret = bd_detach_handle(ns->ns_bd_hdl);
4258 nvme_mgmt_bd_end(nvme);
4259
4260 if (ret != DDI_SUCCESS) {
4261 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0,
4262 0));
4263 }
4264
4265 ns->ns_state = NVME_NS_STATE_NOT_IGNORED;
4266 return (B_TRUE);
4267
4268 }
4269
4270 /*
4271 * Rescan the namespace information associated with the namespaces indicated by
4272 * ioc. They should not be attached to blkdev right now.
4273 */
4274 static void
4275 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid)
4276 {
4277 ASSERT(nvme_mgmt_lock_held(nvme));
4278 ASSERT3U(nsid, !=, 0);
4279
4280 if (nsid != NVME_NSID_BCAST) {
4281 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
4282
4283 ASSERT3U(ns->ns_state, <, NVME_NS_STATE_ATTACHED);
4284 (void) nvme_init_ns(nvme, nsid);
4285 return;
4286 }
4287
4288 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4289 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
4290
4291 ASSERT3U(ns->ns_state, <, NVME_NS_STATE_ATTACHED);
4292 (void) nvme_init_ns(nvme, i);
4293 }
4294 }
4295
4296 typedef struct nvme_quirk_table {
4297 uint16_t nq_vendor_id;
4298 uint16_t nq_device_id;
4299 nvme_quirk_t nq_quirks;
4300 } nvme_quirk_table_t;
4301
4302 static const nvme_quirk_table_t nvme_quirks[] = {
4303 { 0x1987, 0x5018, NVME_QUIRK_START_CID }, /* Phison E18 */
4304 };
4305
4306 static void
4307 nvme_detect_quirks(nvme_t *nvme)
4308 {
4309 for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) {
4310 const nvme_quirk_table_t *nqt = &nvme_quirks[i];
4311
4312 if (nqt->nq_vendor_id == nvme->n_vendor_id &&
4313 nqt->nq_device_id == nvme->n_device_id) {
4314 nvme->n_quirks = nqt->nq_quirks;
4315 return;
4316 }
4317 }
4318 }
4319
4320 static int
4321 nvme_init(nvme_t *nvme)
4322 {
4323 nvme_reg_cc_t cc = { 0 };
4324 nvme_reg_aqa_t aqa = { 0 };
4325 nvme_reg_asq_t asq = { 0 };
4326 nvme_reg_acq_t acq = { 0 };
4327 nvme_reg_cap_t cap;
4328 nvme_reg_vs_t vs;
4329 nvme_reg_csts_t csts;
4330 int i = 0;
4331 uint16_t nqueues;
4332 uint_t tq_threads;
4333 char model[sizeof (nvme->n_idctl->id_model) + 1];
4334 char *vendor, *product;
4335 uint32_t nsid;
4336
4337 /* Check controller version */
4338 vs.r = nvme_get32(nvme, NVME_REG_VS);
4339 nvme->n_version.v_major = vs.b.vs_mjr;
4340 nvme->n_version.v_minor = vs.b.vs_mnr;
4341 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d\n",
4342 nvme->n_version.v_major, nvme->n_version.v_minor);
4343
4344 if (nvme->n_version.v_major > nvme_version_major) {
4345 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
4346 nvme_version_major);
4347 if (nvme->n_strict_version)
4348 goto fail;
4349 }
4350
4351 /* retrieve controller configuration */
4352 cap.r = nvme_get64(nvme, NVME_REG_CAP);
4353
4354 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
4355 dev_err(nvme->n_dip, CE_WARN,
4356 "!NVM command set not supported by hardware");
4357 goto fail;
4358 }
4359
4360 nvme->n_nssr_supported = cap.b.cap_nssrs;
4361 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
4362 nvme->n_timeout = cap.b.cap_to;
4363 nvme->n_arbitration_mechanisms = cap.b.cap_ams;
4364 nvme->n_cont_queues_reqd = cap.b.cap_cqr;
4365 nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
4366
4367 /*
4368 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
4369 * the base page size of 4k (1<<12), so add 12 here to get the real
4370 * page size value.
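* For example, MPSMIN = 0 together with the x86 PAGESHIFT of 12 yields
* n_pageshift = 12 and a 4096 byte n_pagesize.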
4371 */
4372 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
4373 cap.b.cap_mpsmax + 12);
4374 nvme->n_pagesize = 1UL << (nvme->n_pageshift);
4375
4376 /*
4377 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
4378 */
4379 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
4380 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4381
4382 /*
4383 * Set up PRP DMA to transfer 1 page-aligned page at a time.
4384 * Maxxfer may be increased after we identified the controller limits.
4385 */
4386 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
4387 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4388 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
4389 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
4390
4391 /*
4392 * Reset controller if it's still in ready state.
4393 */
4394 if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
4395 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
4396 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4397 nvme->n_dead = B_TRUE;
4398 goto fail;
4399 }
4400
4401 /*
4402 * Create the cq array with one completion queue to be assigned
4403 * to the admin queue pair and a limited number of taskqs (4).
4404 */
4405 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
4406 DDI_SUCCESS) {
4407 dev_err(nvme->n_dip, CE_WARN,
4408 "!failed to pre-allocate admin completion queue");
4409 goto fail;
4410 }
4411 /*
4412 * Create the admin queue pair.
4413 */
4414 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
4415 != DDI_SUCCESS) {
4416 dev_err(nvme->n_dip, CE_WARN,
4417 "!unable to allocate admin qpair");
4418 goto fail;
4419 }
4420 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
4421 nvme->n_ioq[0] = nvme->n_adminq;
4422
4423 if (nvme->n_quirks & NVME_QUIRK_START_CID)
4424 nvme->n_adminq->nq_next_cmd++;
4425
4426 nvme->n_progress |= NVME_ADMIN_QUEUE;
4427
4428 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4429 "admin-queue-len", nvme->n_admin_queue_len);
4430
4431 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
4432 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
4433 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
4434
4435 ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
4436 ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
4437
4438 nvme_put32(nvme, NVME_REG_AQA, aqa.r);
4439 nvme_put64(nvme, NVME_REG_ASQ, asq);
4440 nvme_put64(nvme, NVME_REG_ACQ, acq);
4441
4442 cc.b.cc_ams = 0; /* use Round-Robin arbitration */
4443 cc.b.cc_css = 0; /* use NVM command set */
4444 cc.b.cc_mps = nvme->n_pageshift - 12;
4445 cc.b.cc_shn = 0; /* no shutdown in progress */
4446 cc.b.cc_en = 1; /* enable controller */
4447 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */
4448 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */
4449
4450 nvme_put32(nvme, NVME_REG_CC, cc.r);
4451
4452 /*
4453 * Wait for the controller to become ready.
4454 */
4455 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4456 if (csts.b.csts_rdy == 0) {
4457 for (i = 0; i != nvme->n_timeout * 10; i++) {
4458 delay(drv_usectohz(50000));
4459 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4460
4461 if (csts.b.csts_cfs == 1) {
4462 dev_err(nvme->n_dip, CE_WARN,
4463 "!controller fatal status at init");
4464 ddi_fm_service_impact(nvme->n_dip,
4465 DDI_SERVICE_LOST);
4466 nvme->n_dead = B_TRUE;
4467 goto fail;
4468 }
4469
4470 if (csts.b.csts_rdy == 1)
4471 break;
4472 }
4473 }
4474
4475 if (csts.b.csts_rdy == 0) {
4476 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
4477 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4478 nvme->n_dead = B_TRUE;
4479 goto fail;
4480 }
4481
4482 /*
4483 * Assume an abort command limit of 1. We'll destroy and re-init
4484 * that later when we know the true abort command limit.
4485 */
4486 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
4487
4488 /*
4489 * Set up initial interrupt for admin queue.
4490 */
4491 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
4492 != DDI_SUCCESS) &&
4493 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
4494 != DDI_SUCCESS) &&
4495 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
4496 != DDI_SUCCESS)) {
4497 dev_err(nvme->n_dip, CE_WARN,
4498 "!failed to set up initial interrupt");
4499 goto fail;
4500 }
4501
4502 /*
4503 * Post an asynchronous event command to catch errors.
4504 * We assume the asynchronous events are supported as required by
4505 * specification (Figure 40 in section 5 of NVMe 1.2).
4506 * However, since at least qemu does not follow the specification,
4507 * we need a mechanism to protect ourselves.
4508 */
4509 nvme->n_async_event_supported = B_TRUE;
4510 nvme_async_event(nvme);
4511
4512 /*
4513 * Identify Controller
4514 */
4515 if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL,
4516 (void **)&nvme->n_idctl)) {
4517 dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller");
4518 goto fail;
4519 }
4520
4521 /*
4522 * Get the common namespace information if available. If not, we use the
4523 * information for nsid 1.
4524 */
4525 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
4526 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
4527 nsid = NVME_NSID_BCAST;
4528 } else {
4529 nsid = 1;
4530 }
4531
4532 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4533 (void **)&nvme->n_idcomns)) {
4534 dev_err(nvme->n_dip, CE_WARN, "!failed to identify common "
4535 "namespace information");
4536 goto fail;
4537 }
4538 /*
4539 * Process nvme-config-list (if present) in nvme.conf.
4540 */
4541 nvme_config_list(nvme);
4542
4543 /*
4544 * Get Vendor & Product ID
4545 */
4546 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
4547 model[sizeof (nvme->n_idctl->id_model)] = '\0';
4548 sata_split_model(model, &vendor, &product);
4549
4550 if (vendor == NULL)
4551 nvme->n_vendor = strdup("NVMe");
4552 else
4553 nvme->n_vendor = strdup(vendor);
4554
4555 nvme->n_product = strdup(product);
4556
4557 /*
4558 * Get controller limits.
4559 */
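/*
* Clamp the async event limit to at least NVME_MIN_ASYNC_EVENT_LIMIT
* and to no more than the smallest of a tenth of the admin queue
* length, the controller's advertised limit (AERL is a zero-based
* value, hence the + 1), and the configured default.
*/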
4560 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
4561 MIN(nvme->n_admin_queue_len / 10,
4562 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
4563
4564 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4565 "async-event-limit", nvme->n_async_event_limit);
4566
4567 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
4568
4569 /*
4570 * Reinitialize the semaphore with the true abort command limit
4571 * supported by the hardware. It's not necessary to disable interrupts
4572 * as only command aborts use the semaphore, and no commands are
4573 * executed or aborted while we're here.
4574 */
4575 sema_destroy(&nvme->n_abort_sema);
4576 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
4577 SEMA_DRIVER, NULL);
4578
4579 nvme->n_progress |= NVME_CTRL_LIMITS;
4580
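/*
* MDTS is reported as a power-of-two multiple of the page size; e.g.
* with 4k pages an MDTS value of 5 allows transfers of up to
* 4k << 5 = 128k. A value of 0 means the controller reports no limit,
* which we cap at 65536 pages here.
*/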
4581 if (nvme->n_idctl->id_mdts == 0)
4582 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
4583 else
4584 nvme->n_max_data_transfer_size =
4585 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
4586
4587 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
4588
4589 /*
4590 * Limit n_max_data_transfer_size to what we can handle in one PRP.
4591 * Chained PRPs are currently unsupported.
4592 *
4593 * This is a no-op on hardware which doesn't support a transfer size
4594 * big enough to require chained PRPs.
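*
* For example, with 4k pages a single PRP list holds 512 64-bit
* entries, capping a transfer at 512 * 4k = 2M.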
4595 */
4596 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
4597 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
4598
4599 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
4600
4601 /*
4602 * Make sure the minimum required queue entry size is not larger, and the
4603 * maximum supported queue entry size is not smaller, than the defaults we use.
4604 */
4605
4606 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
4607 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
4608 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
4609 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
4610 goto fail;
4611
4612 /*
4613 * Check for the presence of a Volatile Write Cache. If present,
4614 * enable or disable based on the value of the property
4615 * volatile-write-cache-enable (default is enabled).
4616 */
4617 nvme->n_write_cache_present =
4618 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
4619
4620 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4621 "volatile-write-cache-present",
4622 nvme->n_write_cache_present ? 1 : 0);
4623
4624 if (!nvme->n_write_cache_present) {
4625 nvme->n_write_cache_enabled = B_FALSE;
4626 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
4627 != 0) {
4628 dev_err(nvme->n_dip, CE_WARN,
4629 "!failed to %sable volatile write cache",
4630 nvme->n_write_cache_enabled ? "en" : "dis");
4631 /*
4632 * Assume the cache is (still) enabled.
4633 */
4634 nvme->n_write_cache_enabled = B_TRUE;
4635 }
4636
4637 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4638 "volatile-write-cache-enable",
4639 nvme->n_write_cache_enabled ? 1 : 0);
4640
4641 /*
4642 * Get number of supported namespaces and allocate namespace array.
4643 */
4644 nvme->n_namespace_count = nvme->n_idctl->id_nn;
4645
4646 if (nvme->n_namespace_count == 0) {
4647 dev_err(nvme->n_dip, CE_WARN,
4648 "!controllers without namespaces are not supported");
4649 goto fail;
4650 }
4651
4652 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
4653 nvme->n_namespace_count, KM_SLEEP);
4654
4655 /*
4656 * Try to set up MSI/MSI-X interrupts.
4657 */
4658 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
4659 != 0) {
4660 nvme_release_interrupts(nvme);
4661
4662 nqueues = MIN(UINT16_MAX, ncpus);
4663
4664 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
4665 nqueues) != DDI_SUCCESS) &&
4666 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
4667 nqueues) != DDI_SUCCESS)) {
4668 dev_err(nvme->n_dip, CE_WARN,
4669 "!failed to set up MSI/MSI-X interrupts");
4670 goto fail;
4671 }
4672 }
4673
4674 /*
4675 * Create I/O queue pairs.
4676 */
4677
4678 if (nvme_set_nqueues(nvme) != 0) {
4679 dev_err(nvme->n_dip, CE_WARN,
4680 "!failed to set number of I/O queues to %d",
4681 nvme->n_intr_cnt);
4682 goto fail;
4683 }
4684
4685 /*
4686 * Reallocate I/O queue array
4687 */
4688 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
4689 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
4690 (nvme->n_submission_queues + 1), KM_SLEEP);
4691 nvme->n_ioq[0] = nvme->n_adminq;
4692
4693 /*
4694 * There should always be at least as many submission queues
4695 * as completion queues.
4696 */
4697 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
4698
4699 nvme->n_ioq_count = nvme->n_submission_queues;
4700
4701 nvme->n_io_squeue_len =
4702 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
4703
4704 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
4705 nvme->n_io_squeue_len);
4706
4707 /*
4708 * Pre-allocate completion queues.
4709 * When there are the same number of submission and completion
4710 * queues there is no value in having a larger completion
4711 * queue length.
4712 */
4713 if (nvme->n_submission_queues == nvme->n_completion_queues)
4714 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4715 nvme->n_io_squeue_len);
4716
4717 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4718 nvme->n_max_queue_entries);
4719
4720 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
4721 nvme->n_io_cqueue_len);
4722
4723 /*
4724 * Assign an equal number of taskq threads to each completion
4725 * queue, capping the total number of threads at the number
4726 * of CPUs.
4727 */
4728 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
4729
4730 /*
4731 * In case the calculation above is zero, we need at least one
4732 * thread per completion queue.
4733 */
4734 tq_threads = MAX(1, tq_threads);
4735
4736 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
4737 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
4738 dev_err(nvme->n_dip, CE_WARN,
4739 "!failed to pre-allocate completion queues");
4740 goto fail;
4741 }
4742
4743 /*
4744 * If we use fewer completion queues than interrupt vectors, return
4745 * some of the interrupt vectors back to the system.
4746 */
4747 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
4748 nvme_release_interrupts(nvme);
4749
4750 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
4751 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
4752 dev_err(nvme->n_dip, CE_WARN,
4753 "!failed to reduce number of interrupts");
4754 goto fail;
4755 }
4756 }
4757
4758 /*
4759 * Alloc & register I/O queue pairs
4760 */
4761
4762 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
4763 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
4764 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
4765 dev_err(nvme->n_dip, CE_WARN,
4766 "!unable to allocate I/O qpair %d", i);
4767 goto fail;
4768 }
4769
4770 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
4771 dev_err(nvme->n_dip, CE_WARN,
4772 "!unable to create I/O qpair %d", i);
4773 goto fail;
4774 }
4775 }
4776
4777 /*
4778 * Post more asynchronous events commands to reduce event reporting
4779 * latency as suggested by the spec.
4780 */
4781 if (nvme->n_async_event_supported) {
4782 for (i = 1; i != nvme->n_async_event_limit; i++)
4783 nvme_async_event(nvme);
4784 }
4785
4786 return (DDI_SUCCESS);
4787
4788 fail:
4789 (void) nvme_reset(nvme, B_FALSE);
4790 return (DDI_FAILURE);
4791 }
4792
4793 static uint_t
4794 nvme_intr(caddr_t arg1, caddr_t arg2)
4795 {
4796 nvme_t *nvme = (nvme_t *)arg1;
4797 int inum = (int)(uintptr_t)arg2;
4798 int ccnt = 0;
4799 int qnum;
4800
4801 if (inum >= nvme->n_intr_cnt)
4802 return (DDI_INTR_UNCLAIMED);
4803
4804 if (nvme->n_dead) {
4805 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
4806 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
4807 }
4808
4809 /*
4810 * The interrupt vector a queue uses is calculated as queue_idx %
4811 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
4812 * in steps of n_intr_cnt to process all queues using this vector.
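* For example, with 4 interrupt vectors, vector 1 services completion
* queues 1, 5, 9, and so on.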
4813 */
4814 for (qnum = inum;
4815 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
4816 qnum += nvme->n_intr_cnt) {
4817 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
4818 }
4819
4820 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
4821 }
4822
4823 static void
4824 nvme_release_interrupts(nvme_t *nvme)
4825 {
4826 int i;
4827
4828 for (i = 0; i < nvme->n_intr_cnt; i++) {
4829 if (nvme->n_inth[i] == NULL)
4830 break;
4831
4832 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4833 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
4834 else
4835 (void) ddi_intr_disable(nvme->n_inth[i]);
4836
4837 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
4838 (void) ddi_intr_free(nvme->n_inth[i]);
4839 }
4840
4841 kmem_free(nvme->n_inth, nvme->n_inth_sz);
4842 nvme->n_inth = NULL;
4843 nvme->n_inth_sz = 0;
4844
4845 nvme->n_progress &= ~NVME_INTERRUPTS;
4846 }
4847
4848 static int
4849 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
4850 {
4851 int nintrs, navail, count;
4852 int ret;
4853 int i;
4854
4855 if (nvme->n_intr_types == 0) {
4856 ret = ddi_intr_get_supported_types(nvme->n_dip,
4857 &nvme->n_intr_types);
4858 if (ret != DDI_SUCCESS) {
4859 dev_err(nvme->n_dip, CE_WARN,
4860 "!%s: ddi_intr_get_supported types failed",
4861 __func__);
4862 return (ret);
4863 }
4864 #ifdef __x86
4865 if (get_hwenv() == HW_VMWARE)
4866 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
4867 #endif
4868 }
4869
4870 if ((nvme->n_intr_types & intr_type) == 0)
4871 return (DDI_FAILURE);
4872
4873 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
4874 if (ret != DDI_SUCCESS) {
4875 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
4876 __func__);
4877 return (ret);
4878 }
4879
4880 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
4881 if (ret != DDI_SUCCESS) {
4882 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
4883 __func__);
4884 return (ret);
4885 }
4886
4887 /* We want at most one interrupt per queue pair. */
4888 if (navail > nqpairs)
4889 navail = nqpairs;
4890
4891 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
4892 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
4893
4894 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
4895 &count, 0);
4896 if (ret != DDI_SUCCESS) {
4897 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
4898 __func__);
4899 goto fail;
4900 }
4901
4902 nvme->n_intr_cnt = count;
4903
4904 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
4905 if (ret != DDI_SUCCESS) {
4906 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
4907 __func__);
4908 goto fail;
4909 }
4910
4911 for (i = 0; i < count; i++) {
4912 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
4913 (void *)nvme, (void *)(uintptr_t)i);
4914 if (ret != DDI_SUCCESS) {
4915 dev_err(nvme->n_dip, CE_WARN,
4916 "!%s: ddi_intr_add_handler failed", __func__);
4917 goto fail;
4918 }
4919 }
4920
4921 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
4922
4923 for (i = 0; i < count; i++) {
4924 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4925 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
4926 else
4927 ret = ddi_intr_enable(nvme->n_inth[i]);
4928
4929 if (ret != DDI_SUCCESS) {
4930 dev_err(nvme->n_dip, CE_WARN,
4931 "!%s: enabling interrupt %d failed", __func__, i);
4932 goto fail;
4933 }
4934 }
4935
4936 nvme->n_intr_type = intr_type;
4937
4938 nvme->n_progress |= NVME_INTERRUPTS;
4939
4940 return (DDI_SUCCESS);
4941
4942 fail:
4943 nvme_release_interrupts(nvme);
4944
4945 return (ret);
4946 }
4947
4948 static int
4949 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
4950 {
4951 _NOTE(ARGUNUSED(arg));
4952
4953 pci_ereport_post(dip, fm_error, NULL);
4954 return (fm_error->fme_status);
4955 }
4956
4957 static void
4958 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
4959 void *b)
4960 {
4961 nvme_t *nvme = a;
4962
4963 nvme_ctrl_mark_dead(nvme, B_TRUE);
4964
4965 /*
4966 * Fail all outstanding commands, including those in the admin queue
4967 * (queue 0).
4968 */
4969 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
4970 nvme_qpair_t *qp = nvme->n_ioq[i];
4971
4972 mutex_enter(&qp->nq_mutex);
4973 for (size_t j = 0; j < qp->nq_nentry; j++) {
4974 nvme_cmd_t *cmd = qp->nq_cmd[j];
4975 nvme_cmd_t *u_cmd;
4976
4977 if (cmd == NULL) {
4978 continue;
4979 }
4980
4981 /*
4982 * Since we have the queue lock held the entire time we
4983 * iterate over it, it's not possible for the queue to
4984 * change underneath us. Thus, we don't need to check
4985 * that the return value of nvme_unqueue_cmd matches the
4986 * requested cmd to unqueue.
4987 */
4988 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
4989 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
4990 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
4991
4992 ASSERT3P(u_cmd, ==, cmd);
4993 }
4994 mutex_exit(&qp->nq_mutex);
4995 }
4996 }
4997
4998 /*
4999 * Open minor management
5000 */
5001 static int
5002 nvme_minor_comparator(const void *l, const void *r)
5003 {
5004 const nvme_minor_t *lm = l;
5005 const nvme_minor_t *rm = r;
5006
5007 if (lm->nm_minor > rm->nm_minor) {
5008 return (1);
5009 } else if (lm->nm_minor < rm->nm_minor) {
5010 return (-1);
5011 } else {
5012 return (0);
5013 }
5014 }
5015
5016 static void
5017 nvme_minor_free(nvme_minor_t *minor)
5018 {
5019 if (minor->nm_minor > 0) {
5020 ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN);
5021 id_free(nvme_open_minors, minor->nm_minor);
5022 minor->nm_minor = 0;
5023 }
5024 VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node));
5025 VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node));
5026 cv_destroy(&minor->nm_cv);
5027 kmem_free(minor, sizeof (nvme_minor_t));
5028 }
5029
5030 static nvme_minor_t *
5031 nvme_minor_find_by_dev(dev_t dev)
5032 {
5033 id_t id = (id_t)getminor(dev);
5034 nvme_minor_t search = { .nm_minor = id };
5035 nvme_minor_t *ret;
5036
5037 mutex_enter(&nvme_open_minors_mutex);
5038 ret = avl_find(&nvme_open_minors_avl, &search, NULL);
5039 mutex_exit(&nvme_open_minors_mutex);
5040
5041 return (ret);
5042 }
5043
5044 static int
5045 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
5046 {
5047 nvme_t *nvme;
5048 int instance;
5049 int nregs;
5050 off_t regsize;
5051 char name[32];
5052
5053 if (cmd != DDI_ATTACH)
5054 return (DDI_FAILURE);
5055
5056 instance = ddi_get_instance(dip);
5057
5058 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
5059 return (DDI_FAILURE);
5060
5061 nvme = ddi_get_soft_state(nvme_state, instance);
5062 ddi_set_driver_private(dip, nvme);
5063 nvme->n_dip = dip;
5064
5065 /*
5066 * Map PCI config space
5067 */
5068 if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) {
5069 dev_err(dip, CE_WARN, "!failed to map PCI config space");
5070 goto fail;
5071 }
5072 nvme->n_progress |= NVME_PCI_CONFIG;
5073
5074 /*
5075 * Get the various PCI IDs from config space
5076 */
5077 nvme->n_vendor_id =
5078 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID);
5079 nvme->n_device_id =
5080 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID);
5081 nvme->n_revision_id =
5082 pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID);
5083 nvme->n_subsystem_device_id =
5084 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID);
5085 nvme->n_subsystem_vendor_id =
5086 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID);
5087
5088 nvme_detect_quirks(nvme);
5089
5090 /*
5091 * Set up event handlers for hot removal. While npe(4D) supports the hot
5092 * removal event being injected for devices, the same is not true of all
5093 * of our possible parents (i.e. pci(4D) as of this writing). The most
5094 * common case where this shows up is in some virtualization environments.
5095 * We treat this as non-fatal so that devices still work, but we leave
5096 * this set up in such a way that if a nexus does grow support for this
5097 * we're good to go.
5098 */
5099 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
5100 &nvme->n_rm_cookie) == DDI_SUCCESS) {
5101 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
5102 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
5103 DDI_SUCCESS) {
5104 goto fail;
5105 }
5106 } else {
5107 nvme->n_ev_rm_cb_id = NULL;
5108 }
5109
5110 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
5111 nvme->n_progress |= NVME_MUTEX_INIT;
5112
5113 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5114 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
5115 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
5116 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
5117 B_TRUE : B_FALSE;
5118 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5119 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
5120 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5121 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
5122 /*
5123 * Double up the default for completion queues in case of
5124 * queue sharing.
5125 */
5126 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5127 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
5128 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5129 DDI_PROP_DONTPASS, "async-event-limit",
5130 NVME_DEFAULT_ASYNC_EVENT_LIMIT);
5131 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5132 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
5133 B_TRUE : B_FALSE;
5134 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5135 DDI_PROP_DONTPASS, "min-phys-block-size",
5136 NVME_DEFAULT_MIN_BLOCK_SIZE);
5137 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5138 DDI_PROP_DONTPASS, "max-submission-queues", -1);
5139 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5140 DDI_PROP_DONTPASS, "max-completion-queues", -1);
5141
5142 if (!ISP2(nvme->n_min_block_size) ||
5143 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
5144 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
5145 "using default %d", ISP2(nvme->n_min_block_size) ?
5146 "too low" : "not a power of 2",
5147 NVME_DEFAULT_MIN_BLOCK_SIZE);
5148 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
5149 }
5150
5151 if (nvme->n_submission_queues != -1 &&
5152 (nvme->n_submission_queues < 1 ||
5153 nvme->n_submission_queues > UINT16_MAX)) {
5154 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not "
5155 "valid. Must be [1..%d]", nvme->n_submission_queues,
5156 UINT16_MAX);
5157 nvme->n_submission_queues = -1;
5158 }
5159
5160 if (nvme->n_completion_queues != -1 &&
5161 (nvme->n_completion_queues < 1 ||
5162 nvme->n_completion_queues > UINT16_MAX)) {
5163 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not "
5164 "valid. Must be [1..%d]", nvme->n_completion_queues,
5165 UINT16_MAX);
5166 nvme->n_completion_queues = -1;
5167 }
5168
5169 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
5170 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
5171 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
5172 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
5173
5174 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
5175 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
5176 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
5177 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
5178
5179 if (nvme->n_async_event_limit < 1)
5180 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
5181
5182 nvme->n_reg_acc_attr = nvme_reg_acc_attr;
5183 nvme->n_queue_dma_attr = nvme_queue_dma_attr;
5184 nvme->n_prp_dma_attr = nvme_prp_dma_attr;
5185 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
5186
5187 /*
5188 * Set up FMA support.
5189 */
5190 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
5191 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
5192 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
5193 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
5194
5195 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
5196
5197 if (nvme->n_fm_cap) {
5198 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
5199 nvme->n_reg_acc_attr.devacc_attr_access =
5200 DDI_FLAGERR_ACC;
5201
5202 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
5203 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
5204 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
5205 }
5206
5207 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
5208 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5209 pci_ereport_setup(dip);
5210
5211 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5212 ddi_fm_handler_register(dip, nvme_fm_errcb,
5213 (void *)nvme);
5214 }
5215
5216 nvme->n_progress |= NVME_FMA_INIT;
5217
5218 /*
5219 * The spec defines several register sets. Only the controller
5220 * registers (set 1) are currently used.
5221 */
5222 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
5223 nregs < 2 ||
5224 ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
5225 goto fail;
5226
5227 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
5228 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
5229 dev_err(dip, CE_WARN, "!failed to map regset 1");
5230 goto fail;
5231 }
5232
5233 nvme->n_progress |= NVME_REGS_MAPPED;
5234
5235 /*
5236 * Set up kstats
5237 */
5238 if (!nvme_stat_init(nvme)) {
5239 dev_err(dip, CE_WARN, "!failed to create device kstats");
5240 goto fail;
5241 }
5242 nvme->n_progress |= NVME_STAT_INIT;
5243
5244 /*
5245 * Create PRP DMA cache
5246 */
5247 (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
5248 ddi_driver_name(dip), ddi_get_instance(dip));
5249 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
5250 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
5251 NULL, (void *)nvme, NULL, 0);
5252
5253 if (nvme_init(nvme) != DDI_SUCCESS)
5254 goto fail;
5255
5256 /*
5257 * Initialize the driver with the UFM subsystem
5258 */
5259 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
5260 &nvme->n_ufmh, nvme) != 0) {
5261 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
5262 goto fail;
5263 }
5264 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
5265 ddi_ufm_update(nvme->n_ufmh);
5266 nvme->n_progress |= NVME_UFM_INIT;
5267
5268 nvme_mgmt_lock_init(&nvme->n_mgmt);
5269 nvme_lock_init(&nvme->n_lock);
5270 nvme->n_progress |= NVME_MGMT_INIT;
5271 nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD;
5272
5273 /*
5274 * Identify namespaces.
5275 */
5276 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
5277
5278 boolean_t minor_logged = B_FALSE;
5279 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5280 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5281
5282 nvme_lock_init(&ns->ns_lock);
5283 ns->ns_progress |= NVME_NS_LOCK;
5284
5285 /*
5286 * Namespaces start out in the active state. This is the
5287 * default state until we find out information about the
5288 * namespaces in more detail. nvme_init_ns() will go through and
5289 * determine what the proper state should be. It will also use
5290 * this state change to keep an accurate count of attachable
5291 * namespaces.
5292 */
5293 ns->ns_state = NVME_NS_STATE_ACTIVE;
5294 if (nvme_init_ns(nvme, i) != 0) {
5295 nvme_mgmt_unlock(nvme);
5296 goto fail;
5297 }
5298
5299 /*
5300 * We only create compat minor nodes for the first
5301 * NVME_MINOR_MAX namespaces. Those that are beyond this
5302 * can only be accessed through the primary controller node,
5303 * which is generally fine as that's what libnvme uses and is
5304 * our preferred path. Not having a minor is better than not
5305 * having the namespace!
5306 */
5307 if (i > NVME_MINOR_MAX) {
5308 if (!minor_logged) {
5309 dev_err(dip, CE_WARN, "namespace minor "
5310 "creation limited to the first %u "
5311 "namespaces, device has %u",
5312 NVME_MINOR_MAX, nvme->n_namespace_count);
5313 minor_logged = B_TRUE;
5314 }
5315 continue;
5316 }
5317
5318 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
5319 NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
5320 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
5321 nvme_mgmt_unlock(nvme);
5322 dev_err(dip, CE_WARN,
5323 "!failed to create minor node for namespace %d", i);
5324 goto fail;
5325 }
5326 ns->ns_progress |= NVME_NS_MINOR;
5327 }
5328
5329 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
5330 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) !=
5331 DDI_SUCCESS) {
5332 nvme_mgmt_unlock(nvme);
5333 dev_err(dip, CE_WARN, "nvme_attach: "
5334 "cannot create devctl minor node");
5335 goto fail;
5336 }
5337
5338 /*
5339 * Attempt to attach all namespaces that are in a reasonable state. This
5340 * should not fail attach.
5341 */
5342 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5343 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5344 nvme_ioctl_common_t com = { .nioc_nsid = i };
5345
5346 if (ns->ns_state < NVME_NS_STATE_NOT_IGNORED)
5347 continue;
5348
5349 if (!nvme_bd_attach_ns(nvme, &com) && com.nioc_drv_err !=
5350 NVME_IOCTL_E_UNSUP_ATTACH_NS) {
5351 dev_err(nvme->n_dip, CE_WARN, "!failed to attach "
5352 "namespace %d due to blkdev error (0x%x)", i,
5353 com.nioc_drv_err);
5354 }
5355 }
5356
5357 nvme_mgmt_unlock(nvme);
5358
5359 return (DDI_SUCCESS);
5360
5361 fail:
5362 /* attach successful anyway so that FMA can retire the device */
5363 if (nvme->n_dead)
5364 return (DDI_SUCCESS);
5365
5366 (void) nvme_detach(dip, DDI_DETACH);
5367
5368 return (DDI_FAILURE);
5369 }
5370
5371 static int
5372 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
5373 {
5374 int instance;
5375 nvme_t *nvme;
5376
5377 if (cmd != DDI_DETACH)
5378 return (DDI_FAILURE);
5379
5380 instance = ddi_get_instance(dip);
5381
5382 nvme = ddi_get_soft_state(nvme_state, instance);
5383
5384 if (nvme == NULL)
5385 return (DDI_FAILURE);
5386
5387 /*
5388 * Remove all minor nodes from the device regardless of the source in
5389 * one swoop.
5390 */
5391 ddi_remove_minor_node(dip, NULL);
5392
5393 /*
5394 * We need to remove the event handler as one of the first things that
5395 * we do. If we proceed with other teardown without removing the event
5396 * handler, we could end up in a very unfortunate race with ourselves.
5397 * The DDI does not serialize these with detach (just like timeout(9F)
5398 * and others).
5399 */
5400 if (nvme->n_ev_rm_cb_id != NULL) {
5401 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
5402 }
5403 nvme->n_ev_rm_cb_id = NULL;
5404
5405 /*
5406 * If the controller was marked dead, there is a slight chance that we
5407 * are asynchronously processing the removal taskq. Because we have
5408 * removed the callback handler above and all minor nodes and commands
5409 * are closed, there is no other way to get in here. As such, we wait on
5410 * the nvme_dead_taskq to complete so we can avoid tracking if it's
5411 * running or not.
5412 */
5413 taskq_wait(nvme_dead_taskq);
5414
5415 if (nvme->n_ns) {
5416 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5417 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5418
5419 if (ns->ns_bd_hdl) {
5420 (void) bd_detach_handle(ns->ns_bd_hdl);
5421 bd_free_handle(ns->ns_bd_hdl);
5422 }
5423
5424 if (ns->ns_idns)
5425 kmem_free(ns->ns_idns,
5426 sizeof (nvme_identify_nsid_t));
5427 if (ns->ns_devid)
5428 strfree(ns->ns_devid);
5429
5430 if ((ns->ns_progress & NVME_NS_LOCK) != 0)
5431 nvme_lock_fini(&ns->ns_lock);
5432 }
5433
5434 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
5435 nvme->n_namespace_count);
5436 }
5437
5438 if (nvme->n_progress & NVME_MGMT_INIT) {
5439 nvme_lock_fini(&nvme->n_lock);
5440 nvme_mgmt_lock_fini(&nvme->n_mgmt);
5441 }
5442
5443 if (nvme->n_progress & NVME_UFM_INIT) {
5444 ddi_ufm_fini(nvme->n_ufmh);
5445 mutex_destroy(&nvme->n_fwslot_mutex);
5446 }
5447
5448 if (nvme->n_progress & NVME_INTERRUPTS)
5449 nvme_release_interrupts(nvme);
5450
5451 for (uint_t i = 0; i < nvme->n_cq_count; i++) {
5452 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
5453 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
5454 }
5455
5456 if (nvme->n_progress & NVME_MUTEX_INIT) {
5457 mutex_destroy(&nvme->n_minor_mutex);
5458 }
5459
5460 if (nvme->n_ioq_count > 0) {
5461 for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) {
5462 if (nvme->n_ioq[i] != NULL) {
5463 /* TODO: send destroy queue commands */
5464 nvme_free_qpair(nvme->n_ioq[i]);
5465 }
5466 }
5467
5468 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
5469 (nvme->n_ioq_count + 1));
5470 }
5471
5472 if (nvme->n_prp_cache != NULL) {
5473 kmem_cache_destroy(nvme->n_prp_cache);
5474 }
5475
5476 if (nvme->n_progress & NVME_REGS_MAPPED) {
5477 nvme_shutdown(nvme, B_FALSE);
5478 (void) nvme_reset(nvme, B_FALSE);
5479 }
5480
5481 if (nvme->n_progress & NVME_CTRL_LIMITS)
5482 sema_destroy(&nvme->n_abort_sema);
5483
5484 if (nvme->n_progress & NVME_ADMIN_QUEUE)
5485 nvme_free_qpair(nvme->n_adminq);
5486
5487 if (nvme->n_cq_count > 0) {
5488 nvme_destroy_cq_array(nvme, 0);
5489 nvme->n_cq = NULL;
5490 nvme->n_cq_count = 0;
5491 }
5492
5493 if (nvme->n_idcomns)
5494 kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE);
5495
5496 if (nvme->n_idctl)
5497 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
5498
5499 if (nvme->n_progress & NVME_REGS_MAPPED)
5500 ddi_regs_map_free(&nvme->n_regh);
5501
5502 if (nvme->n_progress & NVME_STAT_INIT)
5503 nvme_stat_cleanup(nvme);
5504
5505 if (nvme->n_progress & NVME_FMA_INIT) {
5506 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5507 ddi_fm_handler_unregister(nvme->n_dip);
5508
5509 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
5510 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5511 pci_ereport_teardown(nvme->n_dip);
5512
5513 ddi_fm_fini(nvme->n_dip);
5514 }
5515
5516 if (nvme->n_progress & NVME_PCI_CONFIG)
5517 pci_config_teardown(&nvme->n_pcicfg_handle);
5518
5519 if (nvme->n_vendor != NULL)
5520 strfree(nvme->n_vendor);
5521
5522 if (nvme->n_product != NULL)
5523 strfree(nvme->n_product);
5524
5525 ddi_soft_state_free(nvme_state, instance);
5526
5527 return (DDI_SUCCESS);
5528 }
5529
5530 static int
5531 nvme_quiesce(dev_info_t *dip)
5532 {
5533 int instance;
5534 nvme_t *nvme;
5535
5536 instance = ddi_get_instance(dip);
5537
5538 nvme = ddi_get_soft_state(nvme_state, instance);
5539
5540 if (nvme == NULL)
5541 return (DDI_FAILURE);
5542
5543 nvme_shutdown(nvme, B_TRUE);
5544
5545 (void) nvme_reset(nvme, B_TRUE);
5546
5547 return (DDI_SUCCESS);
5548 }
5549
5550 static int
5551 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
5552 {
5553 nvme_t *nvme = cmd->nc_nvme;
5554 uint_t nprp_per_page, nprp;
5555 uint64_t *prp;
5556 const ddi_dma_cookie_t *cookie;
5557 uint_t idx;
5558 uint_t ncookies = ddi_dma_ncookies(dma);
5559
5560 if (ncookies == 0)
5561 return (DDI_FAILURE);
5562
5563 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
5564 return (DDI_FAILURE);
5565 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
5566
5567 if (ncookies == 1) {
5568 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5569 return (DDI_SUCCESS);
5570 } else if (ncookies == 2) {
5571 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
5572 return (DDI_FAILURE);
5573 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
5574 return (DDI_SUCCESS);
5575 }
5576
5577 /*
5578 * At this point, we're always operating on cookies at
5579 * index >= 1 and writing the addresses of those cookies
5580 * into a new page. The address of that page is stored
5581 * as the second PRP entry.
5582 */
5583 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
5584 ASSERT(nprp_per_page > 0);
5585
5586 /*
5587 * We currently don't support chained PRPs and set up our DMA
5588 * attributes to reflect that. If we still get an I/O request
5589 * that needs a chained PRP something is very wrong. Account
5590 * for the first cookie here, which we've placed in d_prp[0].
5591 */
5592 nprp = howmany(ncookies - 1, nprp_per_page);
5593 VERIFY(nprp == 1);
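/*
 * Worked example (illustrative): with a 4096-byte page, nprp_per_page is
 * 4096 / 8 == 512, so any transfer that maps to between 3 and 513 cookies
 * still yields nprp == 1; the first cookie sits in d_prp[0] and the
 * remaining cookie addresses fit in the single PRP list page below.
 */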
5594
5595 /*
5596 * Allocate a page of pointers, in which we'll write the
5597 * addresses of cookies 1 through ncookies - 1.
5598 */
5599 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
5600 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5601 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
5602
5603 prp = (uint64_t *)cmd->nc_prp->nd_memp;
5604 for (idx = 1; idx < ncookies; idx++) {
5605 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
5606 return (DDI_FAILURE);
5607 *prp++ = cookie->dmac_laddress;
5608 }
5609
5610 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5611 DDI_DMA_SYNC_FORDEV);
5612 return (DDI_SUCCESS);
5613 }
5614
5615 /*
5616 * The maximum number of ranges supported for a deallocate request is
5617 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
5618 * unchanged through at least 1.4a). The definition of nvme_range_t is also
5619 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
5620 * a deallocate request will fit into the smallest supported namespace page
5621 * (4k).
5622 */
5623 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
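/*
 * As a note on the arithmetic behind the assertion above: per the NVMe 1.1
 * range definition, each nvme_range_t is 16 bytes (4 bytes of context
 * attributes, a 4-byte length in blocks, and an 8-byte starting LBA), so
 * 256 ranges occupy exactly 256 * 16 == 4096 bytes.
 */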
5624
5625 static int
5626 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
5627 int allocflag)
5628 {
5629 const dkioc_free_list_t *dfl = xfer->x_dfl;
5630 const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
5631 nvme_t *nvme = cmd->nc_nvme;
5632 nvme_range_t *ranges = NULL;
5633 uint_t i;
5634
5635 /*
5636 * The number of ranges in the request is 0s based (that is
5637 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
5638 * word10 == 255 -> 256 ranges). Therefore the allowed values are
5639 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
5640 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
5641 * in blkdev.
5642 */
5643 VERIFY3U(dfl->dfl_num_exts, >, 0);
5644 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
5645 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
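	/*
	 * For example, a request with a single extent sets word10 to 0, while
	 * a maximal request with 256 extents sets word10 to 255.
	 */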
5646
5647 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
5648
5649 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
5650 if (cmd->nc_prp == NULL)
5651 return (DDI_FAILURE);
5652
5653 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5654 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
5655
5656 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
5657 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5658
5659 for (i = 0; i < dfl->dfl_num_exts; i++) {
5660 uint64_t lba, len;
5661
5662 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
5663 len = exts[i].dfle_length / blocksize;
5664
5665 VERIFY3U(len, <=, UINT32_MAX);
5666
5667 /* No context attributes for a deallocate request */
5668 ranges[i].nr_ctxattr = 0;
5669 ranges[i].nr_len = len;
5670 ranges[i].nr_lba = lba;
5671 }
5672
5673 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5674 DDI_DMA_SYNC_FORDEV);
5675
5676 return (DDI_SUCCESS);
5677 }
5678
5679 static nvme_cmd_t *
5680 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
5681 {
5682 nvme_t *nvme = ns->ns_nvme;
5683 nvme_cmd_t *cmd;
5684 int allocflag;
5685
5686 /*
5687 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
5688 */
5689 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
5690 cmd = nvme_alloc_cmd(nvme, allocflag);
5691
5692 if (cmd == NULL)
5693 return (NULL);
5694
5695 cmd->nc_sqe.sqe_opc = opc;
5696 cmd->nc_callback = nvme_bd_xfer_done;
5697 cmd->nc_xfer = xfer;
5698
5699 switch (opc) {
5700 case NVME_OPC_NVM_WRITE:
5701 case NVME_OPC_NVM_READ:
5702 VERIFY(xfer->x_nblks <= 0x10000);
5703
5704 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5705
5706 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
5707 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
5708 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
5709
5710 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
5711 goto fail;
5712 break;
5713
5714 case NVME_OPC_NVM_FLUSH:
5715 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5716 break;
5717
5718 case NVME_OPC_NVM_DSET_MGMT:
5719 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5720
5721 if (nvme_fill_ranges(cmd, xfer,
5722 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
5723 goto fail;
5724 break;
5725
5726 default:
5727 goto fail;
5728 }
5729
5730 return (cmd);
5731
5732 fail:
5733 nvme_free_cmd(cmd);
5734 return (NULL);
5735 }
5736
5737 static void
5738 nvme_bd_xfer_done(void *arg)
5739 {
5740 nvme_cmd_t *cmd = arg;
5741 bd_xfer_t *xfer = cmd->nc_xfer;
5742 int error = 0;
5743
5744 error = nvme_check_cmd_status(cmd);
5745 nvme_free_cmd(cmd);
5746
5747 bd_xfer_done(xfer, error);
5748 }
5749
5750 static void
5751 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
5752 {
5753 nvme_namespace_t *ns = arg;
5754 nvme_t *nvme = ns->ns_nvme;
5755 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
5756
5757 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5758
5759 /*
5760 * Set the blkdev qcount to the number of submission queues.
5761 * It will then create one waitq/runq pair for each submission
5762 * queue and spread I/O requests across the queues.
5763 */
5764 drive->d_qcount = nvme->n_ioq_count;
5765
5766 /*
5767 * I/O activity to individual namespaces is distributed across
5768 * each of the d_qcount blkdev queues (which has been set to
5769 * the number of nvme submission queues). d_qsize is the number
5770 * of submitted and not completed I/Os within each queue that blkdev
5771 * will allow before it starts holding them in the waitq.
5772 *
5773 * Each namespace will create a child blkdev instance; for each one
5774 * we try to set the d_qsize so that each namespace gets an
5775 * equal portion of the submission queue.
5776 *
5777 * If, after instantiation of the nvme driver, n_namespaces_attachable
5778 * changes and a namespace is attached, it could calculate a
5779 * different d_qsize. It may even be that the sum of the d_qsizes is
5780 * now beyond the submission queue size. Should that be the case
5781 * and the I/O rate is such that blkdev attempts to submit more
5782 * I/Os than the size of the submission queue, the excess I/Os
5783 * will be held behind the semaphore nq_sema.
5784 */
5785 drive->d_qsize = nvme->n_io_squeue_len / ns_count;
5786
5787 /*
5788 * Don't let the queue size drop below the minimum, though.
5789 */
5790 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
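	/*
	 * Illustrative numbers only: with n_io_squeue_len == 1024 and four
	 * attachable namespaces, each child blkdev instance gets a d_qsize of
	 * 256; with a very large namespace count the computed value is raised
	 * back up to NVME_MIN_IO_QUEUE_LEN by the MAX above.
	 */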
5791
5792 /*
5793 * d_maxxfer is not set, which means the value is taken from the DMA
5794 * attributes specified to bd_alloc_handle.
5795 */
5796
5797 drive->d_removable = B_FALSE;
5798 drive->d_hotpluggable = B_FALSE;
5799
5800 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
5801 drive->d_target = ns->ns_id;
5802 drive->d_lun = 0;
5803
5804 drive->d_model = nvme->n_idctl->id_model;
5805 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
5806 drive->d_vendor = nvme->n_vendor;
5807 drive->d_vendor_len = strlen(nvme->n_vendor);
5808 drive->d_product = nvme->n_product;
5809 drive->d_product_len = strlen(nvme->n_product);
5810 drive->d_serial = nvme->n_idctl->id_serial;
5811 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
5812 drive->d_revision = nvme->n_idctl->id_fwrev;
5813 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
5814
5815 /*
5816 * If we support the dataset management command, the only restrictions
5817 * on a discard request are the maximum number of ranges (segments)
5818 * per single request.
5819 */
5820 if (nvme->n_idctl->id_oncs.on_dset_mgmt)
5821 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
5822
5823 nvme_mgmt_unlock(nvme);
5824 }
5825
5826 static int
5827 nvme_bd_mediainfo(void *arg, bd_media_t *media)
5828 {
5829 nvme_namespace_t *ns = arg;
5830 nvme_t *nvme = ns->ns_nvme;
5831
5832 if (nvme->n_dead) {
5833 return (EIO);
5834 }
5835
5836 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5837
5838 media->m_nblks = ns->ns_block_count;
5839 media->m_blksize = ns->ns_block_size;
5840 media->m_readonly = B_FALSE;
5841 media->m_solidstate = B_TRUE;
5842
5843 media->m_pblksize = ns->ns_best_block_size;
5844
5845 nvme_mgmt_unlock(nvme);
5846
5847 return (0);
5848 }
5849
5850 static int
5851 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
5852 {
5853 nvme_t *nvme = ns->ns_nvme;
5854 nvme_cmd_t *cmd;
5855 nvme_qpair_t *ioq;
5856 boolean_t poll;
5857 int ret;
5858
5859 if (nvme->n_dead) {
5860 return (EIO);
5861 }
5862
5863 cmd = nvme_create_nvm_cmd(ns, opc, xfer);
5864 if (cmd == NULL)
5865 return (ENOMEM);
5866
5867 cmd->nc_sqid = xfer->x_qnum + 1;
5868 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
5869 ioq = nvme->n_ioq[cmd->nc_sqid];
5870
5871 /*
5872 * Get the polling flag before submitting the command. The command may
5873 * complete immediately after it was submitted, which means we must
5874 * treat both cmd and xfer as if they have been freed already.
5875 */
5876 poll = (xfer->x_flags & BD_XFER_POLL) != 0;
5877
5878 ret = nvme_submit_io_cmd(ioq, cmd);
5879
5880 if (ret != 0)
5881 return (ret);
5882
5883 if (!poll)
5884 return (0);
5885
5886 do {
5887 cmd = nvme_retrieve_cmd(nvme, ioq);
5888 if (cmd != NULL) {
5889 ASSERT0(cmd->nc_flags & NVME_CMD_F_USELOCK);
5890 cmd->nc_callback(cmd);
5891 } else {
5892 drv_usecwait(10);
5893 }
5894 } while (ioq->nq_active_cmds != 0);
5895
5896 return (0);
5897 }
5898
5899 static int
5900 nvme_bd_read(void *arg, bd_xfer_t *xfer)
5901 {
5902 nvme_namespace_t *ns = arg;
5903
5904 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
5905 }
5906
5907 static int
5908 nvme_bd_write(void *arg, bd_xfer_t *xfer)
5909 {
5910 nvme_namespace_t *ns = arg;
5911
5912 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
5913 }
5914
5915 static int
5916 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
5917 {
5918 nvme_namespace_t *ns = arg;
5919
5920 if (ns->ns_nvme->n_dead)
5921 return (EIO);
5922
5923 /*
5924 * If the volatile write cache is not present or not enabled the FLUSH
5925 * command is a no-op, so we can take a shortcut here.
5926 */
5927 if (!ns->ns_nvme->n_write_cache_present) {
5928 bd_xfer_done(xfer, ENOTSUP);
5929 return (0);
5930 }
5931
5932 if (!ns->ns_nvme->n_write_cache_enabled) {
5933 bd_xfer_done(xfer, 0);
5934 return (0);
5935 }
5936
5937 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
5938 }
5939
5940 static int
5941 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
5942 {
5943 nvme_namespace_t *ns = arg;
5944 nvme_t *nvme = ns->ns_nvme;
5945
5946 if (nvme->n_dead) {
5947 return (EIO);
5948 }
5949
5950 if (*(uint64_t *)ns->ns_nguid != 0 ||
5951 *(uint64_t *)(ns->ns_nguid + 8) != 0) {
5952 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
5953 sizeof (ns->ns_nguid), ns->ns_nguid, devid));
5954 } else if (*(uint64_t *)ns->ns_eui64 != 0) {
5955 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
5956 sizeof (ns->ns_eui64), ns->ns_eui64, devid));
5957 } else {
5958 return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
5959 strlen(ns->ns_devid), ns->ns_devid, devid));
5960 }
5961 }
5962
5963 static int
5964 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
5965 {
5966 nvme_namespace_t *ns = arg;
5967
5968 if (xfer->x_dfl == NULL)
5969 return (EINVAL);
5970
5971 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
5972 return (ENOTSUP);
5973
5974 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
5975 }
5976
5977 static int
5978 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
5979 {
5980 #ifndef __lock_lint
5981 _NOTE(ARGUNUSED(cred_p));
5982 #endif
5983 nvme_t *nvme;
5984 nvme_minor_t *minor = NULL;
5985 uint32_t nsid;
5986 minor_t m = getminor(*devp);
5987 int rv = 0;
5988
5989 if (otyp != OTYP_CHR)
5990 return (EINVAL);
5991
5992 if (m >= NVME_OPEN_MINOR_MIN)
5993 return (ENXIO);
5994
5995 nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m));
5996 nsid = NVME_MINOR_NSID(m);
5997
5998 if (nvme == NULL)
5999 return (ENXIO);
6000
6001 if (nsid > MIN(nvme->n_namespace_count, NVME_MINOR_MAX))
6002 return (ENXIO);
6003
6004 if (nvme->n_dead)
6005 return (EIO);
6006
6007 /*
6008 * At this point, we're going to allow an open to proceed on this
6009 * device. We need to allocate a new instance for this (presuming one is
6010 * available).
6011 */
6012 minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY);
6013 if (minor == NULL) {
6014 return (ENOMEM);
6015 }
6016
6017 cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL);
6018 list_link_init(&minor->nm_ctrl_lock.nli_node);
6019 minor->nm_ctrl_lock.nli_nvme = nvme;
6020 minor->nm_ctrl_lock.nli_minor = minor;
6021 list_link_init(&minor->nm_ns_lock.nli_node);
6022 minor->nm_ns_lock.nli_nvme = nvme;
6023 minor->nm_ns_lock.nli_minor = minor;
6024 minor->nm_minor = id_alloc_nosleep(nvme_open_minors);
6025 if (minor->nm_minor == -1) {
6026 nvme_minor_free(minor);
6027 return (ENOSPC);
6028 }
6029
6030 minor->nm_ctrl = nvme;
6031 if (nsid != 0) {
6032 minor->nm_ns = nvme_nsid2ns(nvme, nsid);
6033 }
6034
6035 /*
6036 * Before we check for exclusive access and attempt a lock if requested,
6037 * ensure that this minor is persisted.
6038 */
6039 mutex_enter(&nvme_open_minors_mutex);
6040 avl_add(&nvme_open_minors_avl, minor);
6041 mutex_exit(&nvme_open_minors_mutex);
6042
6043 /*
6044 * A request to open this minor with FEXCL is translated into a non-blocking
6045 * write lock of the appropriate entity. This honors the original
6046 * semantics here. In the future, we should see if we can remove this
6047 * and turn a request for FEXCL at open into ENOTSUP.
6048 */
6049 mutex_enter(&nvme->n_minor_mutex);
6050 if ((flag & FEXCL) != 0) {
6051 nvme_ioctl_lock_t lock = {
6052 .nil_level = NVME_LOCK_L_WRITE,
6053 .nil_flags = NVME_LOCK_F_DONT_BLOCK
6054 };
6055
6056 if (minor->nm_ns != NULL) {
6057 lock.nil_ent = NVME_LOCK_E_NS;
6058 lock.nil_common.nioc_nsid = nsid;
6059 } else {
6060 lock.nil_ent = NVME_LOCK_E_CTRL;
6061 }
6062 nvme_rwlock(minor, &lock);
6063 if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
6064 mutex_exit(&nvme->n_minor_mutex);
6065
6066 mutex_enter(&nvme_open_minors_mutex);
6067 avl_remove(&nvme_open_minors_avl, minor);
6068 mutex_exit(&nvme_open_minors_mutex);
6069
6070 nvme_minor_free(minor);
6071 return (EBUSY);
6072 }
6073 }
6074 mutex_exit(&nvme->n_minor_mutex);
6075
6076 *devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor);
6077 return (rv);
6078
6079 }
6080
6081 static int
6082 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused)
6083 {
6084 nvme_minor_t *minor;
6085 nvme_t *nvme;
6086
6087 if (otyp != OTYP_CHR) {
6088 return (ENXIO);
6089 }
6090
6091 minor = nvme_minor_find_by_dev(dev);
6092 if (minor == NULL) {
6093 return (ENXIO);
6094 }
6095
6096 mutex_enter(&nvme_open_minors_mutex);
6097 avl_remove(&nvme_open_minors_avl, minor);
6098 mutex_exit(&nvme_open_minors_mutex);
6099
6100 /*
6101 * When this device is being closed, we must ensure that any locks held
6102 * by this minor are dealt with.
6103 */
6104 nvme = minor->nm_ctrl;
6105 mutex_enter(&nvme->n_minor_mutex);
6106 ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
6107 ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
6108
6109 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
6110 VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL);
6111 nvme_rwunlock(&minor->nm_ctrl_lock,
6112 minor->nm_ctrl_lock.nli_lock);
6113 }
6114
6115 if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
6116 VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL);
6117 nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock);
6118 }
6119 mutex_exit(&nvme->n_minor_mutex);
6120
6121 nvme_minor_free(minor);
6122
6123 return (0);
6124 }
6125
6126 void
6127 nvme_ioctl_success(nvme_ioctl_common_t *ioc)
6128 {
6129 ioc->nioc_drv_err = NVME_IOCTL_E_OK;
6130 ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS;
6131 ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC;
6132 }
6133
6134 boolean_t
6135 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct,
6136 uint32_t sc)
6137 {
6138 ioc->nioc_drv_err = err;
6139 ioc->nioc_ctrl_sct = sct;
6140 ioc->nioc_ctrl_sc = sc;
6141
6142 return (B_FALSE);
6143 }
6144
6145 static int
6146 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode)
6147 {
6148 nvme_ioctl_common_t ioc;
6149
6150 ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR);
6151 bzero(&ioc, sizeof (ioc));
6152 if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t),
6153 mode & FKIOCTL) != 0) {
6154 return (EFAULT);
6155 }
6156 return (0);
6157 }
6158
6159 /*
6160 * The companion to the namespace checking. This occurs after any rewriting
6161 * occurs. This is the primary point that we attempt to enforce any operation's
6162 * exclusivity. Note, it is theoretically possible for an operation to be
6163 * ongoing and to have someone with an exclusive lock ask to unlock it for some
6164 * reason. This does not maintain the number of such events that are going on.
6165 * While perhaps this is leaving too much up to the user, by the same token we
6166 * don't try to stop them from issuing two different format NVM commands
6167 * targeting the whole device at the same time either, even though the
6168 * controller would really rather that didn't happen.
6169 */
6170 static boolean_t
6171 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
6172 const nvme_ioctl_check_t *check)
6173 {
6174 nvme_t *const nvme = minor->nm_ctrl;
6175 nvme_namespace_t *ns;
6176 boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl;
6177
6178 /*
6179 * If the command doesn't require anything, then we're done.
6180 */
6181 if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) {
6182 return (B_TRUE);
6183 }
6184
6185 if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) {
6186 ns = NULL;
6187 } else {
6188 ns = nvme_nsid2ns(nvme, ioc->nioc_nsid);
6189 }
6190
6191 mutex_enter(&nvme->n_minor_mutex);
6192 ctrl_is_excl = nvme->n_lock.nl_writer != NULL;
6193 have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock;
6194 if (ns != NULL) {
6195 /*
6196 * We explicitly test the namespace lock's writer versus asking
6197 * the minor because the minor's namespace lock may apply to a
6198 * different namespace.
6199 */
6200 ns_is_excl = ns->ns_lock.nl_writer != NULL;
6201 have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock;
6202 ASSERT0(have_ctrl && have_ns);
6203 #ifdef DEBUG
6204 if (have_ns) {
6205 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns);
6206 }
6207 #endif
6208 } else {
6209 ns_is_excl = B_FALSE;
6210 have_ns = B_FALSE;
6211 }
6212 ASSERT0(ctrl_is_excl && ns_is_excl);
6213 mutex_exit(&nvme->n_minor_mutex);
6214
6215 if (check->nck_excl == NVME_IOCTL_EXCL_CTRL) {
6216 if (have_ctrl) {
6217 return (B_TRUE);
6218 }
6219
6220 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NEED_CTRL_WRLOCK,
6221 0, 0));
6222 }
6223
6224 if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) {
6225 if (ns == NULL) {
6226 if (have_ctrl) {
6227 return (B_TRUE);
6228 }
6229 return (nvme_ioctl_error(ioc,
6230 NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0));
6231 } else {
6232 if (have_ctrl || have_ns) {
6233 return (B_TRUE);
6234 }
6235 return (nvme_ioctl_error(ioc,
6236 NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0));
6237 }
6238 }
6239
6240 /*
6241 * Now we have an operation that does not require exclusive access. We
6242 * can proceed as long as no one else has it or if someone does it is
6243 * us. Regardless of what we target, a controller lock will stop us.
6244 */
6245 if (ctrl_is_excl && !have_ctrl) {
6246 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0));
6247 }
6248
6249 /*
6250 * Only check namespace exclusivity if we are targeting one.
6251 */
6252 if (ns != NULL && ns_is_excl && !have_ns) {
6253 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0));
6254 }
6255
6256 return (B_TRUE);
6257 }
6258
6259 /*
6260 * Perform common checking as to whether or not an ioctl operation may proceed.
6261 * We check in this function various aspects of the namespace attributes that
6262 * it's calling on. Once the namespace attributes and any possible rewriting
6263 * have been performed, then we proceed to check whether or not the requisite
6264 * exclusive access is present in nvme_ioctl_excl_check().
6265 */
6266 static boolean_t
6267 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
6268 const nvme_ioctl_check_t *check)
6269 {
6270 /*
6271 * If the minor has a namespace pointer, then it is constrained to that
6272 * namespace. If a namespace is allowed, then there are only two valid
6273 * values that we can find. The first is the namespace matching the minor.
6274 * The second is the value zero, which will be transformed to the minor's
6275 * namespace.
6276 */
6277 if (minor->nm_ns != NULL) {
6278 if (!check->nck_ns_ok || !check->nck_ns_minor_ok) {
6279 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0,
6280 0));
6281 }
6282
6283 if (ioc->nioc_nsid == 0) {
6284 ioc->nioc_nsid = minor->nm_ns->ns_id;
6285 } else if (ioc->nioc_nsid != minor->nm_ns->ns_id) {
6286 return (nvme_ioctl_error(ioc,
6287 NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0));
6288 }
6289
6290 return (nvme_ioctl_excl_check(minor, ioc, check));
6291 }
6292
6293 /*
6294 * If we've been told to skip checking the controller, here's where we
6295 * do that. This should really only be for commands which use the
6296 * namespace ID for listing purposes and therefore can have
6297 * traditionally illegal values here.
6298 */
6299 if (check->nck_skip_ctrl) {
6300 return (nvme_ioctl_excl_check(minor, ioc, check));
6301 }
6302
6303 /*
6304 * At this point, we know that we're on the controller's node. We first
6305 * deal with the simple case, is a namespace allowed at all or not. If
6306 * it is not allowed, then the only acceptable value is zero.
6307 */
6308 if (!check->nck_ns_ok) {
6309 if (ioc->nioc_nsid != 0) {
6310 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0,
6311 0));
6312 }
6313
6314 return (nvme_ioctl_excl_check(minor, ioc, check));
6315 }
6316
6317 /*
6318 * At this point, we know that a controller is allowed to use a
6319 * namespace. If we haven't been given zero or the broadcast namespace,
6320 * check to see if it's actually a valid namespace ID. If it is outside of
6321 * range, then it is an error. Next, if we have been requested to
6322 * rewrite 0 (the this controller indicator) as the broadcast namespace,
6323 * do so.
6324 *
6325 * While we validate that this namespace is within the valid range, we
6326 * do not check if it is active or inactive. That is left to our callers
6327 * to determine.
6328 */
6329 if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count &&
6330 ioc->nioc_nsid != NVME_NSID_BCAST) {
6331 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0));
6332 }
6333
6334 if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) {
6335 ioc->nioc_nsid = NVME_NSID_BCAST;
6336 }
6337
6338 /*
6339 * Finally, see if we have ended up with a broadcast namespace ID
6340 * whether through specification or rewriting. If that is not allowed,
6341 * then that is an error.
6342 */
6343 if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) {
6344 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
6345 }
6346
6347 return (nvme_ioctl_excl_check(minor, ioc, check));
6348 }
6349
6350 static int
6351 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode,
6352 cred_t *cred_p)
6353 {
6354 nvme_t *const nvme = minor->nm_ctrl;
6355 nvme_ioctl_ctrl_info_t *info;
6356 nvme_reg_cap_t cap = { 0 };
6357 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL };
6358 void *idbuf;
6359
6360 if ((mode & FREAD) == 0)
6361 return (EBADF);
6362
6363 info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY);
6364 if (info == NULL) {
6365 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6366 mode));
6367 }
6368
6369 if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t),
6370 mode & FKIOCTL) != 0) {
6371 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6372 return (EFAULT);
6373 }
6374
6375 if (!nvme_ioctl_check(minor, &info->nci_common,
6376 &nvme_check_ctrl_info)) {
6377 goto copyout;
6378 }
6379
6380 /*
6381 * We explicitly do not use the identify controller copy in the kernel
6382 * right now so that we can get a snapshot of the controller's
6383 * current capacity and values. While it's tempting to try to use this
6384 * to refresh the kernel's copy, we don't, simply to keep the rest of
6385 * the driver straightforward for now.
6386 */
6387 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6388 info->nci_common = id.nid_common;
6389 goto copyout;
6390 }
6391 bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t));
6392 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6393
6394 /*
6395 * Use the kernel's cached common namespace information for this.
6396 */
6397 bcopy(nvme->n_idcomns, &info->nci_common_ns,
6398 sizeof (nvme_identify_nsid_t));
6399
6400 info->nci_vers = nvme->n_version;
6401
6402 /*
6403 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
6404 * specify the base page size of 4k (1<<12), so add 12 here to
6405 * get the real page size value.
6406 */
6407 cap.r = nvme_get64(nvme, NVME_REG_CAP);
6408 info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax);
6409 info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin);
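	/*
	 * For example (hypothetical register values), cap_mpsmin == 0 yields
	 * a minimum page size of 1 << 12 == 4096 bytes, and cap_mpsmax == 4
	 * yields a maximum page size of 1 << 16 == 65536 bytes.
	 */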
6410
6411 info->nci_nintrs = (uint32_t)nvme->n_intr_cnt;
6412
6413 copyout:
6414 if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t),
6415 mode & FKIOCTL) != 0) {
6416 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6417 return (EFAULT);
6418 }
6419
6420 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6421 return (0);
6422 }
6423
6424 static int
6425 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6426 {
6427 nvme_t *const nvme = minor->nm_ctrl;
6428 nvme_ioctl_ns_info_t *ns_info;
6429 nvme_namespace_t *ns;
6430 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID };
6431 void *idbuf;
6432
6433 if ((mode & FREAD) == 0)
6434 return (EBADF);
6435
6436 ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY);
6437 if (ns_info == NULL) {
6438 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6439 mode));
6440 }
6441
6442 if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t),
6443 mode & FKIOCTL) != 0) {
6444 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6445 return (EFAULT);
6446 }
6447
6448 if (!nvme_ioctl_check(minor, &ns_info->nni_common,
6449 &nvme_check_ns_info)) {
6450 goto copyout;
6451 }
6452
6453 ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0);
6454 ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid);
6455
6456 /*
6457 * First fetch a fresh copy of the namespace information. Most callers
6458 * are using this because they will want a mostly accurate snapshot of
6459 * capacity and utilization.
6460 */
6461 id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid;
6462 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6463 ns_info->nni_common = id.nid_common;
6464 goto copyout;
6465 }
6466 bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t));
6467 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6468
6469 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6470 ns_info->nni_state = ns->ns_state;
6471 if (ns->ns_state >= NVME_NS_STATE_ATTACHED) {
6472 const char *addr;
6473
6474 ns_info->nni_state = NVME_NS_STATE_ATTACHED;
6475 addr = bd_address(ns->ns_bd_hdl);
6476 if (strlcpy(ns_info->nni_addr, addr,
6477 sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) {
6478 nvme_mgmt_unlock(nvme);
6479 (void) nvme_ioctl_error(&ns_info->nni_common,
6480 NVME_IOCTL_E_BD_ADDR_OVER, 0, 0);
6481 goto copyout;
6482 }
6483 }
6484 nvme_mgmt_unlock(nvme);
6485
6486 copyout:
6487 if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t),
6488 mode & FKIOCTL) != 0) {
6489 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6490 return (EFAULT);
6491 }
6492
6493 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6494 return (0);
6495 }
6496
6497 static int
6498 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6499 {
6500 _NOTE(ARGUNUSED(cred_p));
6501 nvme_t *const nvme = minor->nm_ctrl;
6502 void *idctl;
6503 uint_t model;
6504 nvme_ioctl_identify_t id;
6505 #ifdef _MULTI_DATAMODEL
6506 nvme_ioctl_identify32_t id32;
6507 #endif
6508 boolean_t ns_minor;
6509
6510 if ((mode & FREAD) == 0)
6511 return (EBADF);
6512
6513 model = ddi_model_convert_from(mode);
6514 switch (model) {
6515 #ifdef _MULTI_DATAMODEL
6516 case DDI_MODEL_ILP32:
6517 bzero(&id, sizeof (id));
6518 if (ddi_copyin((void *)arg, &id32, sizeof (id32),
6519 mode & FKIOCTL) != 0) {
6520 return (EFAULT);
6521 }
6522 id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid;
6523 id.nid_cns = id32.nid_cns;
6524 id.nid_ctrlid = id32.nid_ctrlid;
6525 id.nid_data = id32.nid_data;
6526 break;
6527 #endif /* _MULTI_DATAMODEL */
6528 case DDI_MODEL_NONE:
6529 if (ddi_copyin((void *)arg, &id, sizeof (id),
6530 mode & FKIOCTL) != 0) {
6531 return (EFAULT);
6532 }
6533 break;
6534 default:
6535 return (ENOTSUP);
6536 }
6537
6538 if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) {
6539 goto copyout;
6540 }
6541
6542 ns_minor = minor->nm_ns != NULL;
6543 if (!nvme_validate_identify(nvme, &id, ns_minor)) {
6544 goto copyout;
6545 }
6546
6547 if (nvme_identify(nvme, B_TRUE, &id, &idctl)) {
6548 int ret = ddi_copyout(idctl, (void *)id.nid_data,
6549 NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL);
6550 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
6551 if (ret != 0) {
6552 (void) nvme_ioctl_error(&id.nid_common,
6553 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6554 goto copyout;
6555 }
6556
6557 nvme_ioctl_success(&id.nid_common);
6558 }
6559
6560 copyout:
6561 switch (model) {
6562 #ifdef _MULTI_DATAMODEL
6563 case DDI_MODEL_ILP32:
6564 id32.nid_common = id.nid_common;
6565
6566 if (ddi_copyout(&id32, (void *)arg, sizeof (id32),
6567 mode & FKIOCTL) != 0) {
6568 return (EFAULT);
6569 }
6570 break;
6571 #endif /* _MULTI_DATAMODEL */
6572 case DDI_MODEL_NONE:
6573 if (ddi_copyout(&id, (void *)arg, sizeof (id),
6574 mode & FKIOCTL) != 0) {
6575 return (EFAULT);
6576 }
6577 break;
6578 default:
6579 return (ENOTSUP);
6580 }
6581
6582 return (0);
6583 }
6584
6585 /*
6586 * Execute commands on behalf of the various ioctls.
6587 *
6588 * If this returns true then the command completed successfully. Otherwise error
6589 * information is returned in the nvme_ioctl_common_t arguments.
6590 */
6591 typedef struct {
6592 nvme_sqe_t *ica_sqe;
6593 void *ica_data;
6594 uint32_t ica_data_len;
6595 uint_t ica_dma_flags;
6596 int ica_copy_flags;
6597 uint32_t ica_timeout;
6598 uint32_t ica_cdw0;
6599 } nvme_ioc_cmd_args_t;
6600
6601 static boolean_t
6602 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args)
6603 {
6604 nvme_cmd_t *cmd;
6605 boolean_t ret = B_FALSE;
6606
6607 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
6608 cmd->nc_sqid = 0;
6609
6610 /*
6611 * This function is used to facilitate requests from
6612 * userspace, so don't panic if the command fails. This
6613 * is especially true for admin passthru commands, where
6614 * the actual command data structure is entirely defined
6615 * by userspace.
6616 */
6617 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
6618
6619 cmd->nc_callback = nvme_wakeup_cmd;
6620 cmd->nc_sqe = *args->ica_sqe;
6621
6622 if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) {
6623 if (args->ica_data == NULL) {
6624 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM,
6625 0, 0);
6626 goto free_cmd;
6627 }
6628
6629 if (nvme_zalloc_dma(nvme, args->ica_data_len,
6630 args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) !=
6631 DDI_SUCCESS) {
6632 dev_err(nvme->n_dip, CE_WARN,
6633 "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
6634 ret = nvme_ioctl_error(ioc,
6635 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6636 goto free_cmd;
6637 }
6638
6639 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
6640 ret = nvme_ioctl_error(ioc,
6641 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6642 goto free_cmd;
6643 }
6644
6645 if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 &&
6646 ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp,
6647 args->ica_data_len, args->ica_copy_flags) != 0) {
6648 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA,
6649 0, 0);
6650 goto free_cmd;
6651 }
6652 }
6653
6654 nvme_admin_cmd(cmd, args->ica_timeout);
6655
6656 if (!nvme_check_cmd_status_ioctl(cmd, ioc)) {
6657 ret = B_FALSE;
6658 goto free_cmd;
6659 }
6660
6661 args->ica_cdw0 = cmd->nc_cqe.cqe_dw0;
6662
6663 if ((args->ica_dma_flags & DDI_DMA_READ) != 0 &&
6664 ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data,
6665 args->ica_data_len, args->ica_copy_flags) != 0) {
6666 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6667 goto free_cmd;
6668 }
6669
6670 ret = B_TRUE;
6671 nvme_ioctl_success(ioc);
6672
6673 free_cmd:
6674 nvme_free_cmd(cmd);
6675
6676 return (ret);
6677 }
6678
6679 static int
6680 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode,
6681 cred_t *cred_p)
6682 {
6683 nvme_t *const nvme = minor->nm_ctrl;
6684 void *buf;
6685 nvme_ioctl_get_logpage_t log;
6686 uint_t model;
6687 #ifdef _MULTI_DATAMODEL
6688 nvme_ioctl_get_logpage32_t log32;
6689 #endif
6690
6691 if ((mode & FREAD) == 0) {
6692 return (EBADF);
6693 }
6694
6695 model = ddi_model_convert_from(mode);
6696 switch (model) {
6697 #ifdef _MULTI_DATAMODEL
6698 case DDI_MODEL_ILP32:
6699 bzero(&log, sizeof (log));
6700 if (ddi_copyin((void *)arg, &log32, sizeof (log32),
6701 mode & FKIOCTL) != 0) {
6702 return (EFAULT);
6703 }
6704
6705 log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid;
6706 log.nigl_csi = log32.nigl_csi;
6707 log.nigl_lid = log32.nigl_lid;
6708 log.nigl_lsp = log32.nigl_lsp;
6709 log.nigl_len = log32.nigl_len;
6710 log.nigl_offset = log32.nigl_offset;
6711 log.nigl_data = log32.nigl_data;
6712 break;
6713 #endif /* _MULTI_DATAMODEL */
6714 case DDI_MODEL_NONE:
6715 if (ddi_copyin((void *)arg, &log, sizeof (log),
6716 mode & FKIOCTL) != 0) {
6717 return (EFAULT);
6718 }
6719 break;
6720 default:
6721 return (ENOTSUP);
6722 }
6723
6724 /*
6725 * Eventually we'd like to take a soft lock here to keep the namespaces from
6726 * changing out from under us during this operation, but we haven't
6727 * implemented that yet.
6728 */
6729 if (!nvme_ioctl_check(minor, &log.nigl_common,
6730 &nvme_check_get_logpage)) {
6731 goto copyout;
6732 }
6733
6734 if (!nvme_validate_logpage(nvme, &log)) {
6735 goto copyout;
6736 }
6737
6738 if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) {
6739 int copy;
6740
6741 copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len,
6742 mode & FKIOCTL);
6743 kmem_free(buf, log.nigl_len);
6744 if (copy != 0) {
6745 (void) nvme_ioctl_error(&log.nigl_common,
6746 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6747 goto copyout;
6748 }
6749
6750 nvme_ioctl_success(&log.nigl_common);
6751 }
6752
6753 copyout:
6754 switch (model) {
6755 #ifdef _MULTI_DATAMODEL
6756 case DDI_MODEL_ILP32:
6757 bzero(&log32, sizeof (log32));
6758
6759 log32.nigl_common = log.nigl_common;
6760 log32.nigl_csi = log.nigl_csi;
6761 log32.nigl_lid = log.nigl_lid;
6762 log32.nigl_lsp = log.nigl_lsp;
6763 log32.nigl_len = log.nigl_len;
6764 log32.nigl_offset = log.nigl_offset;
6765 log32.nigl_data = log.nigl_data;
6766 if (ddi_copyout(&log32, (void *)arg, sizeof (log32),
6767 mode & FKIOCTL) != 0) {
6768 return (EFAULT);
6769 }
6770 break;
6771 #endif /* _MULTI_DATAMODEL */
6772 case DDI_MODEL_NONE:
6773 if (ddi_copyout(&log, (void *)arg, sizeof (log),
6774 mode & FKIOCTL) != 0) {
6775 return (EFAULT);
6776 }
6777 break;
6778 default:
6779 return (ENOTSUP);
6780 }
6781
6782 return (0);
6783 }
6784
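/*
 * NVME_IOC_GET_FEATURE: issue a GET FEATURES admin command on behalf of the
 * caller. Any feature-specific data is copied out to the caller's buffer and
 * the completion queue entry's dword 0 is returned in nigf_cdw0.
 */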
6785 static int
6786 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode,
6787 cred_t *cred_p)
6788 {
6789 nvme_t *const nvme = minor->nm_ctrl;
6790 nvme_ioctl_get_feature_t feat;
6791 uint_t model;
6792 #ifdef _MULTI_DATAMODEL
6793 nvme_ioctl_get_feature32_t feat32;
6794 #endif
6795 nvme_get_features_dw10_t gf_dw10 = { 0 };
6796 nvme_ioc_cmd_args_t args = { NULL };
6797 nvme_sqe_t sqe = {
6798 .sqe_opc = NVME_OPC_GET_FEATURES
6799 };
6800
6801 if ((mode & FREAD) == 0) {
6802 return (EBADF);
6803 }
6804
6805 model = ddi_model_convert_from(mode);
6806 switch (model) {
6807 #ifdef _MULTI_DATAMODEL
6808 case DDI_MODEL_ILP32:
6809 bzero(&feat, sizeof (feat));
6810 if (ddi_copyin((void *)arg, &feat32, sizeof (feat32),
6811 mode & FKIOCTL) != 0) {
6812 return (EFAULT);
6813 }
6814
6815 feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid;
6816 feat.nigf_fid = feat32.nigf_fid;
6817 feat.nigf_sel = feat32.nigf_sel;
6818 feat.nigf_cdw11 = feat32.nigf_cdw11;
6819 feat.nigf_data = feat32.nigf_data;
6820 feat.nigf_len = feat32.nigf_len;
6821 break;
6822 #endif /* _MULTI_DATAMODEL */
6823 case DDI_MODEL_NONE:
6824 if (ddi_copyin((void *)arg, &feat, sizeof (feat),
6825 mode & FKIOCTL) != 0) {
6826 return (EFAULT);
6827 }
6828 break;
6829 default:
6830 return (ENOTSUP);
6831 }
6832
6833 if (!nvme_ioctl_check(minor, &feat.nigf_common,
6834 &nvme_check_get_feature)) {
6835 goto copyout;
6836 }
6837
6838 if (!nvme_validate_get_feature(nvme, &feat)) {
6839 goto copyout;
6840 }
6841
6842 gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0);
6843 gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0);
6844 sqe.sqe_cdw10 = gf_dw10.r;
6845 sqe.sqe_cdw11 = feat.nigf_cdw11;
6846 sqe.sqe_nsid = feat.nigf_common.nioc_nsid;
6847
6848 args.ica_sqe = &sqe;
6849 if (feat.nigf_len != 0) {
6850 args.ica_data = (void *)feat.nigf_data;
6851 args.ica_data_len = feat.nigf_len;
6852 args.ica_dma_flags = DDI_DMA_READ;
6853 }
6854 args.ica_copy_flags = mode;
6855 args.ica_timeout = nvme_admin_cmd_timeout;
6856
6857 if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) {
6858 goto copyout;
6859 }
6860
6861 feat.nigf_cdw0 = args.ica_cdw0;
6862
6863 copyout:
6864 switch (model) {
6865 #ifdef _MULTI_DATAMODEL
6866 case DDI_MODEL_ILP32:
6867 bzero(&feat32, sizeof (feat32));
6868
6869 feat32.nigf_common = feat.nigf_common;
6870 feat32.nigf_fid = feat.nigf_fid;
6871 feat32.nigf_sel = feat.nigf_sel;
6872 feat32.nigf_cdw11 = feat.nigf_cdw11;
6873 feat32.nigf_data = feat.nigf_data;
6874 feat32.nigf_len = feat.nigf_len;
6875 feat32.nigf_cdw0 = feat.nigf_cdw0;
6876 if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32),
6877 mode & FKIOCTL) != 0) {
6878 return (EFAULT);
6879 }
6880 break;
6881 #endif /* _MULTI_DATAMODEL */
6882 case DDI_MODEL_NONE:
6883 if (ddi_copyout(&feat, (void *)arg, sizeof (feat),
6884 mode & FKIOCTL) != 0) {
6885 return (EFAULT);
6886 }
6887 break;
6888 default:
6889 return (ENOTSUP);
6890 }
6891
6892 return (0);
6893 }
6894
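/*
 * NVME_IOC_FORMAT: perform a FORMAT NVM operation on either a single namespace
 * or, when the broadcast namespace ID is used, on all namespaces attached to
 * the controller.
 */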
6895 static int
6896 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6897 {
6898 nvme_t *const nvme = minor->nm_ctrl;
6899 nvme_ioctl_format_t ioc;
6900
6901 if ((mode & FWRITE) == 0)
6902 return (EBADF);
6903
6904 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6905 return (EPERM);
6906
6907 if (ddi_copyin((void *)(uintptr_t)arg, &ioc,
6908 sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0)
6909 return (EFAULT);
6910
6911 if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) {
6912 goto copyout;
6913 }
6914
6915 if (!nvme_validate_format(nvme, &ioc)) {
6916 goto copyout;
6917 }
6918
6919 /*
6920 * The broadcast namespace can format all namespaces attached to the
6921 * controller, meaning active namespaces. However, a targeted format can
6922 * impact any allocated namespace, even one not attached. As such, we
6923 * need different checks for each situation.
6924 */
6925 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6926 if (ioc.nif_common.nioc_nsid == NVME_NSID_BCAST) {
6927 if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) {
6928 nvme_mgmt_unlock(nvme);
6929 (void) nvme_ioctl_error(&ioc.nif_common,
6930 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
6931 goto copyout;
6932 }
6933 } else {
6934 nvme_namespace_t *ns = nvme_nsid2ns(nvme,
6935 ioc.nif_common.nioc_nsid);
6936
6937 if (!nvme_ns_state_check(ns, &ioc.nif_common,
6938 nvme_format_nvm_states)) {
6939 nvme_mgmt_unlock(nvme);
6940 goto copyout;
6941 }
6942 }
6943
6944 if (nvme_format_nvm(nvme, &ioc)) {
6945 nvme_ioctl_success(&ioc.nif_common);
6946 nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid);
6947 }
6948 nvme_mgmt_unlock(nvme);
6949
6950 copyout:
6951 if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc),
6952 mode & FKIOCTL) != 0) {
6953 return (EFAULT);
6954 }
6955
6956 return (0);
6957 }
6958
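/*
 * NVME_IOC_BD_DETACH: detach the blkdev instance from the specified namespace
 * so that it is no longer exposed to the system as a disk.
 */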
6959 static int
6960 nvme_ioctl_bd_detach(nvme_minor_t *minor, intptr_t arg, int mode,
6961 cred_t *cred_p)
6962 {
6963 nvme_t *const nvme = minor->nm_ctrl;
6964 nvme_ioctl_common_t com;
6965
6966 if ((mode & FWRITE) == 0)
6967 return (EBADF);
6968
6969 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6970 return (EPERM);
6971
6972 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6973 mode & FKIOCTL) != 0) {
6974 return (EFAULT);
6975 }
6976
6977 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6978 goto copyout;
6979 }
6980
6981 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6982 if (nvme_bd_detach_ns(nvme, &com)) {
6983 nvme_ioctl_success(&com);
6984 }
6985 nvme_mgmt_unlock(nvme);
6986
6987 copyout:
6988 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6989 mode & FKIOCTL) != 0) {
6990 return (EFAULT);
6991 }
6992
6993 return (0);
6994 }
6995
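/*
 * NVME_IOC_BD_ATTACH: attach a blkdev instance to the specified namespace,
 * rescanning the namespace first if it hasn't already reached the attached
 * state.
 */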
6996 static int
6997 nvme_ioctl_bd_attach(nvme_minor_t *minor, intptr_t arg, int mode,
6998 cred_t *cred_p)
6999 {
7000 nvme_t *const nvme = minor->nm_ctrl;
7001 nvme_ioctl_common_t com;
7002 nvme_namespace_t *ns;
7003
7004 if ((mode & FWRITE) == 0)
7005 return (EBADF);
7006
7007 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7008 return (EPERM);
7009
7010 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7011 mode & FKIOCTL) != 0) {
7012 return (EFAULT);
7013 }
7014
7015 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
7016 goto copyout;
7017 }
7018
7019 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7020 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7021
7022 /*
7023 * Strictly speaking we shouldn't need to call nvme_init_ns() here, as we
7024 * should already be refreshing the internal state whenever we issue commands
7025 * that change things. However, we still do so as a safety check, lest we hand
7026 * the kernel something bad or a vendor unique command changed something
7027 * behind our backs.
7028 */
7029 if (ns->ns_state < NVME_NS_STATE_ATTACHED) {
7030 nvme_rescan_ns(nvme, com.nioc_nsid);
7031 }
7032
7033 if (nvme_bd_attach_ns(nvme, &com)) {
7034 nvme_ioctl_success(&com);
7035 }
7036 nvme_mgmt_unlock(nvme);
7037
7038 copyout:
7039 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7040 mode & FKIOCTL) != 0) {
7041 return (EFAULT);
7042 }
7043
7044 return (0);
7045 }
7046
7047 /*
7048 * Attach or detach a controller from the specified namespace. While this in
7049 * theory allows for multiple controllers to be specified, currently we only
7050 * support using the controller that this ioctl was issued on. In the future,
7051 * when we have better ways to test dual-attached controllers, this should be
7052 * extended to take the controller list from userland.
7053 */
7054 static boolean_t
7055 nvme_ctrl_attach_detach_ns(nvme_t *nvme, nvme_namespace_t *ns,
7056 nvme_ioctl_common_t *ioc, boolean_t attach)
7057 {
7058 nvme_ioc_cmd_args_t args = { NULL };
7059 nvme_sqe_t sqe;
7060 nvme_ns_mgmt_dw10_t dw10;
7061 uint16_t ctrlids[2];
7062
7063 ASSERT(nvme_mgmt_lock_held(nvme));
7064
7065 bzero(&sqe, sizeof (sqe));
7066 sqe.sqe_nsid = ioc->nioc_nsid;
7067 sqe.sqe_opc = NVME_OPC_NS_ATTACH;
7068
7069 dw10.r = 0;
7070 dw10.b.nsm_sel = attach ? NVME_NS_ATTACH_CTRL_ATTACH :
7071 NVME_NS_ATTACH_CTRL_DETACH;
7072 sqe.sqe_cdw10 = dw10.r;
7073
7074 /*
7075 * As we only support sending our current controller's id along, we can
7076 * simplify this and avoid allocating a full nvme_identify_ctrl_list_t for
7077 * just two 16-bit entries: the count followed by our controller id.
7078 */
7079 ctrlids[0] = 1;
7080 ctrlids[1] = nvme->n_idctl->id_cntlid;
7081
7082 args.ica_sqe = &sqe;
7083 args.ica_data = ctrlids;
7084 args.ica_data_len = sizeof (ctrlids);
7085 args.ica_dma_flags = DDI_DMA_WRITE;
7086 args.ica_copy_flags = FKIOCTL;
7087 args.ica_timeout = nvme_admin_cmd_timeout;
7088
7089 return (nvme_ioc_cmd(nvme, ioc, &args));
7090 }
7091
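/*
 * NVME_IOC_CTRL_DETACH: detach this controller from the specified namespace
 * using the NAMESPACE ATTACHMENT admin command.
 */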
7092 static int
7093 nvme_ioctl_ctrl_detach(nvme_minor_t *minor, intptr_t arg, int mode,
7094 cred_t *cred_p)
7095 {
7096 nvme_t *const nvme = minor->nm_ctrl;
7097 nvme_ioctl_common_t com;
7098 nvme_namespace_t *ns;
7099
7100 if ((mode & FWRITE) == 0)
7101 return (EBADF);
7102
7103 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7104 return (EPERM);
7105
7106 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7107 mode & FKIOCTL) != 0) {
7108 return (EFAULT);
7109 }
7110
7111 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
7112 goto copyout;
7113 }
7114
7115 if (!nvme_validate_ctrl_attach_detach_ns(nvme, &com)) {
7116 goto copyout;
7117 }
7118
7119 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7120 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7121
7122 if (nvme_ns_state_check(ns, &com, nvme_ctrl_detach_states)) {
7123 if (nvme_ctrl_attach_detach_ns(nvme, ns, &com, B_FALSE)) {
7124 nvme_rescan_ns(nvme, com.nioc_nsid);
7125 nvme_ioctl_success(&com);
7126 }
7127 }
7128 nvme_mgmt_unlock(nvme);
7129
7130 copyout:
7131 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7132 mode & FKIOCTL) != 0) {
7133 return (EFAULT);
7134 }
7135
7136 return (0);
7137 }
7138
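/*
 * NVME_IOC_NS_CREATE: create a new namespace with the caller-specified size,
 * capacity, LBA format, and sharing settings via the NAMESPACE MANAGEMENT
 * admin command. The new namespace's ID is returned in nnc_nsid.
 */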
7139 static int
7140 nvme_ioctl_ns_create(nvme_minor_t *minor, intptr_t arg, int mode,
7141 cred_t *cred_p)
7142 {
7143 nvme_t *const nvme = minor->nm_ctrl;
7144 nvme_ioctl_ns_create_t create;
7145
7146 if ((mode & FWRITE) == 0)
7147 return (EBADF);
7148
7149 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7150 return (EPERM);
7151
7152 if (ddi_copyin((void *)(uintptr_t)arg, &create, sizeof (create),
7153 mode & FKIOCTL) != 0) {
7154 return (EFAULT);
7155 }
7156
7157 if (!nvme_ioctl_check(minor, &create.nnc_common,
7158 &nvme_check_ns_create)) {
7159 goto copyout;
7160 }
7161
7162 if (!nvme_validate_ns_create(nvme, &create)) {
7163 goto copyout;
7164 }
7165
7166 /*
7167 * Now that we've validated this, proceed to build up the actual data
7168 * request. We need to fill out the relevant identify namespace data
7169 * structure fields.
7170 */
7171 nvme_identify_nsid_t *idns = kmem_zalloc(sizeof (nvme_identify_nsid_t),
7172 KM_NOSLEEP_LAZY);
7173 if (idns == NULL) {
7174 (void) nvme_ioctl_error(&create.nnc_common,
7175 NVME_IOCTL_E_NO_KERN_MEM, 0, 0);
7176 goto copyout;
7177 }
7178
7179 idns->id_nsize = create.nnc_nsze;
7180 idns->id_ncap = create.nnc_ncap;
7181 idns->id_flbas.lba_format = create.nnc_flbas;
7182 idns->id_nmic.nm_shared = bitx32(create.nnc_nmic, 0, 0);
7183
7184 nvme_ioc_cmd_args_t args = { NULL };
7185 nvme_sqe_t sqe;
7186 nvme_ns_mgmt_dw10_t dw10;
7187 nvme_ns_mgmt_dw11_t dw11;
7188
7189 bzero(&sqe, sizeof (sqe));
7190 sqe.sqe_nsid = create.nnc_common.nioc_nsid;
7191 sqe.sqe_opc = NVME_OPC_NS_MGMT;
7192
7193 dw10.r = 0;
7194 dw10.b.nsm_sel = NVME_NS_MGMT_NS_CREATE;
7195 sqe.sqe_cdw10 = dw10.r;
7196
7197 dw11.r = 0;
7198 dw11.b.nsm_csi = create.nnc_csi;
7199 sqe.sqe_cdw11 = dw11.r;
7200
7201 args.ica_sqe = &sqe;
7202 args.ica_data = idns;
7203 args.ica_data_len = sizeof (nvme_identify_nsid_t);
7204 args.ica_dma_flags = DDI_DMA_WRITE;
7205 args.ica_copy_flags = FKIOCTL;
7206 args.ica_timeout = nvme_format_cmd_timeout;
7207
7208 /*
7209 * This command manipulates our understanding of a namespace's state.
7210 * While we don't need to check anything before we proceed, we still
7211 * logically require the lock.
7212 */
7213 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7214 if (nvme_ioc_cmd(nvme, &create.nnc_common, &args)) {
7215 create.nnc_nsid = args.ica_cdw0;
7216 nvme_rescan_ns(nvme, create.nnc_nsid);
7217 nvme_ioctl_success(&create.nnc_common);
7218 }
7219 nvme_mgmt_unlock(nvme);
7220 kmem_free(idns, sizeof (nvme_identify_nsid_t));
7221
7222 copyout:
7223 if (ddi_copyout(&create, (void *)(uintptr_t)arg, sizeof (create),
7224 mode & FKIOCTL) != 0) {
7225 return (EFAULT);
7226 }
7227
7228 return (0);
7229
7230 }
7231
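/*
 * NVME_IOC_NS_DELETE: delete either a single namespace or, when the broadcast
 * namespace ID is used, all namespaces, via the NAMESPACE MANAGEMENT admin
 * command.
 */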
7232 static int
7233 nvme_ioctl_ns_delete(nvme_minor_t *minor, intptr_t arg, int mode,
7234 cred_t *cred_p)
7235 {
7236 nvme_t *const nvme = minor->nm_ctrl;
7237 nvme_ioctl_common_t com;
7238
7239 if ((mode & FWRITE) == 0)
7240 return (EBADF);
7241
7242 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7243 return (EPERM);
7244
7245 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7246 mode & FKIOCTL) != 0) {
7247 return (EFAULT);
7248 }
7249
7250 if (!nvme_ioctl_check(minor, &com, &nvme_check_ns_delete)) {
7251 goto copyout;
7252 }
7253
7254 if (!nvme_validate_ns_delete(nvme, &com)) {
7255 goto copyout;
7256 }
7257
7258 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7259 if (com.nioc_nsid == NVME_NSID_BCAST) {
7260 if (!nvme_no_blkdev_attached(nvme, com.nioc_nsid)) {
7261 nvme_mgmt_unlock(nvme);
7262 (void) nvme_ioctl_error(&com,
7263 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
7264 goto copyout;
7265 }
7266 } else {
7267 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7268
7269 if (!nvme_ns_state_check(ns, &com, nvme_ns_delete_states)) {
7270 nvme_mgmt_unlock(nvme);
7271 goto copyout;
7272 }
7273 }
7274
7275 nvme_ioc_cmd_args_t args = { NULL };
7276 nvme_sqe_t sqe;
7277 nvme_ns_mgmt_dw10_t dw10;
7278
7279 bzero(&sqe, sizeof (sqe));
7280 sqe.sqe_nsid = com.nioc_nsid;
7281 sqe.sqe_opc = NVME_OPC_NS_MGMT;
7282
7283 dw10.r = 0;
7284 dw10.b.nsm_sel = NVME_NS_MGMT_NS_DELETE;
7285 sqe.sqe_cdw10 = dw10.r;
7286
7287 args.ica_sqe = &sqe;
7288 args.ica_data = NULL;
7289 args.ica_data_len = 0;
7290 args.ica_dma_flags = 0;
7291 args.ica_copy_flags = 0;
7292 args.ica_timeout = nvme_format_cmd_timeout;
7293
7294 if (nvme_ioc_cmd(nvme, &com, &args)) {
7295 nvme_rescan_ns(nvme, com.nioc_nsid);
7296 nvme_ioctl_success(&com);
7297 }
7298 nvme_mgmt_unlock(nvme);
7299
7300 copyout:
7301 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7302 mode & FKIOCTL) != 0) {
7303 return (EFAULT);
7304 }
7305
7306 return (0);
7307 }
7308
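/*
 * NVME_IOC_CTRL_ATTACH: attach this controller to the specified namespace
 * using the NAMESPACE ATTACHMENT admin command.
 */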
7309 static int
7310 nvme_ioctl_ctrl_attach(nvme_minor_t *minor, intptr_t arg, int mode,
7311 cred_t *cred_p)
7312 {
7313 nvme_t *const nvme = minor->nm_ctrl;
7314 nvme_ioctl_common_t com;
7315 nvme_namespace_t *ns;
7316
7317 if ((mode & FWRITE) == 0)
7318 return (EBADF);
7319
7320 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7321 return (EPERM);
7322
7323 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7324 mode & FKIOCTL) != 0) {
7325 return (EFAULT);
7326 }
7327
7328 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
7329 goto copyout;
7330 }
7331
7332 if (!nvme_validate_ctrl_attach_detach_ns(nvme, &com)) {
7333 goto copyout;
7334 }
7335
7336 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7337 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7338
7339 if (nvme_ns_state_check(ns, &com, nvme_ctrl_attach_states)) {
7340 if (nvme_ctrl_attach_detach_ns(nvme, ns, &com, B_TRUE)) {
7341 nvme_rescan_ns(nvme, com.nioc_nsid);
7342 nvme_ioctl_success(&com);
7343 }
7344 }
7345 nvme_mgmt_unlock(nvme);
7346
7347 copyout:
7348 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7349 mode & FKIOCTL) != 0) {
7350 return (EFAULT);
7351 }
7352
7353 return (0);
7354 }
7355
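/*
 * Notify the DDI UFM subsystem that the device's firmware image data may have
 * changed and invalidate our cached copy of the firmware slot log page so that
 * it is re-read on the next UFM callback.
 */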
7356 static void
7357 nvme_ufm_update(nvme_t *nvme)
7358 {
7359 mutex_enter(&nvme->n_fwslot_mutex);
7360 ddi_ufm_update(nvme->n_ufmh);
7361 if (nvme->n_fwslot != NULL) {
7362 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
7363 nvme->n_fwslot = NULL;
7364 }
7365 mutex_exit(&nvme->n_fwslot_mutex);
7366 }
7367
7368 /*
7369 * Download new firmware to the device's internal staging area. We do not call
7370 * nvme_ufm_update() here because after a firmware download, there has been no
7371 * change to any of the actual persistent firmware data. That requires a
7372 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot
7373 * or to activate a slot.
7374 */
7375 static int
7376 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode,
7377 cred_t *cred_p)
7378 {
7379 nvme_t *const nvme = minor->nm_ctrl;
7380 nvme_ioctl_fw_load_t fw;
7381 uint64_t len, maxcopy;
7382 offset_t offset;
7383 uint32_t gran;
7384 nvme_valid_ctrl_data_t data;
7385 uintptr_t buf;
7386 nvme_sqe_t sqe = {
7387 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD
7388 };
7389
7390 if ((mode & FWRITE) == 0)
7391 return (EBADF);
7392
7393 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7394 return (EPERM);
7395
7396 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
7397 mode & FKIOCTL) != 0) {
7398 return (EFAULT);
7399 }
7400
7401 if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) {
7402 goto copyout;
7403 }
7404
7405 if (!nvme_validate_fw_load(nvme, &fw)) {
7406 goto copyout;
7407 }
7408
7409 len = fw.fwl_len;
7410 offset = fw.fwl_off;
7411 buf = fw.fwl_buf;
7412
7413 /*
7414 * We need to determine the minimum and maximum amount of data that we
7415 * will send to the device in a single go. Starting in NVMe 1.3 this must
7416 * be a multiple of the firmware update granularity (FWUG), but it must
7417 * not exceed the maximum data transfer size that we've set. Many devices
7418 * don't report anything here, which means we'll end up with our default
7419 * value. Our policy is simple: if the maximum data transfer size is
7420 * evenly divisible by the granularity, use it; otherwise use the
7421 * granularity itself. The granularity is always in page sized units, so
7422 * trying to find another optimum point isn't worth it. If the two
7423 * contradict each other (the granularity exceeds the maximum transfer
7424 * size), we have to error out.
7425 */
7426 data.vcd_vers = &nvme->n_version;
7427 data.vcd_id = nvme->n_idctl;
7428 gran = nvme_fw_load_granularity(&data);
7429
7430 if ((nvme->n_max_data_transfer_size % gran) == 0) {
7431 maxcopy = nvme->n_max_data_transfer_size;
7432 } else if (gran <= nvme->n_max_data_transfer_size) {
7433 maxcopy = gran;
7434 } else {
7435 (void) nvme_ioctl_error(&fw.fwl_common,
7436 NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0);
7437 goto copyout;
7438 }
7439
7440 while (len > 0) {
7441 nvme_ioc_cmd_args_t args = { NULL };
7442 uint64_t copylen = MIN(maxcopy, len);
7443
7444 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
7445 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
7446
7447 args.ica_sqe = &sqe;
7448 args.ica_data = (void *)buf;
7449 args.ica_data_len = copylen;
7450 args.ica_dma_flags = DDI_DMA_WRITE;
7451 args.ica_copy_flags = mode;
7452 args.ica_timeout = nvme_admin_cmd_timeout;
7453
7454 if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) {
7455 break;
7456 }
7457
7458 buf += copylen;
7459 offset += copylen;
7460 len -= copylen;
7461 }
7462
7463 copyout:
7464 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
7465 mode & FKIOCTL) != 0) {
7466 return (EFAULT);
7467 }
7468
7469 return (0);
7470 }
7471
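/*
 * Commit previously downloaded firmware to a slot and/or activate a slot using
 * the FIRMWARE COMMIT admin command, then invalidate the cached firmware slot
 * information.
 */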
7472 static int
7473 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode,
7474 cred_t *cred_p)
7475 {
7476 nvme_t *const nvme = minor->nm_ctrl;
7477 nvme_ioctl_fw_commit_t fw;
7478 nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
7479 nvme_ioc_cmd_args_t args = { NULL };
7480 nvme_sqe_t sqe = {
7481 .sqe_opc = NVME_OPC_FW_ACTIVATE
7482 };
7483
7484 if ((mode & FWRITE) == 0)
7485 return (EBADF);
7486
7487 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7488 return (EPERM);
7489
7490 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
7491 mode & FKIOCTL) != 0) {
7492 return (EFAULT);
7493 }
7494
7495 if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) {
7496 goto copyout;
7497 }
7498
7499 if (!nvme_validate_fw_commit(nvme, &fw)) {
7500 goto copyout;
7501 }
7502
7503 fc_dw10.b.fc_slot = fw.fwc_slot;
7504 fc_dw10.b.fc_action = fw.fwc_action;
7505 sqe.sqe_cdw10 = fc_dw10.r;
7506
7507 args.ica_sqe = &sqe;
7508 args.ica_timeout = nvme_commit_save_cmd_timeout;
7509
7510 /*
7511 * There are no conditional actions to take based on this succeeding or
7512 * failing. A failure is recorded in the ioctl structure returned to the
7513 * user.
7514 */
7515 (void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args);
7516
7517 /*
7518 * Let the DDI UFM subsystem know that the firmware information for
7519 * this device has changed. We perform this unconditionally as an
7520 * invalidation doesn't particularly hurt us.
7521 */
7522 nvme_ufm_update(nvme);
7523
7524 copyout:
7525 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
7526 mode & FKIOCTL) != 0) {
7527 return (EFAULT);
7528 }
7529
7530 return (0);
7531 }
7532
7533 /*
7534 * Helper to copy in a passthru command from userspace, handling
7535 * different data models.
7536 */
7537 static int
7538 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode)
7539 {
7540 switch (ddi_model_convert_from(mode & FMODELS)) {
7541 #ifdef _MULTI_DATAMODEL
7542 case DDI_MODEL_ILP32: {
7543 nvme_ioctl_passthru32_t cmd32;
7544
7545 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0)
7546 return (EFAULT);
7547
7548 bzero(cmd, sizeof (nvme_ioctl_passthru_t));
7549
7550 cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid;
7551 cmd->npc_opcode = cmd32.npc_opcode;
7552 cmd->npc_timeout = cmd32.npc_timeout;
7553 cmd->npc_flags = cmd32.npc_flags;
7554 cmd->npc_impact = cmd32.npc_impact;
7555 cmd->npc_cdw12 = cmd32.npc_cdw12;
7556 cmd->npc_cdw13 = cmd32.npc_cdw13;
7557 cmd->npc_cdw14 = cmd32.npc_cdw14;
7558 cmd->npc_cdw15 = cmd32.npc_cdw15;
7559 cmd->npc_buflen = cmd32.npc_buflen;
7560 cmd->npc_buf = cmd32.npc_buf;
7561 break;
7562 }
7563 #endif /* _MULTI_DATAMODEL */
7564 case DDI_MODEL_NONE:
7565 if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t),
7566 mode) != 0) {
7567 return (EFAULT);
7568 }
7569 break;
7570 default:
7571 return (ENOTSUP);
7572 }
7573
7574 return (0);
7575 }
7576
7577 /*
7578 * Helper to copy out a passthru command result to userspace, handling
7579 * different data models.
7580 */
7581 static int
7582 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode)
7583 {
7584 switch (ddi_model_convert_from(mode & FMODELS)) {
7585 #ifdef _MULTI_DATAMODEL
7586 case DDI_MODEL_ILP32: {
7587 nvme_ioctl_passthru32_t cmd32;
7588
7589 bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t));
7590
7591 cmd32.npc_common = cmd->npc_common;
7592 cmd32.npc_opcode = cmd->npc_opcode;
7593 cmd32.npc_timeout = cmd->npc_timeout;
7594 cmd32.npc_flags = cmd->npc_flags;
7595 cmd32.npc_impact = cmd->npc_impact;
7596 cmd32.npc_cdw0 = cmd->npc_cdw0;
7597 cmd32.npc_cdw12 = cmd->npc_cdw12;
7598 cmd32.npc_cdw13 = cmd->npc_cdw13;
7599 cmd32.npc_cdw14 = cmd->npc_cdw14;
7600 cmd32.npc_cdw15 = cmd->npc_cdw15;
7601 cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
7602 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
7603 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
7604 return (EFAULT);
7605 break;
7606 }
7607 #endif /* _MULTI_DATAMODEL */
7608 case DDI_MODEL_NONE:
7609 if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t),
7610 mode) != 0) {
7611 return (EFAULT);
7612 }
7613 break;
7614 default:
7615 return (ENOTSUP);
7616 }
7617 return (0);
7618 }
7619
7620 /*
7621 * Run an arbitrary vendor-specific admin command on the device.
7622 */
7623 static int
7624 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
7625 {
7626 nvme_t *const nvme = minor->nm_ctrl;
7627 int rv;
7628 nvme_ioctl_passthru_t pass;
7629 nvme_sqe_t sqe;
7630 nvme_ioc_cmd_args_t args = { NULL };
7631
7632 /*
7633 * Basic checks: permissions, data model, argument size.
7634 */
7635 if ((mode & FWRITE) == 0)
7636 return (EBADF);
7637
7638 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7639 return (EPERM);
7640
7641 if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass,
7642 mode)) != 0) {
7643 return (rv);
7644 }
7645
7646 if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) {
7647 goto copyout;
7648 }
7649
7650 if (!nvme_validate_vuc(nvme, &pass)) {
7651 goto copyout;
7652 }
7653
7654 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7655 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7656 /*
7657 * We've been told this has ns impact. Right now force that to
7658 * be every ns until we have more use cases and reason to trust
7659 * the nsid field.
7660 */
7661 if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) {
7662 nvme_mgmt_unlock(nvme);
7663 (void) nvme_ioctl_error(&pass.npc_common,
7664 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
7665 goto copyout;
7666 }
7667 }
7668
7669 bzero(&sqe, sizeof (sqe));
7670
7671 sqe.sqe_opc = pass.npc_opcode;
7672 sqe.sqe_nsid = pass.npc_common.nioc_nsid;
7673 sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT);
7674 sqe.sqe_cdw12 = pass.npc_cdw12;
7675 sqe.sqe_cdw13 = pass.npc_cdw13;
7676 sqe.sqe_cdw14 = pass.npc_cdw14;
7677 sqe.sqe_cdw15 = pass.npc_cdw15;
7678
7679 args.ica_sqe = &sqe;
7680 args.ica_data = (void *)pass.npc_buf;
7681 args.ica_data_len = pass.npc_buflen;
7682 args.ica_copy_flags = mode;
7683 args.ica_timeout = pass.npc_timeout;
7684
7685 if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0)
7686 args.ica_dma_flags |= DDI_DMA_READ;
7687 else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0)
7688 args.ica_dma_flags |= DDI_DMA_WRITE;
7689
7690 if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) {
7691 pass.npc_cdw0 = args.ica_cdw0;
7692 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7693 nvme_rescan_ns(nvme, NVME_NSID_BCAST);
7694 }
7695 }
7696 nvme_mgmt_unlock(nvme);
7697
7698 copyout:
7699 rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg,
7700 mode);
7701
7702 return (rv);
7703 }
7704
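/*
 * Acquire a read or write lock on either the controller or a single namespace
 * on behalf of this minor. Most of this function is devoted to checking the
 * locking invariants spelled out in the comment below before handing the
 * request off to nvme_rwlock().
 */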
7705 static int
7706 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode,
7707 cred_t *cred_p)
7708 {
7709 nvme_ioctl_lock_t lock;
7710 const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK;
7711 nvme_t *nvme = minor->nm_ctrl;
7712
7713 if ((mode & FWRITE) == 0)
7714 return (EBADF);
7715
7716 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7717 return (EPERM);
7718
7719 if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock),
7720 mode & FKIOCTL) != 0) {
7721 return (EFAULT);
7722 }
7723
7724 if (lock.nil_ent != NVME_LOCK_E_CTRL &&
7725 lock.nil_ent != NVME_LOCK_E_NS) {
7726 (void) nvme_ioctl_error(&lock.nil_common,
7727 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7728 goto copyout;
7729 }
7730
7731 if (lock.nil_level != NVME_LOCK_L_READ &&
7732 lock.nil_level != NVME_LOCK_L_WRITE) {
7733 (void) nvme_ioctl_error(&lock.nil_common,
7734 NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0);
7735 goto copyout;
7736 }
7737
7738 if ((lock.nil_flags & ~all_flags) != 0) {
7739 (void) nvme_ioctl_error(&lock.nil_common,
7740 NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0);
7741 goto copyout;
7742 }
7743
7744 if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) {
7745 goto copyout;
7746 }
7747
7748 /*
7749 * If we're on a namespace, confirm that we're not asking for the
7750 * controller.
7751 */
7752 if (lock.nil_common.nioc_nsid != 0 &&
7753 lock.nil_ent == NVME_LOCK_E_CTRL) {
7754 (void) nvme_ioctl_error(&lock.nil_common,
7755 NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0);
7756 goto copyout;
7757 }
7758
7759 /*
7760 * We've reached the point where we can no longer actually check things
7761 * without serializing state. First, we need to check to make sure that
7762 * none of our invariants are being broken for locking:
7763 *
7764 * 1) The caller isn't already blocking for a lock operation to
7765 * complete.
7766 *
7767 * 2) The caller is attempting to grab a lock that they already have.
7768 * While there are other rule violations that this might create, we opt
7769 * to check this ahead of it so we can have slightly better error
7770 * messages for our callers.
7771 *
7772 * 3) The caller is trying to grab a controller lock, while holding a
7773 * namespace lock.
7774 *
7775 * 4) The caller has a controller write lock and is trying to get a
7776 * namespace lock. For now, we disallow this case. Holding a controller
7777 * read lock is allowed, but the write lock allows you to operate on all
7778 * namespaces anyways. In addition, this simplifies the locking logic;
7779 * however, this constraint may be loosened in the future.
7780 *
7781 * 5) The caller is trying to acquire a second namespace lock when they
7782 * already have one.
7783 */
7784 mutex_enter(&nvme->n_minor_mutex);
7785 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED ||
7786 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) {
7787 (void) nvme_ioctl_error(&lock.nil_common,
7788 NVME_IOCTL_E_LOCK_PENDING, 0, 0);
7789 mutex_exit(&nvme->n_minor_mutex);
7790 goto copyout;
7791 }
7792
7793 if ((lock.nil_ent == NVME_LOCK_E_CTRL &&
7794 minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) ||
7795 (lock.nil_ent == NVME_LOCK_E_NS &&
7796 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7797 minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) {
7798 (void) nvme_ioctl_error(&lock.nil_common,
7799 NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0);
7800 mutex_exit(&nvme->n_minor_mutex);
7801 goto copyout;
7802 }
7803
7804 if (lock.nil_ent == NVME_LOCK_E_CTRL &&
7805 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7806 (void) nvme_ioctl_error(&lock.nil_common,
7807 NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0);
7808 mutex_exit(&nvme->n_minor_mutex);
7809 goto copyout;
7810 }
7811
7812 if (lock.nil_ent == NVME_LOCK_E_NS &&
7813 (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7814 minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) {
7815 (void) nvme_ioctl_error(&lock.nil_common,
7816 NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0);
7817 mutex_exit(&nvme->n_minor_mutex);
7818 goto copyout;
7819 }
7820
7821 if (lock.nil_ent == NVME_LOCK_E_NS &&
7822 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7823 (void) nvme_ioctl_error(&lock.nil_common,
7824 NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0);
7825 mutex_exit(&nvme->n_minor_mutex);
7826 goto copyout;
7827 }
7828
7829 #ifdef DEBUG
7830 /*
7831 * This is a big block of sanity checks to make sure that we haven't
7832 * allowed anything bad to happen.
7833 */
7834 if (lock.nil_ent == NVME_LOCK_E_NS) {
7835 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7836 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7837 NVME_LOCK_STATE_UNLOCKED);
7838 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7839 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7840
7841 if (minor->nm_ns != NULL) {
7842 ASSERT3U(minor->nm_ns->ns_id, ==,
7843 lock.nil_common.nioc_nsid);
7844 }
7845
7846 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7847 } else {
7848 ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL);
7849 ASSERT3U(minor->nm_ctrl_lock.nli_state, ==,
7850 NVME_LOCK_STATE_UNLOCKED);
7851 ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0);
7852 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7853 ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node));
7854
7855 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7856 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7857 NVME_LOCK_STATE_UNLOCKED);
7858 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7859 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7860 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7861 }
7862 #endif /* DEBUG */
7863
7864 /*
7865 * At this point we should actually attempt a locking operation.
7866 */
7867 nvme_rwlock(minor, &lock);
7868 mutex_exit(&nvme->n_minor_mutex);
7869
7870 copyout:
7871 if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock),
7872 mode & FKIOCTL) != 0) {
7873 return (EFAULT);
7874 }
7875
7876 return (0);
7877 }
7878
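/*
 * Release a controller or namespace lock previously acquired through
 * NVME_IOC_LOCK on this minor.
 */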
7879 static int
7880 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
7881 cred_t *cred_p)
7882 {
7883 nvme_ioctl_unlock_t unlock;
7884 nvme_t *const nvme = minor->nm_ctrl;
7885 boolean_t is_ctrl;
7886 nvme_lock_t *lock;
7887 nvme_minor_lock_info_t *info;
7888
7889 /*
7890 * Note, we explicitly don't check for privileges for unlock. The idea
7891 * being that if you have the lock, that's what matters. If you don't
7892 * have the lock, it doesn't matter what privileges you have at
7893 * all.
7894 */
7895 if ((mode & FWRITE) == 0)
7896 return (EBADF);
7897
7898 if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
7899 mode & FKIOCTL) != 0) {
7900 return (EFAULT);
7901 }
7902
7903 if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
7904 unlock.niu_ent != NVME_LOCK_E_NS) {
7905 (void) nvme_ioctl_error(&unlock.niu_common,
7906 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7907 goto copyout;
7908 }
7909
7910 if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) {
7911 goto copyout;
7912 }
7913
7914 /*
7915 * If we're on a namespace, confirm that we're not asking for the
7916 * controller.
7917 */
7918 if (unlock.niu_common.nioc_nsid != 0 &&
7919 unlock.niu_ent == NVME_LOCK_E_CTRL) {
7920 (void) nvme_ioctl_error(&unlock.niu_common,
7921 NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
7922 goto copyout;
7923 }
7924
7925 mutex_enter(&nvme->n_minor_mutex);
7926 if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
7927 if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7928 mutex_exit(&nvme->n_minor_mutex);
7929 (void) nvme_ioctl_error(&unlock.niu_common,
7930 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7931 goto copyout;
7932 }
7933 } else {
7934 if (minor->nm_ns_lock.nli_ns == NULL) {
7935 mutex_exit(&nvme->n_minor_mutex);
7936 (void) nvme_ioctl_error(&unlock.niu_common,
7937 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7938 goto copyout;
7939 }
7940
7941 /*
7942 * Check that our unlock request corresponds to the namespace ID
7943 * that is currently locked. This could happen if we're using
7944 * the controller node and it specified a valid, but not locked,
7945 * namespace ID.
7946 */
7947 if (minor->nm_ns_lock.nli_ns->ns_id !=
7948 unlock.niu_common.nioc_nsid) {
7949 mutex_exit(&nvme->n_minor_mutex);
7950 ASSERT3P(minor->nm_ns, ==, NULL);
7951 (void) nvme_ioctl_error(&unlock.niu_common,
7952 NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
7953 goto copyout;
7954 }
7955
7956 if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7957 mutex_exit(&nvme->n_minor_mutex);
7958 (void) nvme_ioctl_error(&unlock.niu_common,
7959 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7960 goto copyout;
7961 }
7962 }
7963
7964 /*
7965 * Finally, perform the unlock.
7966 */
7967 is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
7968 if (is_ctrl) {
7969 lock = &nvme->n_lock;
7970 info = &minor->nm_ctrl_lock;
7971 } else {
7972 nvme_namespace_t *ns;
7973 const uint32_t nsid = unlock.niu_common.nioc_nsid;
7974
7975 ns = nvme_nsid2ns(nvme, nsid);
7976 lock = &ns->ns_lock;
7977 info = &minor->nm_ns_lock;
7978 VERIFY3P(ns, ==, info->nli_ns);
7979 }
7980 nvme_rwunlock(info, lock);
7981 mutex_exit(&nvme->n_minor_mutex);
7982 nvme_ioctl_success(&unlock.niu_common);
7983
7984 copyout:
7985 if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
7986 mode & FKIOCTL) != 0) {
7987 return (EFAULT);
7988 }
7989
7990 return (0);
7991 }
7992
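/*
 * Primary ioctl(9E) entry point for the nvme character device nodes. devctl
 * ioctls are passed through to the NDI; everything else is dispatched to the
 * per-command handlers above. Most commands are rejected once the controller
 * has been marked dead, with blkdev detach and unlock being the exceptions.
 */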
7993 static int
7994 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
7995 int *rval_p)
7996 {
7997 #ifndef __lock_lint
7998 _NOTE(ARGUNUSED(rval_p));
7999 #endif
8000 int ret;
8001 nvme_minor_t *minor;
8002 nvme_t *nvme;
8003
8004 minor = nvme_minor_find_by_dev(dev);
8005 if (minor == NULL) {
8006 return (ENXIO);
8007 }
8008
8009 nvme = minor->nm_ctrl;
8010 if (nvme == NULL)
8011 return (ENXIO);
8012
8013 if (IS_DEVCTL(cmd))
8014 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
8015
8016 if (nvme->n_dead && (cmd != NVME_IOC_BD_DETACH && cmd !=
8017 NVME_IOC_UNLOCK)) {
8018 if (IS_NVME_IOC(cmd) == 0) {
8019 return (EIO);
8020 }
8021
8022 return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
8023 mode));
8024 }
8025
8026 /*
8027 * ioctls that are no longer using the original ioctl structure.
8028 */
8029 switch (cmd) {
8030 case NVME_IOC_CTRL_INFO:
8031 ret = nvme_ioctl_ctrl_info(minor, arg, mode, cred_p);
8032 break;
8033 case NVME_IOC_IDENTIFY:
8034 ret = nvme_ioctl_identify(minor, arg, mode, cred_p);
8035 break;
8036 case NVME_IOC_GET_LOGPAGE:
8037 ret = nvme_ioctl_get_logpage(minor, arg, mode, cred_p);
8038 break;
8039 case NVME_IOC_GET_FEATURE:
8040 ret = nvme_ioctl_get_feature(minor, arg, mode, cred_p);
8041 break;
8042 case NVME_IOC_BD_DETACH:
8043 ret = nvme_ioctl_bd_detach(minor, arg, mode, cred_p);
8044 break;
8045 case NVME_IOC_BD_ATTACH:
8046 ret = nvme_ioctl_bd_attach(minor, arg, mode, cred_p);
8047 break;
8048 case NVME_IOC_FORMAT:
8049 ret = nvme_ioctl_format(minor, arg, mode, cred_p);
8050 break;
8051 case NVME_IOC_FIRMWARE_DOWNLOAD:
8052 ret = nvme_ioctl_firmware_download(minor, arg, mode, cred_p);
8053 break;
8054 case NVME_IOC_FIRMWARE_COMMIT:
8055 ret = nvme_ioctl_firmware_commit(minor, arg, mode, cred_p);
8056 break;
8057 case NVME_IOC_NS_INFO:
8058 ret = nvme_ioctl_ns_info(minor, arg, mode, cred_p);
8059 break;
8060 case NVME_IOC_PASSTHRU:
8061 ret = nvme_ioctl_passthru(minor, arg, mode, cred_p);
8062 break;
8063 case NVME_IOC_LOCK:
8064 ret = nvme_ioctl_lock(minor, arg, mode, cred_p);
8065 break;
8066 case NVME_IOC_UNLOCK:
8067 ret = nvme_ioctl_unlock(minor, arg, mode, cred_p);
8068 break;
8069 case NVME_IOC_CTRL_DETACH:
8070 ret = nvme_ioctl_ctrl_detach(minor, arg, mode, cred_p);
8071 break;
8072 case NVME_IOC_CTRL_ATTACH:
8073 ret = nvme_ioctl_ctrl_attach(minor, arg, mode, cred_p);
8074 break;
8075 case NVME_IOC_NS_CREATE:
8076 ret = nvme_ioctl_ns_create(minor, arg, mode, cred_p);
8077 break;
8078 case NVME_IOC_NS_DELETE:
8079 ret = nvme_ioctl_ns_delete(minor, arg, mode, cred_p);
8080 break;
8081 default:
8082 ret = ENOTTY;
8083 break;
8084 }
8085
8086 ASSERT(!nvme_mgmt_lock_held(nvme));
8087 return (ret);
8088 }
8089
8090 /*
8091 * DDI UFM Callbacks
8092 */
8093 static int
8094 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
8095 ddi_ufm_image_t *img)
8096 {
8097 nvme_t *nvme = arg;
8098
8099 if (imgno != 0)
8100 return (EINVAL);
8101
8102 ddi_ufm_image_set_desc(img, "Firmware");
8103 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
8104
8105 return (0);
8106 }
8107
8108 /*
8109 * Fill out firmware slot information for the requested slot. The firmware
8110 * slot information is gathered by requesting the Firmware Slot Information log
8111 * page. The format of the page is described in section 5.10.1.3.
8112 *
8113 * We lazily cache the log page on the first call and then invalidate the
8114 * cached data after a successful firmware download or firmware commit command.
8115 * The cached data is protected by a mutex as the state can change
8116 * asynchronously to this callback.
8117 */
8118 static int
8119 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
8120 uint_t slotno, ddi_ufm_slot_t *slot)
8121 {
8122 nvme_t *nvme = arg;
8123 void *log = NULL;
8124 size_t bufsize;
8125 ddi_ufm_attr_t attr = 0;
8126 char fw_ver[NVME_FWVER_SZ + 1];
8127
8128 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
8129 return (EINVAL);
8130
8131 mutex_enter(&nvme->n_fwslot_mutex);
8132 if (nvme->n_fwslot == NULL) {
8133 if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
8134 NVME_LOGPAGE_FWSLOT) ||
8135 bufsize != sizeof (nvme_fwslot_log_t)) {
8136 if (log != NULL)
8137 kmem_free(log, bufsize);
8138 mutex_exit(&nvme->n_fwslot_mutex);
8139 return (EIO);
8140 }
8141 nvme->n_fwslot = (nvme_fwslot_log_t *)log;
8142 }
8143
8144 /*
8145 * NVMe numbers firmware slots starting at 1
8146 */
8147 if (slotno == (nvme->n_fwslot->fw_afi - 1))
8148 attr |= DDI_UFM_ATTR_ACTIVE;
8149
8150 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
8151 attr |= DDI_UFM_ATTR_WRITEABLE;
8152
8153 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
8154 attr |= DDI_UFM_ATTR_EMPTY;
8155 } else {
8156 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
8157 NVME_FWVER_SZ);
8158 fw_ver[NVME_FWVER_SZ] = '\0';
8159 ddi_ufm_slot_set_version(slot, fw_ver);
8160 }
8161 mutex_exit(&nvme->n_fwslot_mutex);
8162
8163 ddi_ufm_slot_set_attrs(slot, attr);
8164
8165 return (0);
8166 }
8167
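/*
 * We only advertise the UFM report capability; no other UFM capabilities are
 * supported.
 */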
8168 static int
8169 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
8170 {
8171 *caps = DDI_UFM_CAP_REPORT;
8172 return (0);
8173 }
8174
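/*
 * Check whether the controller reports an NVMe version that is at least the
 * given minimum version.
 */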
8175 boolean_t
8176 nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
8177 {
8178 return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
8179 }
8180