1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
14 * Copyright 2019 Unix Software Ltd.
15 * Copyright 2020 Joyent, Inc.
16 * Copyright 2020 Racktop Systems.
17 * Copyright 2025 Oxide Computer Company.
18 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
19 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
20 */
21
22 /*
23 * blkdev driver for NVMe compliant storage devices
24 *
 * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
 * devices. Features are added to the driver as we encounter devices that
 * require them and as our own needs dictate, so some commands or log pages may
 * not take advantage of newer features that devices support at this time. When
 * you encounter such a case, it is generally fine to add that support to the
 * driver as long as you take care to ensure that the requisite device version
 * is met before using it.
32 *
33 * The driver has only been tested on x86 systems and will not work on big-
34 * endian systems without changes to the code accessing registers and data
35 * structures used by the hardware.
36 *
37 * ---------------
38 * Interrupt Usage
39 * ---------------
40 *
41 * The driver will use a single interrupt while configuring the device as the
42 * specification requires, but contrary to the specification it will try to use
43 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
44 * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
48 * the admin queue will share an interrupt with one I/O queue. The interrupt
49 * handler will retrieve completed commands from all queues sharing an interrupt
50 * vector and will post them to a taskq for completion processing.
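 *
 * As a rough sketch of that fan-out (illustrative pseudo-code only; apart from
 * n_intr_cnt the names used here are hypothetical, not the actual handler):
 *
 *	for (qidx = vector; qidx < io_queue_count; qidx += n_intr_cnt) {
 *		while ((cmd = retrieve_completed(ioq[qidx])) != NULL)
 *			dispatch_to_completion_taskq(cmd);
 *	}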
51 *
52 * ------------------
53 * Command Processing
54 * ------------------
55 *
56 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
57 * to 65536 I/O commands. The driver will configure one I/O queue pair per
58 * available interrupt vector, with the queue length usually much smaller than
59 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
60 * interrupt vectors will be used.
61 *
62 * Additionally the hardware provides a single special admin queue pair that can
63 * hold up to 4096 admin commands.
64 *
65 * From the hardware perspective both queues of a queue pair are independent,
66 * but they share some driver state: the command array (holding pointers to
67 * commands currently being processed by the hardware) and the active command
68 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
70 *
71 * When a command is submitted to a queue pair the active command counter is
72 * incremented and a pointer to the command is stored in the command array. The
73 * array index is used as command identifier (CID) in the submission queue
74 * entry. Some commands may take a very long time to complete, and if the queue
75 * wraps around in that time a submission may find the next array slot to still
76 * be used by a long-running command. In this case the array is sequentially
77 * searched for the next free slot. The length of the command array is the same
 * as the configured queue length. Queue overrun is prevented by a per-queue
 * semaphore, so a command submission may block if the queue is full.
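 *
 * A condensed sketch of that submission path (illustrative only; the member
 * names used here are assumptions rather than the exact nvme_qpair_t fields):
 *
 *	sema_p(&qp->nq_sema);		(blocks while the queue is full)
 *	mutex_enter(&qp->nq_mutex);
 *	while (qp->nq_cmd[cid] != NULL)	(slot still held by a slow command)
 *		cid = (cid + 1) % qlen;
 *	qp->nq_cmd[cid] = cmd;		(the array index doubles as the CID)
 *	qp->nq_active_cmds++;
 *	... fill in the SQE with CID == cid and ring the doorbell ...
 *	mutex_exit(&qp->nq_mutex);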
80 *
81 * ------------------
82 * Polled I/O Support
83 * ------------------
84 *
 * For kernel core dump support the driver can do polled I/O. As interrupts are
 * turned off while dumping, the driver will just submit a command in the
 * regular way and then repeatedly attempt command retrieval until it gets the
 * command back.
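 *
 * Conceptually (a sketch of the idea, not the actual dump code path):
 *
 *	(void) nvme_submit_io_cmd(ioq, cmd);
 *	while ((done = nvme_retrieve_cmd(nvme, ioq)) == NULL)
 *		drv_usecwait(10);	(busy-wait; interrupts are off)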
89 *
90 * -----------------
91 * Namespace Support
92 * -----------------
93 *
 * NVMe devices can have multiple namespaces, each being an independent data
95 * store. The driver supports multiple namespaces and creates a blkdev interface
96 * for each namespace found. Namespaces can have various attributes to support
97 * protection information. This driver does not support any of this and ignores
98 * namespaces that have these attributes.
99 *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
101 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
102 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
103 * if present to generate the devid, and passes the EUI64 to blkdev to use it
104 * in the device node names.
105 *
 * When a device has more than (1 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller, additional namespaces will not have minor nodes created.
 * They can still be used and specified via the controller minor and libnvme.
 * This limit is trying to balance the number of controllers and namespaces
 * while fitting within the constraints of MAXMIN32, aka a 32-bit device number
 * which only has 18 bits for the minor number. See the minor node section for
 * more information.
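 * With the current NVME_MINOR_INST_SHIFT of 9 (defined alongside the other
 * minor number macros further down), this works out to (1 << 9) - 2 = 510
 * namespaces per controller that receive minor nodes, which matches the
 * NVME_MINOR_MAX definition below.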
113 *
114 * The driver supports namespace management, meaning the ability to create and
115 * destroy namespaces, and to attach and detach namespaces from controllers.
116 * Each namespace has an associated nvme_ns_state_t, which transitions through
 * several states. The UNALLOCATED, ALLOCATED, and ACTIVE states are defined by
 * the NVMe specification. Not all ACTIVE namespaces may be attached to
 * blkdev(4D) due to the use of features we don't support, for example, metadata
 * protection; ACTIVE namespaces that don't use such features automatically
 * transition to the NOT_IGNORED state. Once they are attached to blkdev they
 * enter the ATTACHED state.
123 *
 * In general, a namespace can only transition between these states one step at
 * a time. Each command that transitions between states has a corresponding
 * array of errnos, for example nvme_ns_delete_states[],
 * nvme_ctrl_attach_states[], etc. These dictate whether a state-changing
 * command is allowed to proceed based on the namespace's current state, and
 * each entry is a specific error that explains why something isn't in the
 * proper state. This allows library consumers to determine, for instance,
 * whether a namespace is already in the state that a command would move it to.
 * The following diagram summarizes namespace transitions:
133 *
134 * +-------------+
135 * | |
136 * | Unallocated |
137 * | |
138 * +-------------+
139 * | ^
140 * | |
141 * Namespace Management: . .* * . . . Namespace Management:
142 * Create | | Delete
143 * NVME_IOC_NS_CREATE | | NVME_IOC_NS_DELETE
144 * v |
145 * +-------------+
146 * | |
147 * | Allocated |
148 * | |
149 * +-------------+
150 * | ^
151 * | |
152 * Namespace Attachment: . .* * . . . Namespace Attachment:
153 * Controller Attach | | Controller Detach
154 * NVME_IOC_CTRL_ATTACH | | NVME_IOC_CTRL_DETACH
155 * v |
156 * +------------+ |
157 * | | | +----------+
158 * | Active |>-----+----<| Not |
159 * | |--*-------->| Ignored |
160 * +------------+ . +----------+
161 * . | ^
162 * automatic kernel transition | |
163 * | * . . blkdev Detach
164 * blkdev attach . . * | NVME_IOC_BD_DETACH
165 * NVME_IOC_BD_ATTACH | |
166 * v |
167 * +----------+
168 * | |
169 * | blkdev |
170 * | attached |
171 * | |
172 * +----------+
173 *
174 * -----------
175 * Minor nodes
176 * -----------
177 *
178 * For each NVMe device the driver exposes one minor node for the controller and
179 * one minor node for each namespace. The only operations supported by those
180 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
181 * primary control interface for the devices. The character device is a private
 * interface and we instead provide stability through libnvme and, more so,
 * nvmeadm.
183 *
184 * The controller minor node is much more flexible than the namespace minor node
185 * and should be preferred. The controller node allows one to target any
186 * namespace that the device has, while the namespace is limited in what it can
 * acquire. While the namespace minor exists, it should not be relied upon;
 * libnvme does not rely on it.
189 *
190 * The minor number space is split in two. We use the lower part to support the
191 * controller and namespaces as described above in the 'Namespace Support'
192 * section. The second set is used for cloning opens. We set aside one million
 * minors for this purpose. We utilize a cloning open so that we can have
 * per-file_t state, which is how we end up implementing and tracking locking
 * state and related data.
196 *
197 * When we have this cloned open, then we allocate a new nvme_minor_t which gets
198 * its minor number from the nvme_open_minors id_space_t and is stored in the
 * nvme_open_minors_avl. Although open(9E) is called on a controller or
 * namespace minor, everything after the open occurs in the context of one of
 * these ephemeral minors.
202 *
203 * ------------------------------------
204 * ioctls, Errors, and Exclusive Access
205 * ------------------------------------
206 *
207 * All of the logical commands that one can issue are driven through the
208 * ioctl(9E) interface. All of our ioctls have a similar shape where they
209 * all include the 'nvme_ioctl_common_t' as their first member.
210 *
211 * This common ioctl structure is used to communicate the namespace that should
 * be targeted. When the namespace is left as 0, that indicates that it should
 * target whatever the default of the minor node is. For a namespace
214 * minor, that will be transparently rewritten to the namespace's namespace id.
215 *
216 * In addition, the nvme_ioctl_common_t structure also has a standard error
217 * return. Our goal in our ioctl path is to ensure that we have useful semantic
 * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead, as
 * long as we can copy in our structure, we will set a semantic error. If
220 * we have an error from the controller, then that will be included there.
221 *
222 * Each command has a specific policy that controls whether or not it is allowed
223 * on the namespace or controller minor, whether the broadcast namespace is
224 * allowed, various settings around what kind of exclusive access is allowed,
225 * and more. Each of these is wrapped up in a bit of policy described by the
226 * 'nvme_ioctl_check_t' structure.
227 *
 * The driver provides exclusion in the form of both a controller-level and a
 * namespace-level read and write lock. Most operations do
230 * not require a lock (e.g. get log page, identify, etc.), but a few do (e.g.
231 * format nvm, firmware related activity, etc.). A read lock guarantees that you
232 * can complete your operation without interference, but read locks are not
233 * required. If you don't take a read lock and someone comes in with a write
234 * lock, then subsequent operations will fail with a semantic error indicating
235 * that you were blocked due to this.
236 *
237 * Here are some of the rules that govern our locks:
238 *
239 * 1. Writers starve readers. Any readers are allowed to finish when there is a
240 * pending writer; however, all subsequent readers will be blocked upon that
241 * writer.
242 * 2. A controller write lock takes priority over all other locks. Put
243 * differently a controller writer not only starves subsequent controller
244 * readers, but also all namespace read and write locks.
245 * 3. Each namespace lock is independent.
246 * 4. At most a single namespace lock may be owned.
247 * 5. If you own a namespace lock, you may not take a controller lock (to help
248 * with lock ordering).
249 * 6. In a similar spirit, if you own a controller write lock, you may not take
250 * any namespace lock. Someone with the controller write lock can perform any
251 * operations that they need to. However, if you have a controller read lock
252 * you may take any namespace lock.
253 * 7. There is no ability to upgrade a read lock to a write lock.
254 * 8. There is no recursive locking.
255 *
256 * While there's a lot there to keep track of, the goals of these are to
257 * constrain things so as to avoid deadlock. This is more complex than the
258 * original implementation in the driver which only allowed for an exclusive
259 * open that was tied to the thread. The first issue with tying this to the
 * thread was that it didn't work well for software that utilized thread
261 * pools, like complex daemons. The second issue is that we want the ability for
262 * daemons, such as a FRU monitor, to be able to retain a file descriptor to the
263 * device without blocking others from taking action except during critical
264 * periods.
265 *
266 * In particular to enable something like libnvme, we didn't want someone to
267 * have to open and close the file descriptor to change what kind of exclusive
268 * access they desired.
269 *
270 * There are two different sets of data structures that we employ for tracking
271 * locking information:
272 *
273 * 1) The nvme_lock_t structure is contained in both the nvme_t and the
274 * nvme_namespace_t and tracks the current writer, readers, and pending writers
275 * and readers. Each of these lists or the writer pointer all refer to our
276 * second data structure.
277 *
278 * When a lock is owned by a single writer, then the nl_writer field is set to a
279 * specific minor's lock data structure. If instead readers are present, then
280 * the nl_readers list_t is not empty. An invariant of the system is that if
281 * nl_writer is non-NULL, nl_readers must be empty and conversely, if nl_readers
282 * is not empty, nl_writer must be NULL.
283 *
284 * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one
285 * information structure which represents the minor's controller lock and a
286 * second one that represents the minor's namespace lock. The members of this
287 * are broken into tracking what the current lock is and what it targets. It
 * also has several members that are intended for debugging (nli_last_change,
289 * nli_acq_kthread, etc.).
290 *
291 * While the minor has two different lock information structures, our rules
292 * ensure that only one of the two can be pending and that they shouldn't result
293 * in a deadlock. When a lock is pending, the caller is sleeping on the minor's
294 * nm_cv member.
295 *
296 * These relationships are represented in the following image which shows a
 * controller write lock being held with pending readers on the controller
298 * lock and pending writers on one of the controller's namespaces.
299 *
300 * +---------+
301 * | nvme_t |
302 * | |
303 * | n_lock -|-------+
304 * | n_ns -+ | | +-----------------------------+
305 * +-------|-+ +-----------------+ | nvme_minor_t |
306 * | | nvme_lock_t | | |
307 * | | | | +------------------------+ |
308 * | | writer --|-------------->| nvme_minor_lock_info_t | |
309 * | | reader list | | | nm_ctrl_lock | |
310 * | | pending writers | | +------------------------+ |
311 * | | pending readers |------+ | +------------------------+ |
312 * | +-----------------+ | | | nvme_minor_lock_info_t | |
313 * | | | | nm_ns_lock | |
314 * | | | +------------------------+ |
315 * | | +-----------------------------+
316 * +------------------+ | +-----------------+
317 * | nvme_namespace_t | | | nvme_minor_t |
318 * | | | | |
319 * | ns_lock ---+ | | | +-------------+ |
320 * +------------|-----+ +-----------------|>|nm_ctrl_lock | |
321 * | | +-------------+ |
322 * v +-----------------+
323 * +------------------+ ...
324 * | nvme_lock_t | +-----------------+
325 * | | | nvme_minor_t |
326 * | writer | | |
327 * | reader list | | +-------------+ |
328 * | pending writers -|-----------------+ | |nm_ctrl_lock | |
329 * | pending readers | | | +-------------+ |
330 * +------------------+ | +-----------------+
331 * +-----------------------------+ | +-----------------------------+
332 * | nvme_minor_t | | | nvme_minor_t |
333 * | | | | |
334 * | +------------------------+ | | | +------------------------+ |
335 * | | nvme_minor_lock_info_t | | | | | nvme_minor_lock_info_t | |
336 * | | nm_ctrl_lock | | | | | nm_ctrl_lock | |
337 * | +------------------------+ | | | +------------------------+ |
338 * | +------------------------+ | v | +------------------------+ |
339 * | | nvme_minor_lock_info_t |-|-----|->| nvme_minor_lock_info_t | |
340 * | | nm_ns_lock | | | | nm_ns_lock | |
341 * | +------------------------+ | | +------------------------+ |
342 * +-----------------------------+ +-----------------------------+
343 *
344 * ----------------
345 * Blkdev Interface
346 * ----------------
347 *
348 * This driver uses blkdev to do all the heavy lifting involved with presenting
349 * a disk device to the system. As a result, the processing of I/O requests is
350 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
351 * setup, and splitting of transfers into manageable chunks.
352 *
353 * I/O requests coming in from blkdev are turned into NVM commands and posted to
354 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
355 * queues. There is currently no timeout handling of I/O commands.
356 *
357 * Blkdev also supports querying device/media information and generating a
358 * devid. The driver reports the best block size as determined by the namespace
359 * format back to blkdev as physical block size to support partition and block
360 * alignment. The devid is either based on the namespace GUID or EUI64, if
361 * present, or composed using the device vendor ID, model number, serial number,
362 * and the namespace ID.
363 *
364 * --------------
365 * Error Handling
366 * --------------
367 *
368 * Error handling is currently limited to detecting fatal hardware errors,
369 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off and
 * all further requests will return EIO. FMA is then called to fault the device.
372 *
373 * The hardware has a limit for outstanding asynchronous event requests. Before
374 * this limit is known the driver assumes it is at least 1 and posts a single
375 * asynchronous request. Later when the limit is known more asynchronous event
376 * requests are posted to allow quicker reception of error information. When an
377 * asynchronous event is posted by the hardware the driver will parse the error
378 * status fields and log information or fault the device, depending on the
379 * severity of the asynchronous event. The asynchronous event request is then
380 * reused and posted to the admin queue again.
381 *
382 * On command completion the command status is checked for errors. In case of
383 * errors indicating a driver bug the driver panics. Almost all other error
384 * status values just cause EIO to be returned.
385 *
386 * Command timeouts are currently detected for all admin commands except
387 * asynchronous event requests. If a command times out and the hardware appears
388 * to be healthy the driver attempts to abort the command. The abort command
389 * timeout is a separate tunable but the original command timeout will be used
390 * if it is greater. If the abort times out too the driver assumes the device
391 * to be dead, fences it off, and calls FMA to retire it. In all other cases
392 * the aborted command should return immediately with a status indicating it
393 * was aborted, and the driver will wait indefinitely for that to happen. No
394 * timeout handling of normal I/O commands is presently done.
395 *
396 * Any command that times out due to the controller dropping dead will be put on
 * the nvme_lost_cmds list if it references DMA memory. This will prevent the
 * DMA memory from being reused by the system and later being written to by a
 * "dead" NVMe controller.
400 *
401 * -------
402 * Locking
403 * -------
404 *
405 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
406 * when accessing shared state and submission queue registers, ncq_mutex
407 * is held when accessing completion queue state and registers.
408 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
409 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
410 * mutexes themselves.
411 *
412 * Each command also has its own nc_mutex, which is associated with the
413 * condition variable nc_cv. It is only used on admin commands which are run
414 * synchronously. In that case it must be held across calls to
415 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
416 * nvme_admin_cmd(). It must also be held whenever the completion state of the
417 * command is changed or while an admin command timeout is handled.
418 *
419 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
420 * More than one nc_mutex may only be held when aborting commands. In this case,
421 * the nc_mutex of the command to be aborted must be held across the call to
422 * nvme_abort_cmd() to prevent the command from completing while the abort is in
423 * progress.
424 *
425 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
426 * acquired first. More than one nq_mutex is never held by a single thread.
427 * The ncq_mutex is only held by nvme_retrieve_cmd() and
428 * nvme_process_iocq(). nvme_process_iocq() is only called from the
429 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
430 * mutex is non-contentious but is required for implementation completeness
431 * and safety.
432 *
433 * Each nvme_t has an n_admin_stat_mutex that protects the admin command
434 * statistics structure. If this is taken in conjunction with any other locks,
435 * then it must be taken last.
436 *
437 * There is one mutex n_minor_mutex which protects all open flags nm_open and
438 * exclusive-open thread pointers nm_oexcl of each minor node associated with a
439 * controller and its namespaces.
440 *
441 * In addition, there is a logical namespace management mutex which protects the
442 * data about namespaces. When interrogating the metadata of any namespace, this
 * lock must be held. This gets tricky as we need to call into blkdev, which may
 * issue callbacks into us that want this lock. It is illegal to hold locks
 * across those blkdev calls, as otherwise they might lead to deadlock (blkdev
 * leverages ndi_devi_enter()).
447 *
448 * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev
449 * read-only. The idea is that most callers will use the NVME level which says
450 * this is a full traditional mutex operation. The BDRO level is used by blkdev
 * callback functions and is a promise to only read the data. When a blkdev
452 * operation starts, the lock holder will use nvme_mgmt_bd_start(). This
453 * strictly speaking drops the mutex, but records that the lock is logically
454 * held by the thread that did the start() operation.
455 *
456 * During this time, other threads (or even the same one) may end up calling
 * into nvme_mgmt_lock(). Only one thread may still hold the lock at any time;
 * however, the BDRO level will be allowed to proceed during this time. This
 * allows us to make consistent progress and honor the blkdev lock ordering
 * requirements, albeit in a way that is not as straightforward as a simple
 * mutex.
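 *
 * The expected usage pattern, sketched in terms of the helpers defined later in
 * this file (the blkdev call shown is just one example of a re-entrant call):
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	... examine or update namespace state ...
 *	nvme_mgmt_bd_start(nvme);
 *	rv = bd_attach_handle(...);	(may call back into the driver, which
 *					 then takes NVME_MGMT_LOCK_BDRO)
 *	nvme_mgmt_bd_end(nvme);
 *	nvme_mgmt_unlock(nvme);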
461 *
462 * ---------------------
463 * Quiesce / Fast Reboot
464 * ---------------------
465 *
466 * The driver currently does not support fast reboot. A quiesce(9E) entry point
467 * is still provided which is used to send a shutdown notification to the
468 * device.
469 *
470 *
471 * ------------
472 * NVMe Hotplug
473 * ------------
474 *
475 * The driver supports hot removal. The driver uses the NDI event framework
476 * to register a callback, nvme_remove_callback, to clean up when a disk is
477 * removed. In particular, the driver will unqueue outstanding I/O commands and
478 * set n_dead on the softstate to true so that other operations, such as ioctls
479 * and command submissions, fail as well.
480 *
481 * While the callback registration relies on the NDI event framework, the
482 * removal event itself is kicked off in the PCIe hotplug framework, when the
483 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
484 * device was removed from the slot.
485 *
486 * The NVMe driver instance itself will remain until the final close of the
487 * device.
488 *
489 * ---------------
490 * DDI UFM Support
491 * ---------------
492 *
493 * The driver supports the DDI UFM framework for reporting information about
494 * the device's firmware image and slot configuration. This data can be
495 * queried by userland software via ioctls to the ufm driver. For more
496 * information, see ddi_ufm(9E).
497 *
498 * --------------------
499 * Driver Configuration
500 * --------------------
501 *
 * The following driver properties can be changed to control some aspects of the
 * driver's operation (an example follows the list):
504 * - strict-version: can be set to 0 to allow devices conforming to newer
505 * major versions to be used
506 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 * specific command status as a fatal error leading to device faulting
508 * - admin-queue-len: the maximum length of the admin queue (16-4096)
509 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
510 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
511 * - async-event-limit: the maximum number of asynchronous event requests to be
512 * posted by the driver
513 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
514 * cache
515 * - min-phys-block-size: the minimum physical block size to report to blkdev,
516 * which is among other things the basis for ZFS vdev ashift
517 * - max-submission-queues: the maximum number of I/O submission queues.
518 * - max-completion-queues: the maximum number of I/O completion queues,
519 * can be less than max-submission-queues, in which case the completion
520 * queues are shared.
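 *
 * For example, a hypothetical nvme.conf fragment overriding a few of the above
 * might look like the following (the values shown are purely illustrative, not
 * recommendations):
 *
 *	strict-version=0;
 *	io-squeue-len=1024;
 *	io-cqueue-len=1024;
 *	min-phys-block-size=4096;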
521 *
522 * In addition to the above properties, some device-specific tunables can be
523 * configured using the nvme-config-list global property. The value of this
524 * property is a list of triplets. The formal syntax is:
525 *
526 * nvme-config-list ::= <triplet> [, <triplet>]* ;
527 * <triplet> ::= "<model>" , "<rev-list>" , "<tuple-list>"
528 * <rev-list> ::= [ <fwrev> [, <fwrev>]*]
529 * <tuple-list> ::= <tunable> [, <tunable>]*
530 * <tunable> ::= <name> : <value>
531 *
532 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of the triplet,
 * <tuple-list>, contains one or more tunables to apply to all controllers that
 * match the specified model number and optionally firmware revision. Each
 * <tunable> is a <name> : <value> pair. Supported tunables are listed below,
 * followed by an illustrative example:
537 *
538 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
 * specific command status as a fatal error leading to device faulting
540 *
541 * - min-phys-block-size: the minimum physical block size to report to blkdev,
542 * which is among other things the basis for ZFS vdev ashift
543 *
544 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
545 * volatile write cache, if present
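 *
 * For example (the model and firmware revision strings here are placeholders,
 * not real devices):
 *
 *	nvme-config-list =
 *	    "EXAMPLE MODEL A", "", "min-phys-block-size:4096",
 *	    "EXAMPLE MODEL B", "FW1.0,FW1.1", "volatile-write-cache:off";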
546 *
547 *
548 * TODO:
549 * - figure out sane default for I/O queue depth reported to blkdev
550 * - FMA handling of media errors
551 * - support for devices supporting very large I/O requests using chained PRPs
552 * - support for configuring hardware parameters like interrupt coalescing
553 * - support for big-endian systems
554 * - support for fast reboot
555 * - support for NVMe Subsystem Reset (1.1)
556 * - support for Scatter/Gather lists (1.1)
557 * - support for Reservations (1.1)
558 * - support for power management
559 */
560
561 #include <sys/byteorder.h>
562 #ifdef _BIG_ENDIAN
563 #error nvme driver needs porting for big-endian platforms
564 #endif
565
566 #include <sys/modctl.h>
567 #include <sys/conf.h>
568 #include <sys/devops.h>
569 #include <sys/ddi.h>
570 #include <sys/ddi_ufm.h>
571 #include <sys/sunddi.h>
572 #include <sys/sunndi.h>
573 #include <sys/bitmap.h>
574 #include <sys/sysmacros.h>
575 #include <sys/param.h>
576 #include <sys/varargs.h>
577 #include <sys/cpuvar.h>
578 #include <sys/disp.h>
579 #include <sys/blkdev.h>
580 #include <sys/atomic.h>
581 #include <sys/archsystm.h>
582 #include <sys/sata/sata_hba.h>
583 #include <sys/stat.h>
584 #include <sys/policy.h>
585 #include <sys/list.h>
586 #include <sys/dkio.h>
587 #include <sys/pci.h>
588 #include <sys/mkdev.h>
589
590 #include <sys/nvme.h>
591
592 #ifdef __x86
593 #include <sys/x86_archext.h>
594 #endif
595
596 #include "nvme_reg.h"
597 #include "nvme_var.h"
598
599 /*
600 * Assertions to make sure that we've properly captured various aspects of the
601 * packed structures and haven't broken them during updates.
602 */
603 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
604 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
605 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
606 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
607 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
608 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
609 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
610 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
611
612 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
613 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
614 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
615 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
616 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
617 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
618
619 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
620 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);
621
622 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
623 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
624 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
625
626 CTASSERT(sizeof (nvme_nschange_list_t) == 4096);
627
628 /* NVMe spec version supported */
629 static const int nvme_version_major = 2;
630
631 /* Tunable for FORMAT NVM command timeout in seconds, default is 600s */
632 uint32_t nvme_format_cmd_timeout = 600;
633
634 /* Tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
635 uint32_t nvme_commit_save_cmd_timeout = 15;
636
637 /*
638 * Tunable for the admin command timeout used for commands other than those
639 * with their own timeouts defined above; in seconds. While most commands are
640 * expected to complete very quickly (sub-second), experience has shown that
641 * some controllers can occasionally be a bit slower, and not always consistent
642 * in the time taken - times of up to around 4.2s have been observed. Setting
643 * this to 15s by default provides headroom.
644 */
645 uint32_t nvme_admin_cmd_timeout = 15;
646
647 /*
648 * Tunable for abort command timeout in seconds, default is 60s. This timeout
649 * is used when issuing an abort command, currently only in response to a
650 * different admin command timing out. Aborts always complete after the command
651 * that they are attempting to abort so we need to allow enough time for the
652 * controller to process the long running command that we are attempting to
653 * abort. The abort timeout here is only used if it is greater than the timeout
654 * for the command that is being aborted.
655 */
656 uint32_t nvme_abort_cmd_timeout = 60;
657
658 /*
659 * Tunable for the size of arbitrary vendor specific admin commands,
660 * default is 16MiB.
661 */
662 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;
663
664 /*
 * Tunable for the max timeout of arbitrary vendor specific admin commands,
666 * default is 60s.
667 */
668 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
669
670 /*
671 * This ID space, AVL, and lock are used for keeping track of minor state across
672 * opens between different devices.
673 */
674 static id_space_t *nvme_open_minors;
675 static avl_tree_t nvme_open_minors_avl;
676 kmutex_t nvme_open_minors_mutex;
677
678 /*
679 * Removal taskq used for n_dead callback processing.
680 */
681 taskq_t *nvme_dead_taskq;
682
683 /*
684 * This enumeration is used in tandem with nvme_mgmt_lock() to describe which
685 * form of the lock is being taken. See the theory statement for more context.
686 */
687 typedef enum {
688 /*
689 * This is the primary form of taking the management lock and indicates
690 * that the user intends to do a read/write of it. This should always be
691 * used for any ioctl paths or truly anything other than a blkdev
692 * information operation.
693 */
694 NVME_MGMT_LOCK_NVME,
695 /*
696 * This is a subordinate form of the lock whereby the user is in blkdev
697 * callback context and will only intend to read the namespace data.
698 */
699 NVME_MGMT_LOCK_BDRO
700 } nvme_mgmt_lock_level_t;
701
702 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
703 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
704 static int nvme_quiesce(dev_info_t *);
705 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
706 static int nvme_setup_interrupts(nvme_t *, int, int);
707 static void nvme_release_interrupts(nvme_t *);
708 static uint_t nvme_intr(caddr_t, caddr_t);
709
710 static void nvme_shutdown(nvme_t *, boolean_t);
711 static boolean_t nvme_reset(nvme_t *, boolean_t);
712 static int nvme_init(nvme_t *);
713 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
714 static void nvme_free_cmd(nvme_cmd_t *);
715 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
716 bd_xfer_t *);
717 static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
718 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
719 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
720 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
721 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
722 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
723 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
724 static void nvme_wakeup_cmd(void *);
725 static void nvme_async_event_task(void *);
726
727 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
728 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
729 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
730 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
731 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
732 static inline int nvme_check_cmd_status(nvme_cmd_t *);
733 static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
734 nvme_ioctl_common_t *);
735
736 static int nvme_abort_cmd(nvme_cmd_t *, const uint32_t);
737 static void nvme_async_event(nvme_t *);
738 static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
739 static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
740 uint8_t);
741 static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
742 void **);
743 static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
744 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
745 uint32_t *);
746 static int nvme_write_cache_set(nvme_t *, boolean_t);
747 static int nvme_set_nqueues(nvme_t *);
748
749 static void nvme_free_dma(nvme_dma_t *);
750 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
751 nvme_dma_t **);
752 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
753 nvme_dma_t **);
754 static void nvme_free_qpair(nvme_qpair_t *);
755 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
756 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
757
758 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
759 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
760 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
761 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
762
763 static boolean_t nvme_check_regs_hdl(nvme_t *);
764 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
765
766 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);
767
768 static void nvme_bd_xfer_done(void *);
769 static void nvme_bd_driveinfo(void *, bd_drive_t *);
770 static int nvme_bd_mediainfo(void *, bd_media_t *);
771 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
772 static int nvme_bd_read(void *, bd_xfer_t *);
773 static int nvme_bd_write(void *, bd_xfer_t *);
774 static int nvme_bd_sync(void *, bd_xfer_t *);
775 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
776 static int nvme_bd_free_space(void *, bd_xfer_t *);
777
778 static int nvme_prp_dma_constructor(void *, void *, int);
779 static void nvme_prp_dma_destructor(void *, void *);
780
781 static void nvme_prepare_devid(nvme_t *, uint32_t);
782
783 /* DDI UFM callbacks */
784 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
785 ddi_ufm_image_t *);
786 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
787 ddi_ufm_slot_t *);
788 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
789
790 static int nvme_open(dev_t *, int, int, cred_t *);
791 static int nvme_close(dev_t, int, int, cred_t *);
792 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
793
794 static int nvme_init_ns(nvme_t *, uint32_t);
795 static boolean_t nvme_bd_attach_ns(nvme_t *, nvme_ioctl_common_t *);
796 static boolean_t nvme_bd_detach_ns(nvme_t *, nvme_ioctl_common_t *);
797
798 static int nvme_minor_comparator(const void *, const void *);
799
800 static ddi_ufm_ops_t nvme_ufm_ops = {
801 NULL,
802 nvme_ufm_fill_image,
803 nvme_ufm_fill_slot,
804 nvme_ufm_getcaps
805 };
806
807 /*
808 * Minor numbers are split amongst those used for controllers and for device
 * opens. The number of controller minors is limited based on MAXMIN32 per
810 * the theory statement. We allocate 1 million minors as a total guess at a
811 * number that'll probably be enough. The starting point of the open minors can
812 * be shifted to accommodate future expansion of the NVMe device minors.
813 */
814 #define NVME_MINOR_INST_SHIFT 9
815 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
816 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT)
817 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
818 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2)
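/* For example, NVME_MINOR(2, 3) == (2 << 9) | 3 == 1027. */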
819
820 #define NVME_OPEN_NMINORS (1024 * 1024)
821 #define NVME_OPEN_MINOR_MIN (MAXMIN32 + 1)
822 #define NVME_OPEN_MINOR_MAX_EXCL (NVME_OPEN_MINOR_MIN + \
823 NVME_OPEN_NMINORS)
824
825 #define NVME_BUMP_STAT(nvme, stat) \
826 atomic_inc_64(&nvme->n_device_stat.nds_ ## stat.value.ui64)
827
828 static void *nvme_state;
829 static kmem_cache_t *nvme_cmd_cache;
830
831 /*
832 * DMA attributes for queue DMA memory
833 *
834 * Queue DMA memory must be page aligned. The maximum length of a queue is
835 * 65536 entries, and an entry can be 64 bytes long.
836 */
837 static const ddi_dma_attr_t nvme_queue_dma_attr = {
838 .dma_attr_version = DMA_ATTR_V0,
839 .dma_attr_addr_lo = 0,
840 .dma_attr_addr_hi = 0xffffffffffffffffULL,
841 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
842 .dma_attr_align = 0x1000,
843 .dma_attr_burstsizes = 0x7ff,
844 .dma_attr_minxfer = 0x1000,
845 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
846 .dma_attr_seg = 0xffffffffffffffffULL,
847 .dma_attr_sgllen = 1,
848 .dma_attr_granular = 1,
849 .dma_attr_flags = 0,
850 };
851
852 /*
853 * DMA attributes for transfers using Physical Region Page (PRP) entries
854 *
855 * A PRP entry describes one page of DMA memory using the page size specified
856 * in the controller configuration's memory page size register (CC.MPS). It uses
857 * a 64bit base address aligned to this page size. There is no limitation on
858 * chaining PRPs together for arbitrarily large DMA transfers. These DMA
859 * attributes will be copied into the nvme_t during nvme_attach() and the
860 * dma_attr_maxxfer will be updated.
861 */
862 static const ddi_dma_attr_t nvme_prp_dma_attr = {
863 .dma_attr_version = DMA_ATTR_V0,
864 .dma_attr_addr_lo = 0,
865 .dma_attr_addr_hi = 0xffffffffffffffffULL,
866 .dma_attr_count_max = 0xfff,
867 .dma_attr_align = 0x1000,
868 .dma_attr_burstsizes = 0x7ff,
869 .dma_attr_minxfer = 0x1000,
870 .dma_attr_maxxfer = 0x1000,
871 .dma_attr_seg = 0xfff,
872 .dma_attr_sgllen = -1,
873 .dma_attr_granular = 1,
874 .dma_attr_flags = 0,
875 };
876
877 /*
878 * DMA attributes for transfers using scatter/gather lists
879 *
880 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
881 * 32bit length field. SGL Segment and SGL Last Segment entries require the
882 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied
883 * into the nvme_t, they are not currently used for any I/O.
884 */
885 static const ddi_dma_attr_t nvme_sgl_dma_attr = {
886 .dma_attr_version = DMA_ATTR_V0,
887 .dma_attr_addr_lo = 0,
888 .dma_attr_addr_hi = 0xffffffffffffffffULL,
889 .dma_attr_count_max = 0xffffffffUL,
890 .dma_attr_align = 1,
891 .dma_attr_burstsizes = 0x7ff,
892 .dma_attr_minxfer = 0x10,
893 .dma_attr_maxxfer = 0xfffffffffULL,
894 .dma_attr_seg = 0xffffffffffffffffULL,
895 .dma_attr_sgllen = -1,
896 .dma_attr_granular = 0x10,
897 .dma_attr_flags = 0
898 };
899
900 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
901 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
902 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
903 .devacc_attr_dataorder = DDI_STRICTORDER_ACC
904 };
905
906 /*
907 * ioctl validation policies. These are policies that determine which namespaces
908 * are allowed or disallowed for various operations. Note, all policy items
909 * should be explicitly listed here to help make it clear what our intent is.
910 * That is also why some of these are identical or repeated when they cover
911 * different ioctls.
912 */
913
914 /*
915 * The controller information ioctl generally contains read-only information
916 * about the controller that is sourced from multiple different pieces of
917 * information. This does not operate on a namespace and none are accepted.
918 */
919 static const nvme_ioctl_check_t nvme_check_ctrl_info = {
920 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
921 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
922 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
923 };
924
925 /*
926 * The kernel namespace information requires a namespace ID to be specified. It
927 * does not allow for the broadcast ID to be specified.
928 */
929 static const nvme_ioctl_check_t nvme_check_ns_info = {
930 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
931 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
932 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
933 };
934
935 /*
936 * Identify commands are allowed to operate on a namespace minor. Unfortunately,
 * the namespace field in identify commands is a bit weird. In particular, some
938 * commands need a valid namespace, while others are namespace listing
939 * operations, which means illegal namespaces like zero are allowed.
940 */
941 static const nvme_ioctl_check_t nvme_check_identify = {
942 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
943 .nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
944 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
945 };
946
947 /*
948 * The get log page command requires the ability to specify namespaces. When
949 * targeting the controller, one must use the broadcast NSID.
950 */
951 static const nvme_ioctl_check_t nvme_check_get_logpage = {
952 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
953 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
954 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
955 };
956
957 /*
958 * When getting a feature, we do not want rewriting behavior as most features do
959 * not require a namespace to be specified. Specific instances are checked in
960 * nvme_validate_get_feature().
961 */
962 static const nvme_ioctl_check_t nvme_check_get_feature = {
963 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
964 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
965 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
966 };
967
968 /*
969 * Format commands must target a namespace. The broadcast namespace must be used
970 * when referring to the controller.
971 */
972 static const nvme_ioctl_check_t nvme_check_format = {
973 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
974 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
975 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
976 };
977
978 /*
979 * blkdev and controller attach and detach must always target a namespace.
980 * However, the broadcast namespace is not allowed. We still perform rewriting
981 * so that way specifying the controller node with 0 will be caught.
982 */
983 static const nvme_ioctl_check_t nvme_check_attach_detach = {
984 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
985 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
986 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
987 };
988
989 /*
990 * Namespace creation operations cannot target a namespace as the new namespace
991 * ID will be returned in the operation. This operation requires the entire
992 * controller lock to be owned as one has to coordinate this operation with all
993 * of the actual namespace logic that's present.
994 */
995 static const nvme_ioctl_check_t nvme_check_ns_create = {
996 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
997 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
998 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_CTRL
999 };
1000
1001 /*
1002 * NVMe namespace delete must always target a namespace. The broadcast namespace
1003 * isn't allowed. We perform rewriting so that way we can catch this.
1004 * Importantly this only requires holding an exclusive lock on the namespace,
1005 * not on the whole device like creating a namespace does. Note, we don't allow
1006 * this on the namespace minor itself as part of our path towards transitioning
1007 * away from its use.
1008 */
1009 static const nvme_ioctl_check_t nvme_check_ns_delete = {
1010 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
1011 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
1012 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
1013 };
1014
1015 /*
1016 * Firmware operations must not target a namespace and are only allowed from the
1017 * controller.
1018 */
1019 static const nvme_ioctl_check_t nvme_check_firmware = {
1020 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
1021 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
1022 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
1023 };
1024
1025 /*
1026 * Passthru commands are an odd set. We only allow them from the primary
1027 * controller; however, we allow a namespace to be specified in them and allow
1028 * the broadcast namespace. We do not perform rewriting because we don't know
1029 * what the semantics are. We explicitly exempt passthru commands from needing
1030 * an exclusive lock and leave it up to them to tell us the impact of the
1031 * command and semantics. As this is a privileged interface and the semantics
1032 * are arbitrary, there's not much we can do without some assistance from the
1033 * consumer.
1034 */
1035 static const nvme_ioctl_check_t nvme_check_passthru = {
1036 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
1037 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
1038 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
1039 };
1040
1041 /*
1042 * Lock operations are allowed to target a namespace, but must not be rewritten.
1043 * There is no support for the broadcast namespace. This is the only ioctl that
1044 * should skip exclusive checking as it's used to grant it.
1045 */
1046 static const nvme_ioctl_check_t nvme_check_locking = {
1047 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
1048 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
1049 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
1050 };
1051
1052 /*
1053 * These data tables indicate how we handle the various states a namespace may
1054 * be in before we put it through the namespace state transition diagram. Note,
1055 * namespace creation does not allow one to specify a namespace ID, therefore
 * it doesn't have a set of entries here.
1057 *
1058 * See Namespace Support in the theory statement for more information.
1059 */
1060 static const nvme_ioctl_errno_t nvme_ns_delete_states[] = {
1061 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1062 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
1063 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1064 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1065 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1066 };
1067
1068 static const nvme_ioctl_errno_t nvme_ctrl_attach_states[] = {
1069 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1070 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
1071 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1072 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1073 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1074 };
1075
1076 static const nvme_ioctl_errno_t nvme_ctrl_detach_states[] = {
1077 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1078 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
1079 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_OK,
1080 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
1081 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1082 };
1083
1084 static const nvme_ioctl_errno_t nvme_bd_attach_states[] = {
1085 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1086 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
1087 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_UNSUP_ATTACH_NS,
1088 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
1089 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH,
1090 };
1091
1092 static const nvme_ioctl_errno_t nvme_bd_detach_states[] = {
1093 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1094 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
1095 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1096 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
1097 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_OK,
1098 };
1099
1100 static const nvme_ioctl_errno_t nvme_format_nvm_states[] = {
1101 [NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
1102 [NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
1103 [NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_OK,
1104 [NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
1105 [NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
1106 };
1107
1108 static struct cb_ops nvme_cb_ops = {
1109 .cb_open = nvme_open,
1110 .cb_close = nvme_close,
1111 .cb_strategy = nodev,
1112 .cb_print = nodev,
1113 .cb_dump = nodev,
1114 .cb_read = nodev,
1115 .cb_write = nodev,
1116 .cb_ioctl = nvme_ioctl,
1117 .cb_devmap = nodev,
1118 .cb_mmap = nodev,
1119 .cb_segmap = nodev,
1120 .cb_chpoll = nochpoll,
1121 .cb_prop_op = ddi_prop_op,
1122 .cb_str = 0,
1123 .cb_flag = D_NEW | D_MP,
1124 .cb_rev = CB_REV,
1125 .cb_aread = nodev,
1126 .cb_awrite = nodev
1127 };
1128
1129 static struct dev_ops nvme_dev_ops = {
1130 .devo_rev = DEVO_REV,
1131 .devo_refcnt = 0,
1132 .devo_getinfo = ddi_no_info,
1133 .devo_identify = nulldev,
1134 .devo_probe = nulldev,
1135 .devo_attach = nvme_attach,
1136 .devo_detach = nvme_detach,
1137 .devo_reset = nodev,
1138 .devo_cb_ops = &nvme_cb_ops,
1139 .devo_bus_ops = NULL,
1140 .devo_power = NULL,
1141 .devo_quiesce = nvme_quiesce,
1142 };
1143
1144 static struct modldrv nvme_modldrv = {
1145 .drv_modops = &mod_driverops,
1146 .drv_linkinfo = "NVMe driver",
1147 .drv_dev_ops = &nvme_dev_ops
1148 };
1149
1150 static struct modlinkage nvme_modlinkage = {
1151 .ml_rev = MODREV_1,
1152 .ml_linkage = { &nvme_modldrv, NULL }
1153 };
1154
1155 static bd_ops_t nvme_bd_ops = {
1156 .o_version = BD_OPS_CURRENT_VERSION,
1157 .o_drive_info = nvme_bd_driveinfo,
1158 .o_media_info = nvme_bd_mediainfo,
1159 .o_devid_init = nvme_bd_devid,
1160 .o_sync_cache = nvme_bd_sync,
1161 .o_read = nvme_bd_read,
1162 .o_write = nvme_bd_write,
1163 .o_free_space = nvme_bd_free_space,
1164 };
1165
1166 /*
1167 * This list will hold commands that have timed out and couldn't be aborted.
1168 * As we don't know what the hardware may still do with the DMA memory we can't
1169 * free them, so we'll keep them forever on this list where we can easily look
1170 * at them with mdb.
1171 */
1172 static struct list nvme_lost_cmds;
1173 static kmutex_t nvme_lc_mutex;
1174
1175 int
_init(void)
1177 {
1178 int error;
1179
1180 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
1181 if (error != DDI_SUCCESS)
1182 return (error);
1183
1184 if ((nvme_open_minors = id_space_create("nvme_open_minors",
1185 NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
1186 ddi_soft_state_fini(&nvme_state);
1187 return (ENOMEM);
1188 }
1189
1190 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
1191 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1192
1193 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
1194 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
1195 offsetof(nvme_cmd_t, nc_list));
1196
1197 mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
1198 avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
1199 sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));
1200
1201 nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
1202 TASKQ_PREPOPULATE);
1203
1204 bd_mod_init(&nvme_dev_ops);
1205
1206 error = mod_install(&nvme_modlinkage);
1207 if (error != DDI_SUCCESS) {
1208 ddi_soft_state_fini(&nvme_state);
1209 id_space_destroy(nvme_open_minors);
1210 mutex_destroy(&nvme_lc_mutex);
1211 list_destroy(&nvme_lost_cmds);
1212 bd_mod_fini(&nvme_dev_ops);
1213 mutex_destroy(&nvme_open_minors_mutex);
1214 avl_destroy(&nvme_open_minors_avl);
1215 taskq_destroy(nvme_dead_taskq);
1216 }
1217
1218 return (error);
1219 }
1220
1221 int
_fini(void)
1223 {
1224 int error;
1225
1226 if (!list_is_empty(&nvme_lost_cmds))
1227 return (DDI_FAILURE);
1228
1229 error = mod_remove(&nvme_modlinkage);
1230 if (error == DDI_SUCCESS) {
1231 ddi_soft_state_fini(&nvme_state);
1232 id_space_destroy(nvme_open_minors);
1233 kmem_cache_destroy(nvme_cmd_cache);
1234 mutex_destroy(&nvme_lc_mutex);
1235 list_destroy(&nvme_lost_cmds);
1236 bd_mod_fini(&nvme_dev_ops);
1237 mutex_destroy(&nvme_open_minors_mutex);
1238 avl_destroy(&nvme_open_minors_avl);
1239 taskq_destroy(nvme_dead_taskq);
1240 }
1241
1242 return (error);
1243 }
1244
1245 int
_info(struct modinfo *modinfop)
1247 {
1248 return (mod_info(&nvme_modlinkage, modinfop));
1249 }
1250
1251 static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
1253 {
1254 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1255
1256 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1257 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
1258 }
1259
1260 static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
1262 {
1263 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1264
1265 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1266 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
1267 }
1268
1269 static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
1271 {
1272 uint64_t val;
1273
1274 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1275
1276 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1277 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
1278
1279 return (val);
1280 }
1281
1282 static inline uint32_t
1283 nvme_get32(nvme_t *nvme, uintptr_t reg)
1284 {
1285 uint32_t val;
1286
1287 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1288
1289 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1290 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
1291
1292 return (val);
1293 }
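/*
 * An illustrative sketch (mirroring how these accessors are used later in this
 * file, e.g. in nvme_wait_cmd()): a caller reads a register through the helper
 * of the appropriate width and then picks apart the bitfields:
 *
 *	nvme_reg_csts_t csts;
 *
 *	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
 *	if (csts.b.csts_cfs != 0)
 *		... controller reports fatal status ...
 */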
1294
1295 static void
1296 nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
1297 {
1298 ASSERT3U(lock->nml_bd_own, ==, 0);
1299 mutex_destroy(&lock->nml_lock);
1300 cv_destroy(&lock->nml_cv);
1301 }
1302
1303 static void
1304 nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
1305 {
1306 mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
1307 cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
1308 lock->nml_bd_own = 0;
1309 }
1310
1311 static void
1312 nvme_mgmt_unlock(nvme_t *nvme)
1313 {
1314 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1315
1316 cv_broadcast(&lock->nml_cv);
1317 mutex_exit(&lock->nml_lock);
1318 }
1319
1320 static boolean_t
1321 nvme_mgmt_lock_held(const nvme_t *nvme)
1322 {
1323 return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
1324 }
1325
1326 static void
1327 nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
1328 {
1329 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1330 mutex_enter(&lock->nml_lock);
1331 while (lock->nml_bd_own != 0) {
1332 if (level == NVME_MGMT_LOCK_BDRO)
1333 break;
1334 cv_wait(&lock->nml_cv, &lock->nml_lock);
1335 }
1336 }
1337
1338 /*
1339 * This and nvme_mgmt_bd_end() are used to indicate that the driver is going to
1340 * be calling into a re-entrant blkdev related function. We cannot hold the lock
1341 * across such an operation and therefore must indicate that this is logically
1342 * held, while allowing other operations to proceed. This function may only be
1343 * called by a thread that already holds the nvme_mgmt_lock().
1344 */
1345 static void
1346 nvme_mgmt_bd_start(nvme_t *nvme)
1347 {
1348 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1349
1350 VERIFY(MUTEX_HELD(&lock->nml_lock));
1351 VERIFY3U(lock->nml_bd_own, ==, 0);
1352 lock->nml_bd_own = (uintptr_t)curthread;
1353 mutex_exit(&lock->nml_lock);
1354 }
1355
1356 static void
1357 nvme_mgmt_bd_end(nvme_t *nvme)
1358 {
1359 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1360
1361 mutex_enter(&lock->nml_lock);
1362 VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
1363 lock->nml_bd_own = 0;
1364 }
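/*
 * Putting the pieces above together, the expected calling pattern (a sketch of
 * the sequence used by nvme_async_event_task() later in this file when
 * notifying blkdev of a namespace change) looks like:
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	... examine or update namespace state ...
 *	nvme_mgmt_bd_start(nvme);	(drops nml_lock, records ownership)
 *	bd_state_change(ns->ns_bd_hdl);	(re-entrant blkdev call)
 *	nvme_mgmt_bd_end(nvme);		(re-acquires nml_lock)
 *	nvme_mgmt_unlock(nvme);
 */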
1365
1366 static boolean_t
1367 nvme_ns_state_check(const nvme_namespace_t *ns, nvme_ioctl_common_t *ioc,
1368 const nvme_ioctl_errno_t states[NVME_NS_NSTATES])
1369 {
1370 VERIFY(nvme_mgmt_lock_held(ns->ns_nvme));
1371 VERIFY3U(ns->ns_state, <, NVME_NS_NSTATES);
1372
1373 if (states[ns->ns_state] == NVME_IOCTL_E_OK) {
1374 return (B_TRUE);
1375 }
1376
1377 return (nvme_ioctl_error(ioc, states[ns->ns_state], 0, 0));
1378 }
1379
1380 /*
1381 * This is a central clearing house for marking an NVMe controller dead and/or
1382 * removed. This takes care of setting the flag, taking care of outstanding
1383 * blocked locks, and sending a DDI FMA impact. This is called from a precarious
1384 * place where locking is suspect. The only guarantee we have is that the nvme_t
1385 * is valid and won't disappear until we return.
1386 */
1387 static void
1388 nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
1389 {
1390 boolean_t was_dead;
1391
1392 /*
1393 * See if we win the race to set things up here. If someone beat us to
1394 * it, we do not do anything.
1395 */
1396 was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
1397 B_TRUE);
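/*
 * Note that atomic_cas_32() returns the previous value of n_dead, so
 * was_dead is B_FALSE only if this thread performed the transition from
 * not-dead to dead; any thread that loses the race sees B_TRUE and
 * returns early below.
 */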
1398
1399 /*
1400 * If we were removed, note this in our death status, regardless of
1401 * whether or not we were already dead. We need to know this so that we
1402 * can decide if it is safe to try and interact with the device in e.g.
1403 * reset and shutdown.
1404 */
1405 if (removed) {
1406 nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
1407 }
1408
1409 if (was_dead) {
1410 return;
1411 }
1412
1413 /*
1414 * If this was removed, there is no reason to change the service impact.
1415 * Otherwise, we need to change our default return code to indicate that
1416 * the device is truly dead, and not simply gone.
1417 */
1418 if (!removed) {
1419 ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
1420 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1421 }
1422
1423 taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
1424 TQ_NOSLEEP, &nvme->n_dead_tqent);
1425 }
1426
1427 static boolean_t
1428 nvme_ctrl_is_gone(const nvme_t *nvme)
1429 {
1430 if (nvme->n_dead && nvme->n_dead_status == NVME_IOCTL_E_CTRL_GONE)
1431 return (B_TRUE);
1432
1433 return (B_FALSE);
1434 }
1435
1436 static boolean_t
1437 nvme_check_regs_hdl(nvme_t *nvme)
1438 {
1439 ddi_fm_error_t error;
1440
1441 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
1442
1443 if (error.fme_status != DDI_FM_OK)
1444 return (B_TRUE);
1445
1446 return (B_FALSE);
1447 }
1448
1449 static boolean_t
1450 nvme_check_dma_hdl(nvme_dma_t *dma)
1451 {
1452 ddi_fm_error_t error;
1453
1454 if (dma == NULL)
1455 return (B_FALSE);
1456
1457 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
1458
1459 if (error.fme_status != DDI_FM_OK)
1460 return (B_TRUE);
1461
1462 return (B_FALSE);
1463 }
1464
1465 static void
1466 nvme_free_dma_common(nvme_dma_t *dma)
1467 {
1468 if (dma->nd_dmah != NULL)
1469 (void) ddi_dma_unbind_handle(dma->nd_dmah);
1470 if (dma->nd_acch != NULL)
1471 ddi_dma_mem_free(&dma->nd_acch);
1472 if (dma->nd_dmah != NULL)
1473 ddi_dma_free_handle(&dma->nd_dmah);
1474 }
1475
1476 static void
1477 nvme_free_dma(nvme_dma_t *dma)
1478 {
1479 nvme_free_dma_common(dma);
1480 kmem_free(dma, sizeof (*dma));
1481 }
1482
1483 static void
1484 nvme_prp_dma_destructor(void *buf, void *private __unused)
1485 {
1486 nvme_dma_t *dma = (nvme_dma_t *)buf;
1487
1488 nvme_free_dma_common(dma);
1489 }
1490
1491 static int
1492 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
1493 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
1494 {
1495 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
1496 &dma->nd_dmah) != DDI_SUCCESS) {
1497 /*
1498 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
1499 * the only other possible error is DDI_DMA_BADATTR which
1500 * indicates a driver bug which should cause a panic.
1501 */
1502 dev_err(nvme->n_dip, CE_PANIC,
1503 "!failed to get DMA handle, check DMA attributes");
1504 return (DDI_FAILURE);
1505 }
1506
1507 /*
1508 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
1509 * or the flags are conflicting, which isn't the case here.
1510 */
1511 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
1512 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
1513 &dma->nd_len, &dma->nd_acch);
1514
1515 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
1516 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1517 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
1518 dev_err(nvme->n_dip, CE_WARN,
1519 "!failed to bind DMA memory");
1520 NVME_BUMP_STAT(nvme, dma_bind_err);
1521 nvme_free_dma_common(dma);
1522 return (DDI_FAILURE);
1523 }
1524
1525 return (DDI_SUCCESS);
1526 }
1527
1528 static int
1529 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
1530 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
1531 {
1532 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
1533
1534 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
1535 DDI_SUCCESS) {
1536 *ret = NULL;
1537 kmem_free(dma, sizeof (nvme_dma_t));
1538 return (DDI_FAILURE);
1539 }
1540
1541 bzero(dma->nd_memp, dma->nd_len);
1542
1543 *ret = dma;
1544 return (DDI_SUCCESS);
1545 }
1546
1547 static int
1548 nvme_prp_dma_constructor(void *buf, void *private, int flags __unused)
1549 {
1550 nvme_dma_t *dma = (nvme_dma_t *)buf;
1551 nvme_t *nvme = (nvme_t *)private;
1552
1553 dma->nd_dmah = NULL;
1554 dma->nd_acch = NULL;
1555
1556 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
1557 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
1558 return (-1);
1559 }
1560
1561 ASSERT(dma->nd_ncookie == 1);
1562
1563 dma->nd_cached = B_TRUE;
1564
1565 return (0);
1566 }
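/*
 * For context, the constructor/destructor pair above is meant to back a
 * per-controller kmem cache of pre-bound PRP pages (n_prp_cache). A sketch of
 * how such a cache would be created with them (the actual creation happens
 * elsewhere in this file; the cache name and alignment used here are only
 * illustrative):
 *
 *	nvme->n_prp_cache = kmem_cache_create("nvme_prp_cache",
 *	    sizeof (nvme_dma_t), 0, nvme_prp_dma_constructor,
 *	    nvme_prp_dma_destructor, NULL, nvme, NULL, 0);
 */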
1567
1568 static int
1569 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
1570 uint_t flags, nvme_dma_t **dma)
1571 {
1572 uint32_t len = nentry * qe_len;
1573 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
1574
1575 len = roundup(len, nvme->n_pagesize);
1576
1577 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
1578 != DDI_SUCCESS) {
1579 dev_err(nvme->n_dip, CE_WARN,
1580 "!failed to get DMA memory for queue");
1581 goto fail;
1582 }
1583
1584 if ((*dma)->nd_ncookie != 1) {
1585 dev_err(nvme->n_dip, CE_WARN,
1586 "!got too many cookies for queue DMA");
1587 goto fail;
1588 }
1589
1590 return (DDI_SUCCESS);
1591
1592 fail:
1593 if (*dma) {
1594 nvme_free_dma(*dma);
1595 *dma = NULL;
1596 }
1597
1598 return (DDI_FAILURE);
1599 }
1600
1601 static void
1602 nvme_free_cq(nvme_cq_t *cq)
1603 {
1604 mutex_destroy(&cq->ncq_mutex);
1605
1606 if (cq->ncq_cmd_taskq != NULL)
1607 taskq_destroy(cq->ncq_cmd_taskq);
1608
1609 if (cq->ncq_dma != NULL)
1610 nvme_free_dma(cq->ncq_dma);
1611
1612 kmem_free(cq, sizeof (*cq));
1613 }
1614
1615 static void
1616 nvme_free_qpair(nvme_qpair_t *qp)
1617 {
1618 int i;
1619
1620 mutex_destroy(&qp->nq_mutex);
1621 sema_destroy(&qp->nq_sema);
1622
1623 if (qp->nq_sqdma != NULL)
1624 nvme_free_dma(qp->nq_sqdma);
1625
1626 if (qp->nq_active_cmds > 0)
1627 for (i = 0; i != qp->nq_nentry; i++)
1628 if (qp->nq_cmd[i] != NULL)
1629 nvme_free_cmd(qp->nq_cmd[i]);
1630
1631 if (qp->nq_cmd != NULL)
1632 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
1633
1634 kmem_free(qp, sizeof (nvme_qpair_t));
1635 }
1636
1637 /*
1638 * Destroy the pre-allocated cq array, but only free individual completion
1639 * queues from the given starting index.
1640 */
1641 static void
1642 nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
1643 {
1644 uint_t i;
1645
1646 for (i = start; i < nvme->n_cq_count; i++)
1647 if (nvme->n_cq[i] != NULL)
1648 nvme_free_cq(nvme->n_cq[i]);
1649
1650 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
1651 }
1652
1653 static int
1654 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
1655 uint_t nthr)
1656 {
1657 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
1658 char name[64]; /* large enough for the taskq name */
1659
1660 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
1661 DDI_INTR_PRI(nvme->n_intr_pri));
1662
1663 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
1664 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
1665 goto fail;
1666
1667 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
1668 cq->ncq_nentry = nentry;
1669 cq->ncq_id = idx;
1670 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
1671
1672 /*
1673 * Each completion queue has its own command taskq.
1674 */
1675 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
1676 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
1677
1678 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
1679 TASKQ_PREPOPULATE);
1680
1681 if (cq->ncq_cmd_taskq == NULL) {
1682 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
1683 "taskq for cq %u", idx);
1684 goto fail;
1685 }
1686
1687 *cqp = cq;
1688 return (DDI_SUCCESS);
1689
1690 fail:
1691 nvme_free_cq(cq);
1692 *cqp = NULL;
1693
1694 return (DDI_FAILURE);
1695 }
1696
1697 /*
1698 * Create the n_cq array big enough to hold "ncq" completion queues.
1699 * If the array already exists it will be re-sized (but only larger).
1700 * The admin queue is included in this array, which boosts the
1701 * max number of entries to UINT16_MAX + 1.
1702 */
1703 static int
1704 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
1705 {
1706 nvme_cq_t **cq;
1707 uint_t i, cq_count;
1708
1709 ASSERT3U(ncq, >, nvme->n_cq_count);
1710
1711 cq = nvme->n_cq;
1712 cq_count = nvme->n_cq_count;
1713
1714 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
1715 nvme->n_cq_count = ncq;
1716
1717 for (i = 0; i < cq_count; i++)
1718 nvme->n_cq[i] = cq[i];
1719
1720 for (; i < nvme->n_cq_count; i++)
1721 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
1722 DDI_SUCCESS)
1723 goto fail;
1724
1725 if (cq != NULL)
1726 kmem_free(cq, sizeof (*cq) * cq_count);
1727
1728 return (DDI_SUCCESS);
1729
1730 fail:
1731 nvme_destroy_cq_array(nvme, cq_count);
1732 /*
1733 * Restore the original array
1734 */
1735 nvme->n_cq_count = cq_count;
1736 nvme->n_cq = cq;
1737
1738 return (DDI_FAILURE);
1739 }
1740
1741 static int
1742 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
1743 uint_t idx)
1744 {
1745 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
1746 uint_t cq_idx;
1747
1748 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
1749 DDI_INTR_PRI(nvme->n_intr_pri));
1750
1751 /*
1752 * The NVMe spec defines that a full queue has one empty (unused) slot;
1753 * initialize the semaphore accordingly.
1754 */
1755 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
1756
1757 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
1758 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
1759 goto fail;
1760
1761 /*
1762 * idx == 0 is adminq, those above 0 are shared io completion queues.
1763 */
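/*
 * For example, with one admin queue and four I/O completion queues
 * (n_cq_count == 5), qpair indices 1 through 8 map to completion queues
 * 1, 2, 3, 4, 1, 2, 3, 4 respectively.
 */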
1764 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
1765 qp->nq_cq = nvme->n_cq[cq_idx];
1766 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
1767 qp->nq_nentry = nentry;
1768
1769 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
1770
1771 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
1772 qp->nq_next_cmd = 0;
1773
1774 *nqp = qp;
1775 return (DDI_SUCCESS);
1776
1777 fail:
1778 nvme_free_qpair(qp);
1779 *nqp = NULL;
1780
1781 return (DDI_FAILURE);
1782 }
1783
1784 /*
1785 * One might reasonably consider that the nvme_cmd_cache should have a cache
1786 * constructor and destructor that takes care of the mutex/cv init/destroy, and
1787 * that nvme_free_cmd should reset more fields such that allocation becomes
1788 * simpler. This is not currently implemented as:
1789 * - nvme_cmd_cache is a global cache, shared across nvme instances and
1790 * therefore there is no easy access to the corresponding nvme_t in the
1791 * constructor to determine the required interrupt priority.
1792 * - Most fields in nvme_cmd_t would need to be zeroed in nvme_free_cmd while
1793 * preserving the mutex/cv. It is simpler to zero the entire
1794 * structure and then init the mutex/cv only in the unlikely event that we
1795 * want an admin command.
1796 */
1797 static nvme_cmd_t *
1798 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
1799 {
1800 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
1801
1802 if (cmd != NULL) {
1803 bzero(cmd, sizeof (nvme_cmd_t));
1804 cmd->nc_nvme = nvme;
1805 }
1806
1807 return (cmd);
1808 }
1809
1810 static nvme_cmd_t *
1811 nvme_alloc_admin_cmd(nvme_t *nvme, int kmflag)
1812 {
1813 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, kmflag);
1814
1815 if (cmd != NULL) {
1816 cmd->nc_flags |= NVME_CMD_F_USELOCK;
1817 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
1818 DDI_INTR_PRI(nvme->n_intr_pri));
1819 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
1820 }
1821
1822 return (cmd);
1823 }
1824
1825 static void
1826 nvme_free_cmd(nvme_cmd_t *cmd)
1827 {
1828 /* Don't free commands on the lost commands list. */
1829 if (list_link_active(&cmd->nc_list))
1830 return;
1831
1832 if (cmd->nc_dma) {
1833 nvme_free_dma(cmd->nc_dma);
1834 cmd->nc_dma = NULL;
1835 }
1836
1837 if (cmd->nc_prp) {
1838 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
1839 cmd->nc_prp = NULL;
1840 }
1841
1842 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
1843 cv_destroy(&cmd->nc_cv);
1844 mutex_destroy(&cmd->nc_mutex);
1845 }
1846
1847 kmem_cache_free(nvme_cmd_cache, cmd);
1848 }
1849
1850 static void
1851 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1852 {
1853 sema_p(&qp->nq_sema);
1854 nvme_submit_cmd_common(qp, cmd, qtimeoutp);
1855 }
1856
1857 static int
1858 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1859 {
1860 if (cmd->nc_nvme->n_dead) {
1861 return (EIO);
1862 }
1863
1864 if (sema_tryp(&qp->nq_sema) == 0)
1865 return (EAGAIN);
1866
1867 nvme_submit_cmd_common(qp, cmd, NULL);
1868 return (0);
1869 }
1870
1871 /*
1872 * Common command submission routine. If `qtimeoutp` is not NULL then it will
1873 * be set to the sum of the timeouts of any active commands ahead of the one
1874 * being submitted.
1875 */
1876 static void
1877 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1878 {
1879 nvme_reg_sqtdbl_t tail = { 0 };
1880
1881 /*
1882 * We don't need to take a lock on cmd since it is not yet enqueued.
1883 */
1884 cmd->nc_submit_ts = gethrtime();
1885 cmd->nc_state = NVME_CMD_SUBMITTED;
1886
1887 mutex_enter(&qp->nq_mutex);
1888
1889 /*
1890 * Now that we hold the queue pair lock, we must check whether or not
1891 * the controller has been listed as dead (e.g. was removed due to
1892 * hotplug). This is necessary as otherwise we could race with
1893 * nvme_remove_callback(). Because this has not been enqueued, we don't
1894 * call nvme_unqueue_cmd(), which is why we must manually decrement the
1895 * semaphore.
1896 */
1897 if (cmd->nc_nvme->n_dead) {
1898 cmd->nc_queue_ts = gethrtime();
1899 cmd->nc_state = NVME_CMD_QUEUED;
1900 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
1901 cmd, TQ_NOSLEEP, &cmd->nc_tqent);
1902 sema_v(&qp->nq_sema);
1903 mutex_exit(&qp->nq_mutex);
1904 return;
1905 }
1906
1907 /*
1908 * Try to insert the cmd into the active cmd array at the nq_next_cmd
1909 * slot. If the slot is already occupied advance to the next slot and
1910 * try again. This can happen for long running commands like async event
1911 * requests.
1912 */
1913 while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
1914 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1915 qp->nq_cmd[qp->nq_next_cmd] = cmd;
1916
1917 /*
1918 * We keep track of the number of active commands in this queue, and
1919 * the sum of the timeouts for those active commands.
1920 */
1921 qp->nq_active_cmds++;
1922 if (qtimeoutp != NULL)
1923 *qtimeoutp = qp->nq_active_timeout;
1924 qp->nq_active_timeout += cmd->nc_timeout;
1925
1926 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
1927 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
1928 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
1929 sizeof (nvme_sqe_t) * qp->nq_sqtail,
1930 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
1931 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1932
1933 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
1934 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
1935
1936 mutex_exit(&qp->nq_mutex);
1937 }
1938
1939 static nvme_cmd_t *
1940 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
1941 {
1942 nvme_cmd_t *cmd;
1943
1944 ASSERT(mutex_owned(&qp->nq_mutex));
1945 ASSERT3S(cid, <, qp->nq_nentry);
1946
1947 cmd = qp->nq_cmd[cid];
1948 /*
1949 * Some controllers will erroneously add things to the completion queue
1950 * for which there is no matching outstanding command. If this happens,
1951 * it is almost certainly a controller firmware bug since nq_mutex
1952 * is held across command submission and ringing the queue doorbell,
1953 * and is also held in this function.
1954 *
1955 * If we see such an unexpected command, there is not much we can do.
1956 * These will be logged and counted in nvme_get_completed(), but
1957 * otherwise ignored.
1958 */
1959 if (cmd == NULL)
1960 return (NULL);
1961 qp->nq_cmd[cid] = NULL;
1962 ASSERT3U(qp->nq_active_cmds, >, 0);
1963 qp->nq_active_cmds--;
1964 ASSERT3U(qp->nq_active_timeout, >=, cmd->nc_timeout);
1965 qp->nq_active_timeout -= cmd->nc_timeout;
1966 sema_v(&qp->nq_sema);
1967
1968 ASSERT3P(cmd, !=, NULL);
1969 ASSERT3P(cmd->nc_nvme, ==, nvme);
1970 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
1971
1972 return (cmd);
1973 }
1974
1975 /*
1976 * This is called when an admin abort has failed to complete, once for the
1977 * original command and once for the abort itself. At this point the controller
1978 * has been marked dead. The commands are considered lost, de-queued if
1979 * possible, and placed on a global lost commands list so that they cannot be
1980 * freed and so that any DMA memory they have is not re-used.
1981 */
1982 static void
1983 nvme_lost_cmd(nvme_t *nvme, nvme_cmd_t *cmd)
1984 {
1985 ASSERT(mutex_owned(&cmd->nc_mutex));
1986
1987 switch (cmd->nc_state) {
1988 case NVME_CMD_SUBMITTED: {
1989 nvme_qpair_t *qp = nvme->n_ioq[cmd->nc_sqid];
1990
1991 /*
1992 * The command is still in the submitted state, meaning that we
1993 * have not processed a completion queue entry for it. De-queue
1994 * should be successful and if the hardware does later report
1995 * completion we'll skip it as a command for which we aren't
1996 * expecting a response (see nvme_unqueue_cmd()).
1997 */
1998 mutex_enter(&qp->nq_mutex);
1999 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
2000 mutex_exit(&qp->nq_mutex);
2001 }
2002 case NVME_CMD_ALLOCATED:
2003 case NVME_CMD_COMPLETED:
2004 /*
2005 * If the command has not been submitted, or has completed,
2006 * there is nothing to do here. In the event of an abort
2007 * command timeout, we can end up here in the process of
2008 * "losing" the original command. It's possible that command
2009 * "losing" the original command. It's possible that the command
2010 * interim.
2011 */
2012 break;
2013 case NVME_CMD_QUEUED:
2014 /*
2015 * The command is on the taskq, awaiting callback. This should
2016 * be fairly rapid so wait for completion.
2017 */
2018 while (cmd->nc_state != NVME_CMD_COMPLETED)
2019 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2020 break;
2021 case NVME_CMD_LOST:
2022 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2023 "%s: command %p already lost", __func__, (void *)cmd);
2024 break;
2025 }
2026
2027 cmd->nc_state = NVME_CMD_LOST;
2028
2029 mutex_enter(&nvme_lc_mutex);
2030 list_insert_head(&nvme_lost_cmds, cmd);
2031 mutex_exit(&nvme_lc_mutex);
2032 }
2033
2034 /*
2035 * Get the command tied to the next completed cqe and bump along completion
2036 * queue head counter.
2037 */
2038 static nvme_cmd_t *
2039 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
2040 {
2041 nvme_qpair_t *qp;
2042 nvme_cqe_t *cqe;
2043 nvme_cmd_t *cmd;
2044
2045 ASSERT(mutex_owned(&cq->ncq_mutex));
2046
2047 retry:
2048 cqe = &cq->ncq_cq[cq->ncq_head];
2049
2050 /* Check phase tag of CQE. Hardware inverts it for new entries. */
2051 if (cqe->cqe_sf.sf_p == cq->ncq_phase)
2052 return (NULL);
2053
2054 qp = nvme->n_ioq[cqe->cqe_sqid];
2055
2056 mutex_enter(&qp->nq_mutex);
2057 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
2058 mutex_exit(&qp->nq_mutex);
2059
2060 qp->nq_sqhead = cqe->cqe_sqhd;
2061 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
2062
2063 /* Toggle phase on wrap-around. */
2064 if (cq->ncq_head == 0)
2065 cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1;
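/*
 * To illustrate the phase logic: the controller writes new entries with the
 * phase value opposite to ncq_phase, so the check above treats any entry
 * whose phase matches ncq_phase as stale. Each time the head wraps, the
 * controller begins writing the other phase value, and flipping ncq_phase
 * here keeps the driver's expectation in step with it.
 */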
2066
2067 if (cmd == NULL) {
2068 dev_err(nvme->n_dip, CE_WARN,
2069 "!received completion for unknown cid 0x%x", cqe->cqe_cid);
2070 NVME_BUMP_STAT(nvme, unknown_cid);
2071 /*
2072 * We want to ignore this unexpected completion entry as it
2073 * is most likely a result of a bug in the controller firmware.
2074 * However, if we return NULL, then callers will assume there
2075 * are no more pending commands for this wakeup. Retry to keep
2076 * enumerating commands until the phase tag indicates there are
2077 * no more and we are really done.
2078 */
2079 goto retry;
2080 }
2081
2082 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid);
2083 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
2084
2085 return (cmd);
2086 }
2087
2088 /*
2089 * Process all completed commands on the io completion queue.
2090 */
2091 static uint_t
2092 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
2093 {
2094 nvme_reg_cqhdbl_t head = { 0 };
2095 nvme_cmd_t *cmd;
2096 uint_t completed = 0;
2097
2098 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
2099 DDI_SUCCESS)
2100 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
2101 __func__);
2102
2103 mutex_enter(&cq->ncq_mutex);
2104
2105 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
2106 /*
2107 * NVME_CMD_F_USELOCK is applied to all commands which are
2108 * going to be waited for by another thread in nvme_wait_cmd
2109 * and indicates that the lock should be taken before modifying
2110 * protected fields, and that the mutex has been initialised.
2111 * Commands which do not require the mutex to be held have not
2112 * initialised it (to reduce overhead).
2113 */
2114 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
2115 mutex_enter(&cmd->nc_mutex);
2116 /*
2117 * The command could have been de-queued as lost while
2118 * we waited on the lock, in which case we drop it.
2119 */
2120 if (cmd->nc_state == NVME_CMD_LOST) {
2121 mutex_exit(&cmd->nc_mutex);
2122 completed++;
2123 continue;
2124 }
2125 }
2126 cmd->nc_queue_ts = gethrtime();
2127 cmd->nc_state = NVME_CMD_QUEUED;
2128 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0)
2129 mutex_exit(&cmd->nc_mutex);
2130 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
2131 TQ_NOSLEEP, &cmd->nc_tqent);
2132
2133 completed++;
2134 }
2135
2136 if (completed > 0) {
2137 /*
2138 * Update the completion queue head doorbell.
2139 */
2140 head.b.cqhdbl_cqh = cq->ncq_head;
2141 nvme_put32(nvme, cq->ncq_hdbl, head.r);
2142 }
2143
2144 mutex_exit(&cq->ncq_mutex);
2145
2146 return (completed);
2147 }
2148
2149 static nvme_cmd_t *
2150 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
2151 {
2152 nvme_cq_t *cq = qp->nq_cq;
2153 nvme_reg_cqhdbl_t head = { 0 };
2154 nvme_cmd_t *cmd;
2155
2156 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
2157 DDI_SUCCESS)
2158 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
2159 __func__);
2160
2161 mutex_enter(&cq->ncq_mutex);
2162
2163 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
2164 head.b.cqhdbl_cqh = cq->ncq_head;
2165 nvme_put32(nvme, cq->ncq_hdbl, head.r);
2166 }
2167
2168 mutex_exit(&cq->ncq_mutex);
2169
2170 return (cmd);
2171 }
2172
2173 static int
2174 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
2175 {
2176 nvme_cqe_t *cqe = &cmd->nc_cqe;
2177
2178 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2179 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2180 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2181 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2182 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2183
2184 if (cmd->nc_xfer != NULL)
2185 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2186
2187 /*
2188 * User commands should never cause us to mark the controller dead.
2189 * Whether we should ever mark it dead at all, given that there is
2190 * currently no useful recovery path, is another question.
2191 */
2192 if (((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) &&
2193 cmd->nc_nvme->n_strict_version) {
2194 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2195 }
2196
2197 return (EIO);
2198 }
2199
2200 static int
2201 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
2202 {
2203 nvme_cqe_t *cqe = &cmd->nc_cqe;
2204
2205 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2206 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2207 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2208 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2209 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2210 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
2211 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2212 }
2213
2214 return (EIO);
2215 }
2216
2217 static int
2218 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
2219 {
2220 nvme_cqe_t *cqe = &cmd->nc_cqe;
2221
2222 switch (cqe->cqe_sf.sf_sc) {
2223 case NVME_CQE_SC_INT_NVM_WRITE:
2224 /* write fail */
2225 /* TODO: post ereport */
2226 if (cmd->nc_xfer != NULL)
2227 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2228 return (EIO);
2229
2230 case NVME_CQE_SC_INT_NVM_READ:
2231 /* read fail */
2232 /* TODO: post ereport */
2233 if (cmd->nc_xfer != NULL)
2234 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2235 return (EIO);
2236
2237 default:
2238 return (nvme_check_unknown_cmd_status(cmd));
2239 }
2240 }
2241
2242 static int
2243 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
2244 {
2245 nvme_cqe_t *cqe = &cmd->nc_cqe;
2246
2247 switch (cqe->cqe_sf.sf_sc) {
2248 case NVME_CQE_SC_GEN_SUCCESS:
2249 return (0);
2250
2251 /*
2252 * Errors indicating a bug in the driver should cause a panic.
2253 */
2254 case NVME_CQE_SC_GEN_INV_OPC:
2255 /* Invalid Command Opcode */
2256 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmd_err);
2257 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2258 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2259 "programming error: invalid opcode in cmd %p",
2260 (void *)cmd);
2261 }
2262 return (EINVAL);
2263
2264 case NVME_CQE_SC_GEN_INV_FLD:
2265 /* Invalid Field in Command */
2266 NVME_BUMP_STAT(cmd->nc_nvme, inv_field_err);
2267 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2268 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2269 "programming error: invalid field in cmd %p",
2270 (void *)cmd);
2271 }
2272 return (EIO);
2273
2274 case NVME_CQE_SC_GEN_ID_CNFL:
2275 /* Command ID Conflict */
2276 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2277 "cmd ID conflict in cmd %p", (void *)cmd);
2278 return (0);
2279
2280 case NVME_CQE_SC_GEN_INV_NS:
2281 /* Invalid Namespace or Format */
2282 NVME_BUMP_STAT(cmd->nc_nvme, inv_nsfmt_err);
2283 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2284 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2285 "programming error: invalid NS/format in cmd %p",
2286 (void *)cmd);
2287 }
2288 return (EINVAL);
2289
2290 case NVME_CQE_SC_GEN_CMD_SEQ_ERR:
2291 /*
2292 * Command Sequence Error
2293 *
2294 * This can be generated normally by user log page requests that
2295 * come out of order (e.g. getting the persistent event log
2296 * without establishing the context). If the kernel manages this
2297 * on its own then that's problematic.
2298 */
2299 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmdseq_err);
2300 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2301 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2302 "programming error: command sequencing error %p",
2303 (void *)cmd);
2304 }
2305 return (EINVAL);
2306
2307 case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
2308 /* LBA Out Of Range */
2309 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2310 "LBA out of range in cmd %p", (void *)cmd);
2311 return (0);
2312
2313 /*
2314 * Non-fatal errors, handle gracefully.
2315 */
2316 case NVME_CQE_SC_GEN_DATA_XFR_ERR:
2317 /* Data Transfer Error (DMA) */
2318 /* TODO: post ereport */
2319 NVME_BUMP_STAT(cmd->nc_nvme, data_xfr_err);
2320 if (cmd->nc_xfer != NULL)
2321 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2322 return (EIO);
2323
2324 case NVME_CQE_SC_GEN_INTERNAL_ERR:
2325 /*
2326 * Internal Error. The spec (v1.0, section 4.5.1.2) says
2327 * detailed error information is returned as async event,
2328 * so we pretty much ignore the error here and handle it
2329 * in the async event handler.
2330 */
2331 NVME_BUMP_STAT(cmd->nc_nvme, internal_err);
2332 if (cmd->nc_xfer != NULL)
2333 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2334 return (EIO);
2335
2336 case NVME_CQE_SC_GEN_ABORT_REQUEST:
2337 /*
2338 * Command Abort Requested. This normally happens only when a
2339 * command times out.
2340 */
2341 /* TODO: post ereport or change blkdev to handle this? */
2342 NVME_BUMP_STAT(cmd->nc_nvme, abort_rq_err);
2343 return (ECANCELED);
2344
2345 case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
2346 /* Command Aborted due to Power Loss Notification */
2347 NVME_BUMP_STAT(cmd->nc_nvme, abort_pwrloss_err);
2348 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2349 return (EIO);
2350
2351 case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
2352 /* Command Aborted due to SQ Deletion */
2353 NVME_BUMP_STAT(cmd->nc_nvme, abort_sq_del);
2354 return (EIO);
2355
2356 case NVME_CQE_SC_GEN_NVM_CAP_EXC:
2357 /* Capacity Exceeded */
2358 NVME_BUMP_STAT(cmd->nc_nvme, nvm_cap_exc);
2359 if (cmd->nc_xfer != NULL)
2360 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2361 return (EIO);
2362
2363 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
2364 /* Namespace Not Ready */
2365 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_notrdy);
2366 if (cmd->nc_xfer != NULL)
2367 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2368 return (EIO);
2369
2370 case NVME_CQE_SC_GEN_NVM_FORMATTING:
2371 /* Format in progress (1.2) */
2372 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2))
2373 return (nvme_check_unknown_cmd_status(cmd));
2374 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_formatting);
2375 if (cmd->nc_xfer != NULL)
2376 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2377 return (EIO);
2378
2379 default:
2380 return (nvme_check_unknown_cmd_status(cmd));
2381 }
2382 }
2383
2384 static int
2385 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
2386 {
2387 nvme_cqe_t *cqe = &cmd->nc_cqe;
2388
2389 switch (cqe->cqe_sf.sf_sc) {
2390 case NVME_CQE_SC_SPC_INV_CQ:
2391 /* Completion Queue Invalid */
2392 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
2393 NVME_BUMP_STAT(cmd->nc_nvme, inv_cq_err);
2394 return (EINVAL);
2395
2396 case NVME_CQE_SC_SPC_INV_QID:
2397 /* Invalid Queue Identifier */
2398 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2399 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
2400 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
2401 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2402 NVME_BUMP_STAT(cmd->nc_nvme, inv_qid_err);
2403 return (EINVAL);
2404
2405 case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
2406 /* Max Queue Size Exceeded */
2407 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2408 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2409 NVME_BUMP_STAT(cmd->nc_nvme, max_qsz_exc);
2410 return (EINVAL);
2411
2412 case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
2413 /* Abort Command Limit Exceeded */
2414 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
2415 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2416 "abort command limit exceeded in cmd %p", (void *)cmd);
2417 return (0);
2418
2419 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
2420 /* Async Event Request Limit Exceeded */
2421 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
2422 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2423 "async event request limit exceeded in cmd %p",
2424 (void *)cmd);
2425 return (0);
2426
2427 case NVME_CQE_SC_SPC_INV_INT_VECT:
2428 /* Invalid Interrupt Vector */
2429 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2430 NVME_BUMP_STAT(cmd->nc_nvme, inv_int_vect);
2431 return (EINVAL);
2432
2433 case NVME_CQE_SC_SPC_INV_LOG_PAGE:
2434 /* Invalid Log Page */
2435 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
2436 NVME_BUMP_STAT(cmd->nc_nvme, inv_log_page);
2437 return (EINVAL);
2438
2439 case NVME_CQE_SC_SPC_INV_FORMAT:
2440 /* Invalid Format */
2441 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT ||
2442 cmd->nc_sqe.sqe_opc == NVME_OPC_NS_MGMT);
2443 NVME_BUMP_STAT(cmd->nc_nvme, inv_format);
2444 if (cmd->nc_xfer != NULL)
2445 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2446 return (EINVAL);
2447
2448 case NVME_CQE_SC_SPC_INV_Q_DEL:
2449 /* Invalid Queue Deletion */
2450 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2451 NVME_BUMP_STAT(cmd->nc_nvme, inv_q_del);
2452 return (EINVAL);
2453
2454 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
2455 /* Conflicting Attributes */
2456 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
2457 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2458 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2459 NVME_BUMP_STAT(cmd->nc_nvme, cnfl_attr);
2460 if (cmd->nc_xfer != NULL)
2461 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2462 return (EINVAL);
2463
2464 case NVME_CQE_SC_SPC_NVM_INV_PROT:
2465 /* Invalid Protection Information */
2466 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
2467 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2468 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2469 NVME_BUMP_STAT(cmd->nc_nvme, inv_prot);
2470 if (cmd->nc_xfer != NULL)
2471 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2472 return (EINVAL);
2473
2474 case NVME_CQE_SC_SPC_NVM_READONLY:
2475 /* Write to Read Only Range */
2476 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2477 NVME_BUMP_STAT(cmd->nc_nvme, readonly);
2478 if (cmd->nc_xfer != NULL)
2479 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2480 return (EROFS);
2481
2482 case NVME_CQE_SC_SPC_INV_FW_SLOT:
2483 /* Invalid Firmware Slot */
2484 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwslot);
2485 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2486 return (EINVAL);
2487
2488 case NVME_CQE_SC_SPC_INV_FW_IMG:
2489 /* Invalid Firmware Image */
2490 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwimg);
2491 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2492 return (EINVAL);
2493
2494 case NVME_CQE_SC_SPC_FW_RESET:
2495 /* Conventional Reset Required */
2496 NVME_BUMP_STAT(cmd->nc_nvme, fwact_creset);
2497 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2498 return (0);
2499
2500 case NVME_CQE_SC_SPC_FW_NSSR:
2501 /* NVMe Subsystem Reset Required */
2502 NVME_BUMP_STAT(cmd->nc_nvme, fwact_nssr);
2503 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2504 return (0);
2505
2506 case NVME_CQE_SC_SPC_FW_NEXT_RESET:
2507 /* Activation Requires Reset */
2508 NVME_BUMP_STAT(cmd->nc_nvme, fwact_reset);
2509 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2510 return (0);
2511
2512 case NVME_CQE_SC_SPC_FW_MTFA:
2513 /* Activation Requires Maximum Time Violation */
2514 NVME_BUMP_STAT(cmd->nc_nvme, fwact_mtfa);
2515 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2516 return (EAGAIN);
2517
2518 case NVME_CQE_SC_SPC_FW_PROHIBITED:
2519 /* Activation Prohibited */
2520 NVME_BUMP_STAT(cmd->nc_nvme, fwact_prohibited);
2521 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2522 return (EINVAL);
2523
2524 case NVME_CQE_SC_SPC_FW_OVERLAP:
2525 /* Overlapping Firmware Ranges */
2526 NVME_BUMP_STAT(cmd->nc_nvme, fw_overlap);
2527 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD ||
2528 cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2529 return (EINVAL);
2530
2531 case NVME_CQE_SC_SPC_NS_ATTACHED:
2532 /* Namespace Already Attached */
2533 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2534 NVME_BUMP_STAT(cmd->nc_nvme, ns_attached);
2535 return (EEXIST);
2536
2537 case NVME_CQE_SC_SPC_NS_PRIV:
2538 /* Namespace Is Private */
2539 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2540 NVME_BUMP_STAT(cmd->nc_nvme, ns_priv);
2541 return (EACCES);
2542
2543 case NVME_CQE_SC_SPC_NS_NOT_ATTACH:
2544 /* Namespace Not Attached */
2545 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2546 NVME_BUMP_STAT(cmd->nc_nvme, ns_not_attached);
2547 return (ENOENT);
2548
2549 case NVME_CQE_SC_SPC_INV_CTRL_LIST:
2550 /* Controller List Invalid */
2551 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2552 NVME_BUMP_STAT(cmd->nc_nvme, ana_attach);
2553 return (EINVAL);
2554
2555 case NVME_CQE_SC_SPC_ANA_ATTACH:
2556 /* ANA Attach Failed */
2557 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2558 NVME_BUMP_STAT(cmd->nc_nvme, ana_attach);
2559 return (EIO);
2560
2561 case NVME_CQE_SC_SPC_NS_ATTACH_LIM:
2562 /* Namespace Attachment Limit Exceeded */
2563 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
2564 NVME_BUMP_STAT(cmd->nc_nvme, ns_attach_lim);
2565 return (EOVERFLOW);
2566
2567 default:
2568 return (nvme_check_unknown_cmd_status(cmd));
2569 }
2570 }
2571
2572 static inline int
2573 nvme_check_cmd_status(nvme_cmd_t *cmd)
2574 {
2575 nvme_cqe_t *cqe = &cmd->nc_cqe;
2576
2577 /*
2578 * Take a shortcut if the controller is dead, or if
2579 * command status indicates no error.
2580 */
2581 if (cmd->nc_nvme->n_dead)
2582 return (EIO);
2583
2584 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2585 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2586 return (0);
2587
2588 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
2589 return (nvme_check_generic_cmd_status(cmd));
2590 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
2591 return (nvme_check_specific_cmd_status(cmd));
2592 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
2593 return (nvme_check_integrity_cmd_status(cmd));
2594 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
2595 return (nvme_check_vendor_cmd_status(cmd));
2596
2597 return (nvme_check_unknown_cmd_status(cmd));
2598 }
2599
2600 /*
2601 * Check the command status as used by an ioctl path and do not convert it to an
2602 * errno. We still allow all the command status checking to occur, but otherwise
2603 * will pass back the controller error as is.
2604 */
2605 static boolean_t
2606 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc)
2607 {
2608 nvme_cqe_t *cqe = &cmd->nc_cqe;
2609 nvme_t *nvme = cmd->nc_nvme;
2610
2611 if (nvme->n_dead) {
2612 return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0));
2613 }
2614
2615 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2616 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2617 return (B_TRUE);
2618
2619 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) {
2620 (void) nvme_check_generic_cmd_status(cmd);
2621 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) {
2622 (void) nvme_check_specific_cmd_status(cmd);
2623 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) {
2624 (void) nvme_check_integrity_cmd_status(cmd);
2625 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) {
2626 (void) nvme_check_vendor_cmd_status(cmd);
2627 } else {
2628 (void) nvme_check_unknown_cmd_status(cmd);
2629 }
2630
2631 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR,
2632 cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc));
2633 }
2634
2635 static int
2636 nvme_abort_cmd(nvme_cmd_t *cmd, const uint32_t sec)
2637 {
2638 nvme_t *nvme = cmd->nc_nvme;
2639 nvme_cmd_t *abort_cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2640 nvme_abort_cmd_t ac = { 0 };
2641 int ret = 0;
2642
2643 sema_p(&nvme->n_abort_sema);
2644
2645 ac.b.ac_cid = cmd->nc_sqe.sqe_cid;
2646 ac.b.ac_sqid = cmd->nc_sqid;
2647
2648 abort_cmd->nc_sqid = 0;
2649 abort_cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
2650 abort_cmd->nc_callback = nvme_wakeup_cmd;
2651 abort_cmd->nc_sqe.sqe_cdw10 = ac.r;
2652
2653 /*
2654 * Send the ABORT to the hardware. The ABORT command will return _after_
2655 * the aborted command has completed (aborted or otherwise) so we must
2656 * drop the aborted command's lock to allow it to complete.
2657 * We want to allow at least `nvme_abort_cmd_timeout` seconds for the
2658 * abort to be processed, but more if we are aborting a long-running
2659 * command to give that time to complete/abort too.
2660 */
2661 mutex_exit(&cmd->nc_mutex);
2662 nvme_admin_cmd(abort_cmd, MAX(nvme_abort_cmd_timeout, sec));
2663 mutex_enter(&cmd->nc_mutex);
2664
2665 sema_v(&nvme->n_abort_sema);
2666
2667 /* BEGIN CSTYLED */
2668 /*
2669 * If the abort command itself has timed out, it will have been
2670 * de-queued so that its callback will not be called after this point,
2671 * and its state will be NVME_CMD_LOST.
2672 *
2673 * nvme_admin_cmd(abort_cmd)
2674 * -> nvme_wait_cmd(abort_cmd)
2675 * -> nvme_cmd(abort_cmd)
2676 * | -> nvme_admin_cmd(cmd)
2677 * | -> nvme_wait_cmd(cmd)
2678 * | -> nvme_ctrl_mark_dead()
2679 * | -> nvme_lost_cmd(cmd)
2680 * | -> cmd->nc_state = NVME_CMD_LOST
2681 * and here we are.
2682 */
2683 /* END CSTYLED */
2684 if (abort_cmd->nc_state == NVME_CMD_LOST) {
2685 dev_err(nvme->n_dip, CE_WARN,
2686 "!ABORT of command %d/%d timed out",
2687 cmd->nc_sqe.sqe_cid, cmd->nc_sqid);
2688 NVME_BUMP_STAT(nvme, abort_timeout);
2689 ret = EIO;
2690 } else if ((ret = nvme_check_cmd_status(abort_cmd)) != 0) {
2691 dev_err(nvme->n_dip, CE_WARN,
2692 "!ABORT of command %d/%d "
2693 "failed with sct = %x, sc = %x",
2694 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2695 abort_cmd->nc_cqe.cqe_sf.sf_sct,
2696 abort_cmd->nc_cqe.cqe_sf.sf_sc);
2697 NVME_BUMP_STAT(nvme, abort_failed);
2698 } else {
2699 boolean_t success = ((abort_cmd->nc_cqe.cqe_dw0 & 1) == 0);
2700
2701 dev_err(nvme->n_dip, CE_WARN,
2702 "!ABORT of command %d/%d %ssuccessful",
2703 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2704 success ? "" : "un");
2705
2706 if (success) {
2707 NVME_BUMP_STAT(nvme, abort_successful);
2708 } else {
2709 NVME_BUMP_STAT(nvme, abort_unsuccessful);
2710 }
2711 }
2712
2713 /*
2714 * This abort abort_cmd has either completed or been de-queued as
2715 * lost in nvme_wait_cmd. Either way it's safe to free it here.
2716 */
2717 nvme_free_cmd(abort_cmd);
2718
2719 return (ret);
2720 }
2721
2722 /*
2723 * nvme_wait_cmd -- wait for command completion or timeout
2724 *
2725 * In case of a serious error or a timeout of the abort command the hardware
2726 * will be declared dead and FMA will be notified.
2727 */
2728 static void
2729 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec)
2730 {
2731 nvme_t *nvme = cmd->nc_nvme;
2732 nvme_reg_csts_t csts;
2733
2734 ASSERT(mutex_owned(&cmd->nc_mutex));
2735
2736 while (cmd->nc_state != NVME_CMD_COMPLETED) {
2737 clock_t timeout = ddi_get_lbolt() +
2738 drv_usectohz((long)sec * MICROSEC);
2739
2740 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) {
2741 /*
2742 * If this command is on the task queue then we don't
2743 * consider it to have timed out. We are waiting for
2744 * the callback to be invoked, the timing of which can
2745 * be affected by system load and should not count
2746 * against the device; continue to wait.
2747 * While this doesn't help deal with the possibility of
2748 * a command timing out between being placed on the CQ
2749 * and arriving on the taskq, we expect interrupts to
2750 * run fairly promptly making this a small window.
2751 */
2752 if (cmd->nc_state != NVME_CMD_QUEUED)
2753 break;
2754 }
2755 }
2756
2757 if (cmd->nc_state == NVME_CMD_COMPLETED) {
2758 DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *, cmd);
2759 nvme_admin_stat_cmd(nvme, cmd);
2760 return;
2761 }
2762
2763 /*
2764 * The command timed out.
2765 */
2766
2767 DTRACE_PROBE1(nvme_admin_cmd_timeout, nvme_cmd_t *, cmd);
2768 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2769 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
2770 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2771 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
2772 NVME_BUMP_STAT(nvme, cmd_timeout);
2773
2774 /*
2775 * Check controller for fatal status, any errors associated with the
2776 * register or DMA handle, or for a double timeout (abort command timed
2777 * out). If necessary log a warning and call FMA.
2778 */
2779 if (csts.b.csts_cfs ||
2780 nvme_check_regs_hdl(nvme) ||
2781 nvme_check_dma_hdl(cmd->nc_dma) ||
2782 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
2783 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2784 nvme_lost_cmd(nvme, cmd);
2785 return;
2786 }
2787
2788 /* Issue an abort for the command that has timed out */
2789 if (nvme_abort_cmd(cmd, sec) == 0) {
2790 /*
2791 * If the abort completed, whether or not it was
2792 * successful in aborting the command, that command
2793 * will also have completed with an appropriate
2794 * status.
2795 */
2796 while (cmd->nc_state != NVME_CMD_COMPLETED)
2797 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2798 return;
2799 }
2800
2801 /*
2802 * Otherwise, the abort has also timed out or failed, which
2803 * will have marked the controller dead. De-queue the original command
2804 * and add it to the lost commands list.
2805 */
2806 VERIFY(cmd->nc_nvme->n_dead);
2807 nvme_lost_cmd(nvme, cmd);
2808 }
2809
2810 static void
2811 nvme_wakeup_cmd(void *arg)
2812 {
2813 nvme_cmd_t *cmd = arg;
2814
2815 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
2816
2817 mutex_enter(&cmd->nc_mutex);
2818 cmd->nc_state = NVME_CMD_COMPLETED;
2819 cv_signal(&cmd->nc_cv);
2820 mutex_exit(&cmd->nc_mutex);
2821 }
2822
2823 static void
2824 nvme_async_event_task(void *arg)
2825 {
2826 nvme_cmd_t *cmd = arg;
2827 nvme_t *nvme = cmd->nc_nvme;
2828 nvme_error_log_entry_t *error_log = NULL;
2829 nvme_health_log_t *health_log = NULL;
2830 nvme_nschange_list_t *nslist = NULL;
2831 size_t logsize = 0;
2832 nvme_async_event_t event;
2833
2834 /*
2835 * Check for errors associated with the async request itself. The only
2836 * command-specific error is "async event limit exceeded", which
2837 * indicates a programming error in the driver and causes a panic in
2838 * nvme_check_cmd_status().
2839 *
2840 * Other possible errors are various scenarios where the async request
2841 * was aborted, or internal errors in the device. Internal errors are
2842 * reported to FMA, the command aborts need no special handling here.
2843 *
2844 * Finally, at least the qemu nvme implementation does not support async
2845 * events and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If we see that,
2846 * we stop posting async event requests.
2847 */
2848
2849 if (nvme_check_cmd_status(cmd) != 0) {
2850 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2851 "!async event request returned failure, sct = 0x%x, "
2852 "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
2853 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
2854 cmd->nc_cqe.cqe_sf.sf_m);
2855
2856 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2857 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
2858 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2859 }
2860
2861 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2862 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
2863 cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
2864 nvme->n_async_event_supported = B_FALSE;
2865 }
2866
2867 nvme_free_cmd(cmd);
2868 return;
2869 }
2870
2871 event.r = cmd->nc_cqe.cqe_dw0;
2872
2873 /* Clear CQE and re-submit the async request. */
2874 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
2875 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
2876 cmd = NULL; /* cmd can no longer be used after resubmission */
2877
2878 switch (event.b.ae_type) {
2879 case NVME_ASYNC_TYPE_ERROR:
2880 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
2881 if (!nvme_get_logpage_int(nvme, B_FALSE,
2882 (void **)&error_log, &logsize,
2883 NVME_LOGPAGE_ERROR)) {
2884 return;
2885 }
2886 } else {
2887 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2888 "async event reply: type=0x%x logpage=0x%x",
2889 event.b.ae_type, event.b.ae_logpage);
2890 NVME_BUMP_STAT(nvme, wrong_logpage);
2891 return;
2892 }
2893
2894 switch (event.b.ae_info) {
2895 case NVME_ASYNC_ERROR_INV_SQ:
2896 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2897 "invalid submission queue");
2898 return;
2899
2900 case NVME_ASYNC_ERROR_INV_DBL:
2901 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2902 "invalid doorbell write value");
2903 return;
2904
2905 case NVME_ASYNC_ERROR_DIAGFAIL:
2906 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
2907 nvme_ctrl_mark_dead(nvme, B_FALSE);
2908 NVME_BUMP_STAT(nvme, diagfail_event);
2909 break;
2910
2911 case NVME_ASYNC_ERROR_PERSISTENT:
2912 dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
2913 "device error");
2914 nvme_ctrl_mark_dead(nvme, B_FALSE);
2915 NVME_BUMP_STAT(nvme, persistent_event);
2916 break;
2917
2918 case NVME_ASYNC_ERROR_TRANSIENT:
2919 dev_err(nvme->n_dip, CE_WARN, "!transient internal "
2920 "device error");
2921 /* TODO: send ereport */
2922 NVME_BUMP_STAT(nvme, transient_event);
2923 break;
2924
2925 case NVME_ASYNC_ERROR_FW_LOAD:
2926 dev_err(nvme->n_dip, CE_WARN,
2927 "!firmware image load error");
2928 NVME_BUMP_STAT(nvme, fw_load_event);
2929 break;
2930 }
2931 break;
2932
2933 case NVME_ASYNC_TYPE_HEALTH:
2934 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
2935 if (!nvme_get_logpage_int(nvme, B_FALSE,
2936 (void **)&health_log, &logsize,
2937 NVME_LOGPAGE_HEALTH)) {
2938 return;
2939 }
2940 } else {
2941 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2942 "async event reply: type=0x%x logpage=0x%x", event.b.ae_type,
2943 event.b.ae_logpage);
2944 NVME_BUMP_STAT(nvme, wrong_logpage);
2945 return;
2946 }
2947
2948 switch (event.b.ae_info) {
2949 case NVME_ASYNC_HEALTH_RELIABILITY:
2950 dev_err(nvme->n_dip, CE_WARN,
2951 "!device reliability compromised");
2952 /* TODO: send ereport */
2953 NVME_BUMP_STAT(nvme, reliability_event);
2954 break;
2955
2956 case NVME_ASYNC_HEALTH_TEMPERATURE:
2957 dev_err(nvme->n_dip, CE_WARN,
2958 "!temperature above threshold");
2959 /* TODO: send ereport */
2960 NVME_BUMP_STAT(nvme, temperature_event);
2961 break;
2962
2963 case NVME_ASYNC_HEALTH_SPARE:
2964 dev_err(nvme->n_dip, CE_WARN,
2965 "!spare space below threshold");
2966 /* TODO: send ereport */
2967 NVME_BUMP_STAT(nvme, spare_event);
2968 break;
2969 }
2970 break;
2971
2972 case NVME_ASYNC_TYPE_NOTICE:
2973 switch (event.b.ae_info) {
2974 case NVME_ASYNC_NOTICE_NS_CHANGE:
2975 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) {
2976 dev_err(nvme->n_dip, CE_WARN,
2977 "!wrong logpage in async event reply: "
2978 "type=0x%x logpage=0x%x",
2979 event.b.ae_type, event.b.ae_logpage);
2980 NVME_BUMP_STAT(nvme, wrong_logpage);
2981 break;
2982 }
2983
2984 dev_err(nvme->n_dip, CE_NOTE,
2985 "namespace attribute change event, "
2986 "logpage = 0x%x", event.b.ae_logpage);
2987 NVME_BUMP_STAT(nvme, notice_event);
2988
2989 if (!nvme_get_logpage_int(nvme, B_FALSE,
2990 (void **)&nslist, &logsize,
2991 NVME_LOGPAGE_NSCHANGE)) {
2992 break;
2993 }
2994
2995 if (nslist->nscl_ns[0] == UINT32_MAX) {
2996 dev_err(nvme->n_dip, CE_CONT,
2997 "more than %u namespaces have changed.\n",
2998 NVME_NSCHANGE_LIST_SIZE);
2999 break;
3000 }
3001
3002 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
3003 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
3004 uint32_t nsid = nslist->nscl_ns[i];
3005 nvme_namespace_t *ns;
3006
3007 if (nsid == 0) /* end of list */
3008 break;
3009
3010 dev_err(nvme->n_dip, CE_NOTE,
3011 "!namespace nvme%d/%u has changed.",
3012 ddi_get_instance(nvme->n_dip), nsid);
3013
3014 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
3015 continue;
3016
3017 ns = nvme_nsid2ns(nvme, nsid);
3018 if (ns->ns_state <= NVME_NS_STATE_NOT_IGNORED)
3019 continue;
3020
3021 nvme_mgmt_bd_start(nvme);
3022 bd_state_change(ns->ns_bd_hdl);
3023 nvme_mgmt_bd_end(nvme);
3024 }
3025 nvme_mgmt_unlock(nvme);
3026
3027 break;
3028
3029 case NVME_ASYNC_NOTICE_FW_ACTIVATE:
3030 dev_err(nvme->n_dip, CE_NOTE,
3031 "firmware activation starting, "
3032 "logpage = 0x%x", event.b.ae_logpage);
3033 NVME_BUMP_STAT(nvme, notice_event);
3034 break;
3035
3036 case NVME_ASYNC_NOTICE_TELEMETRY:
3037 dev_err(nvme->n_dip, CE_NOTE,
3038 "telemetry log changed, "
3039 "logpage = 0x%x", event.b.ae_logpage);
3040 NVME_BUMP_STAT(nvme, notice_event);
3041 break;
3042
3043 case NVME_ASYNC_NOTICE_NS_ASYMM:
3044 dev_err(nvme->n_dip, CE_NOTE,
3045 "asymmetric namespace access change, "
3046 "logpage = 0x%x", event.b.ae_logpage);
3047 NVME_BUMP_STAT(nvme, notice_event);
3048 break;
3049
3050 case NVME_ASYNC_NOTICE_LATENCYLOG:
3051 dev_err(nvme->n_dip, CE_NOTE,
3052 "predictable latency event aggregate log change, "
3053 "logpage = 0x%x", event.b.ae_logpage);
3054 NVME_BUMP_STAT(nvme, notice_event);
3055 break;
3056
3057 case NVME_ASYNC_NOTICE_LBASTATUS:
3058 dev_err(nvme->n_dip, CE_NOTE,
3059 "LBA status information alert, "
3060 "logpage = 0x%x", event.b.ae_logpage);
3061 NVME_BUMP_STAT(nvme, notice_event);
3062 break;
3063
3064 case NVME_ASYNC_NOTICE_ENDURANCELOG:
3065 dev_err(nvme->n_dip, CE_NOTE,
3066 "endurance group event aggregate log page change, "
3067 "logpage = 0x%x", event.b.ae_logpage);
3068 NVME_BUMP_STAT(nvme, notice_event);
3069 break;
3070
3071 default:
3072 dev_err(nvme->n_dip, CE_WARN,
3073 "!unknown notice async event received, "
3074 "info = 0x%x, logpage = 0x%x", event.b.ae_info,
3075 event.b.ae_logpage);
3076 NVME_BUMP_STAT(nvme, unknown_event);
3077 break;
3078 }
3079 break;
3080
3081 case NVME_ASYNC_TYPE_VENDOR:
3082 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
3083 "received, info = 0x%x, logpage = 0x%x", event.b.ae_info,
3084 event.b.ae_logpage);
3085 NVME_BUMP_STAT(nvme, vendor_event);
3086 break;
3087
3088 default:
3089 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
3090 "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type,
3091 event.b.ae_info, event.b.ae_logpage);
3092 NVME_BUMP_STAT(nvme, unknown_event);
3093 break;
3094 }
3095
3096 if (error_log != NULL)
3097 kmem_free(error_log, logsize);
3098
3099 if (health_log != NULL)
3100 kmem_free(health_log, logsize);
3101
3102 if (nslist != NULL)
3103 kmem_free(nslist, logsize);
3104 }
3105
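/*
 * Submit an admin command and block until it completes. The caller-supplied
 * timeout is extended by the queue timeout returned from
 * nvme_submit_admin_cmd(); see the comment below.
 */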
3106 static void
3107 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec)
3108 {
3109 uint32_t qtimeout;
3110
3111 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
3112
3113 mutex_enter(&cmd->nc_mutex);
3114 cmd->nc_timeout = sec;
3115 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd, &qtimeout);
3116 /*
3117 * We will wait for a total of this command's specified timeout plus
3118 * the sum of the timeouts of any commands queued ahead of this one. If
3119 	 * we aren't first in the queue, this will inflate the timeout somewhat,
3120 	 * but these times are not critical. It also means that if we get stuck
3121 	 * behind a long-running command such as a namespace format, we won't
3122 	 * time out and trigger an abort.
3123 */
3124 nvme_wait_cmd(cmd, sec + qtimeout);
3125 mutex_exit(&cmd->nc_mutex);
3126 }
3127
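/*
 * Arm an Asynchronous Event Request. The command is submitted without waiting
 * for completion; it only completes once the controller posts an event, at
 * which point nvme_async_event_task() runs as the command's callback.
 */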
3128 static void
3129 nvme_async_event(nvme_t *nvme)
3130 {
3131 nvme_cmd_t *cmd;
3132
3133 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3134 cmd->nc_sqid = 0;
3135 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
3136 cmd->nc_callback = nvme_async_event_task;
3137 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3138
3139 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
3140 }
3141
3142 /*
3143  * Some commands, such as format or vendor-unique commands, will manipulate or
3144  * destroy the data in a namespace. Before issuing such a command we make sure
3145  * that none of the namespaces it would impact are actually attached.
3146 */
3147 static boolean_t
3148 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid)
3149 {
3150 ASSERT(nvme_mgmt_lock_held(nvme));
3151 ASSERT3U(nsid, !=, 0);
3152
3153 if (nsid != NVME_NSID_BCAST) {
3154 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3155 return (ns->ns_state < NVME_NS_STATE_ATTACHED);
3156 }
3157
3158 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
3159 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
3160
3161 if (ns->ns_state >= NVME_NS_STATE_ATTACHED) {
3162 return (B_FALSE);
3163 }
3164 }
3165
3166 return (B_TRUE);
3167 }
3168
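/*
 * Issue a FORMAT NVM admin command using the LBA format and secure erase
 * settings taken from the ioctl request.
 */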
3169 static boolean_t
3170 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc)
3171 {
3172 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3173 nvme_format_nvm_t format_nvm = { 0 };
3174 boolean_t ret;
3175
3176 format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0);
3177 format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0);
3178
3179 cmd->nc_sqid = 0;
3180 cmd->nc_callback = nvme_wakeup_cmd;
3181 cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid;
3182 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
3183 cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
3184
3185 /*
3186 * We don't want to panic on any format commands. There are two reasons
3187 * for this:
3188 *
3189 * 1) All format commands are initiated by users. We don't want to panic
3190 * on user commands.
3191 *
3192 * 2) Several devices like the Samsung SM951 don't allow formatting of
3193 * all namespaces in one command and we'd prefer to handle that
3194 * gracefully.
3195 */
3196 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3197
3198 nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
3199
3200 	if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common)) {
3201 dev_err(nvme->n_dip, CE_WARN,
3202 "!FORMAT failed with sct = %x, sc = %x",
3203 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3204 ret = B_FALSE;
3205 goto fail;
3206 }
3207
3208 ret = B_TRUE;
3209 fail:
3210 nvme_free_cmd(cmd);
3211 return (ret);
3212 }
3213
3214 /*
3215 * Retrieve a specific log page. The contents of the log page request should
3216 * have already been validated by the system.
3217 */
3218 static boolean_t
3219 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log,
3220 void **buf)
3221 {
3222 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3223 nvme_getlogpage_dw10_t dw10;
3224 uint32_t offlo, offhi;
3225 nvme_getlogpage_dw11_t dw11;
3226 nvme_getlogpage_dw14_t dw14;
3227 uint32_t ndw;
3228 boolean_t ret = B_FALSE;
3229
3230 bzero(&dw10, sizeof (dw10));
3231 bzero(&dw11, sizeof (dw11));
3232 bzero(&dw14, sizeof (dw14));
3233
3234 cmd->nc_sqid = 0;
3235 cmd->nc_callback = nvme_wakeup_cmd;
3236 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
3237 cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid;
3238
3239 if (user)
3240 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3241
3242 /*
3243 	 * The size field is the number of double words, but it is a zero-based
3244 	 * value, so we need to store our actual value minus one.
3245 */
3246 ndw = (uint32_t)(log->nigl_len / 4);
3247 ASSERT3U(ndw, >, 0);
3248 ndw--;
3249
3250 dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0);
3251 dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0);
3252 dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0);
3253 dw10.b.lp_lnumdl = bitx32(ndw, 15, 0);
3254
3255 dw11.b.lp_numdu = bitx32(ndw, 31, 16);
3256 dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0);
3257
3258 offlo = bitx64(log->nigl_offset, 31, 0);
3259 offhi = bitx64(log->nigl_offset, 63, 32);
3260
3261 dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0);
3262
3263 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3264 cmd->nc_sqe.sqe_cdw11 = dw11.r;
3265 cmd->nc_sqe.sqe_cdw12 = offlo;
3266 cmd->nc_sqe.sqe_cdw13 = offhi;
3267 cmd->nc_sqe.sqe_cdw14 = dw14.r;
3268
3269 if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ,
3270 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3271 dev_err(nvme->n_dip, CE_WARN,
3272 "!nvme_zalloc_dma failed for GET LOG PAGE");
3273 ret = nvme_ioctl_error(&log->nigl_common,
3274 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3275 goto fail;
3276 }
3277
3278 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
3279 ret = nvme_ioctl_error(&log->nigl_common,
3280 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3281 goto fail;
3282 }
3283 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3284
3285 if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) {
3286 if (!user) {
3287 dev_err(nvme->n_dip, CE_WARN,
3288 "!GET LOG PAGE failed with sct = %x, sc = %x",
3289 cmd->nc_cqe.cqe_sf.sf_sct,
3290 cmd->nc_cqe.cqe_sf.sf_sc);
3291 }
3292 ret = B_FALSE;
3293 goto fail;
3294 }
3295
3296 *buf = kmem_alloc(log->nigl_len, KM_SLEEP);
3297 bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len);
3298
3299 ret = B_TRUE;
3300 fail:
3301 nvme_free_cmd(cmd);
3302
3303 return (ret);
3304 }
3305
3306 /*
3307 * This is an internal wrapper for when the kernel wants to get a log page.
3308 * Currently this assumes that the only thing that is required is the log page
3309 * ID. If more information is required, we'll be better served to just use the
3310 * general ioctl interface.
3311 */
3312 static boolean_t
3313 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
3314 uint8_t lid)
3315 {
3316 const nvme_log_page_info_t *info = NULL;
3317 nvme_ioctl_get_logpage_t log;
3318 nvme_valid_ctrl_data_t data;
3319 boolean_t bret;
3320 bool var;
3321
3322 for (size_t i = 0; i < nvme_std_log_npages; i++) {
3323 if (nvme_std_log_pages[i].nlpi_lid == lid &&
3324 nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) {
3325 info = &nvme_std_log_pages[i];
3326 break;
3327 }
3328 }
3329
3330 if (info == NULL) {
3331 return (B_FALSE);
3332 }
3333
3334 data.vcd_vers = &nvme->n_version;
3335 data.vcd_id = nvme->n_idctl;
3336 bzero(&log, sizeof (log));
3337 log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
3338 log.nigl_csi = info->nlpi_csi;
3339 log.nigl_lid = info->nlpi_lid;
3340 log.nigl_len = nvme_log_page_info_size(info, &data, &var);
3341
3342 /*
3343 * We only support getting standard fixed-length log pages through the
3344 * kernel interface at this time. If a log page either has an unknown
3345 * size or has a variable length, then we cannot get it.
3346 */
3347 if (log.nigl_len == 0 || var) {
3348 return (B_FALSE);
3349 }
3350
3351 bret = nvme_get_logpage(nvme, user, &log, buf);
3352 if (!bret) {
3353 return (B_FALSE);
3354 }
3355
3356 *bufsize = log.nigl_len;
3357 return (B_TRUE);
3358 }
3359
3360 static boolean_t
3361 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc,
3362 void **buf)
3363 {
3364 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3365 boolean_t ret = B_FALSE;
3366 nvme_identify_dw10_t dw10;
3367
3368 ASSERT3P(buf, !=, NULL);
3369
3370 bzero(&dw10, sizeof (dw10));
3371
3372 cmd->nc_sqid = 0;
3373 cmd->nc_callback = nvme_wakeup_cmd;
3374 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
3375 cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid;
3376
3377 dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0);
3378 dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0);
3379
3380 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3381
3382 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
3383 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3384 dev_err(nvme->n_dip, CE_WARN,
3385 "!nvme_zalloc_dma failed for IDENTIFY");
3386 ret = nvme_ioctl_error(&ioc->nid_common,
3387 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3388 goto fail;
3389 }
3390
3391 if (cmd->nc_dma->nd_ncookie > 2) {
3392 dev_err(nvme->n_dip, CE_WARN,
3393 "!too many DMA cookies for IDENTIFY");
3394 NVME_BUMP_STAT(nvme, too_many_cookies);
3395 ret = nvme_ioctl_error(&ioc->nid_common,
3396 NVME_IOCTL_E_BAD_PRP, 0, 0);
3397 goto fail;
3398 }
3399
3400 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
3401 if (cmd->nc_dma->nd_ncookie > 1) {
3402 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
3403 &cmd->nc_dma->nd_cookie);
3404 cmd->nc_sqe.sqe_dptr.d_prp[1] =
3405 cmd->nc_dma->nd_cookie.dmac_laddress;
3406 }
3407
3408 if (user)
3409 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3410
3411 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3412
3413 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) {
3414 dev_err(nvme->n_dip, CE_WARN,
3415 "!IDENTIFY failed with sct = %x, sc = %x",
3416 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3417 ret = B_FALSE;
3418 goto fail;
3419 }
3420
3421 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
3422 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
3423 ret = B_TRUE;
3424
3425 fail:
3426 nvme_free_cmd(cmd);
3427
3428 return (ret);
3429 }
3430
3431 static boolean_t
3432 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf)
3433 {
3434 nvme_ioctl_identify_t id;
3435
3436 bzero(&id, sizeof (nvme_ioctl_identify_t));
3437 id.nid_common.nioc_nsid = nsid;
3438 id.nid_cns = cns;
3439
3440 return (nvme_identify(nvme, B_FALSE, &id, buf));
3441 }
3442
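/*
 * Set a controller feature. Only the features the driver itself uses (the
 * volatile write cache and the number of queues) are accepted here. The
 * completion's dword 0 result is returned through 'res'.
 */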
3443 static int
3444 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
3445 uint32_t val, uint32_t *res)
3446 {
3447 _NOTE(ARGUNUSED(nsid));
3448 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3449 int ret = EINVAL;
3450
3451 ASSERT(res != NULL);
3452
3453 cmd->nc_sqid = 0;
3454 cmd->nc_callback = nvme_wakeup_cmd;
3455 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
3456 cmd->nc_sqe.sqe_cdw10 = feature;
3457 cmd->nc_sqe.sqe_cdw11 = val;
3458
3459 if (user)
3460 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3461
3462 switch (feature) {
3463 case NVME_FEAT_WRITE_CACHE:
3464 if (!nvme->n_write_cache_present)
3465 goto fail;
3466 break;
3467
3468 case NVME_FEAT_NQUEUES:
3469 break;
3470
3471 default:
3472 goto fail;
3473 }
3474
3475 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3476
3477 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3478 dev_err(nvme->n_dip, CE_WARN,
3479 "!SET FEATURES %d failed with sct = %x, sc = %x",
3480 feature, cmd->nc_cqe.cqe_sf.sf_sct,
3481 cmd->nc_cqe.cqe_sf.sf_sc);
3482 goto fail;
3483 }
3484
3485 *res = cmd->nc_cqe.cqe_dw0;
3486
3487 fail:
3488 nvme_free_cmd(cmd);
3489 return (ret);
3490 }
3491
3492 static int
3493 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
3494 {
3495 nvme_write_cache_t nwc = { 0 };
3496
3497 if (enable)
3498 nwc.b.wc_wce = 1;
3499
3500 /*
3501 * We've seen some cases where this fails due to us being told we've
3502 * specified an invalid namespace when operating against the Xen xcp-ng
3503 * qemu NVMe virtual device. As such, we generally ensure that trying to
3504 * enable this doesn't lead us to panic. It's not completely clear why
3505 * specifying namespace zero here fails, but not when we're setting the
3506 * number of queues below.
3507 */
3508 return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE,
3509 nwc.r, &nwc.r));
3510 }
3511
3512 static int
3513 nvme_set_nqueues(nvme_t *nvme)
3514 {
3515 nvme_nqueues_t nq = { 0 };
3516 int ret;
3517
3518 /*
3519 * The default is to allocate one completion queue per vector.
3520 */
3521 if (nvme->n_completion_queues == -1)
3522 nvme->n_completion_queues = nvme->n_intr_cnt;
3523
3524 /*
3525 * There is no point in having more completion queues than
3526 * interrupt vectors.
3527 */
3528 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3529 nvme->n_intr_cnt);
3530
3531 /*
3532 * The default is to use one submission queue per completion queue.
3533 */
3534 if (nvme->n_submission_queues == -1)
3535 nvme->n_submission_queues = nvme->n_completion_queues;
3536
3537 /*
3538 * There is no point in having more completion queues than
3539 * submission queues.
3540 */
3541 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3542 nvme->n_submission_queues);
3543
3544 ASSERT(nvme->n_submission_queues > 0);
3545 ASSERT(nvme->n_completion_queues > 0);
3546
3547 nq.b.nq_nsq = nvme->n_submission_queues - 1;
3548 nq.b.nq_ncq = nvme->n_completion_queues - 1;
3549
3550 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
3551 &nq.r);
3552
3553 if (ret == 0) {
3554 /*
3555 * Never use more than the requested number of queues.
3556 */
3557 nvme->n_submission_queues = MIN(nvme->n_submission_queues,
3558 nq.b.nq_nsq + 1);
3559 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3560 nq.b.nq_ncq + 1);
3561 }
3562
3563 return (ret);
3564 }
3565
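/*
 * Create an I/O completion queue on the controller. The queue is created with
 * interrupts enabled and is assigned an interrupt vector based on its queue
 * ID.
 */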
3566 static int
3567 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
3568 {
3569 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3570 nvme_create_queue_dw10_t dw10 = { 0 };
3571 nvme_create_cq_dw11_t c_dw11 = { 0 };
3572 int ret;
3573
3574 dw10.b.q_qid = cq->ncq_id;
3575 dw10.b.q_qsize = cq->ncq_nentry - 1;
3576
3577 c_dw11.b.cq_pc = 1;
3578 c_dw11.b.cq_ien = 1;
3579 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
3580
3581 cmd->nc_sqid = 0;
3582 cmd->nc_callback = nvme_wakeup_cmd;
3583 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
3584 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3585 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
3586 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
3587
3588 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3589
3590 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3591 dev_err(nvme->n_dip, CE_WARN,
3592 "!CREATE CQUEUE failed with sct = %x, sc = %x",
3593 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3594 }
3595
3596 nvme_free_cmd(cmd);
3597
3598 return (ret);
3599 }
3600
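/*
 * Create the submission queue of an I/O queue pair, first creating its
 * completion queue unless that queue is shared and already exists.
 */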
3601 static int
3602 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
3603 {
3604 nvme_cq_t *cq = qp->nq_cq;
3605 nvme_cmd_t *cmd;
3606 nvme_create_queue_dw10_t dw10 = { 0 };
3607 nvme_create_sq_dw11_t s_dw11 = { 0 };
3608 int ret;
3609
3610 /*
3611 * It is possible to have more qpairs than completion queues,
3612 * and when the idx > ncq_id, that completion queue is shared
3613 * and has already been created.
3614 */
3615 if (idx <= cq->ncq_id &&
3616 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
3617 return (DDI_FAILURE);
3618
3619 dw10.b.q_qid = idx;
3620 dw10.b.q_qsize = qp->nq_nentry - 1;
3621
3622 s_dw11.b.sq_pc = 1;
3623 s_dw11.b.sq_cqid = cq->ncq_id;
3624
3625 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3626 cmd->nc_sqid = 0;
3627 cmd->nc_callback = nvme_wakeup_cmd;
3628 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
3629 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3630 cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
3631 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
3632
3633 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3634
3635 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3636 dev_err(nvme->n_dip, CE_WARN,
3637 "!CREATE SQUEUE failed with sct = %x, sc = %x",
3638 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3639 }
3640
3641 nvme_free_cmd(cmd);
3642
3643 return (ret);
3644 }
3645
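/*
 * Disable the controller by clearing CC.EN and wait for CSTS.RDY to clear,
 * bounded by the worst-case timeout advertised in CAP.TO. Returns B_TRUE if
 * the controller reached the not-ready state.
 */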
3646 static boolean_t
3647 nvme_reset(nvme_t *nvme, boolean_t quiesce)
3648 {
3649 nvme_reg_csts_t csts;
3650 int i;
3651
3652 /*
3653 * If the device is gone, do not try to interact with it. We define
3654 * that resetting such a device is impossible, and always fails.
3655 */
3656 if (nvme_ctrl_is_gone(nvme)) {
3657 return (B_FALSE);
3658 }
3659
3660 nvme_put32(nvme, NVME_REG_CC, 0);
3661
3662 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3663 if (csts.b.csts_rdy == 1) {
3664 nvme_put32(nvme, NVME_REG_CC, 0);
3665
3666 /*
3667 * The timeout value is from the Controller Capabilities
3668 * register (CAP.TO, section 3.1.1). This is the worst case
3669 * time to wait for CSTS.RDY to transition from 1 to 0 after
3670 * CC.EN transitions from 1 to 0.
3671 *
3672 		 * The timeout is specified in units of 500 ms, and we are
3673 		 * delaying in 50 ms chunks, hence counting to n_timeout * 10.
3674 */
3675 for (i = 0; i < nvme->n_timeout * 10; i++) {
3676 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3677 if (csts.b.csts_rdy == 0)
3678 break;
3679
3680 /*
3681 * Quiescing drivers should not use locks or timeouts,
3682 * so if this is the quiesce path, use a quiesce-safe
3683 * delay.
3684 */
3685 if (quiesce) {
3686 drv_usecwait(50000);
3687 } else {
3688 delay(drv_usectohz(50000));
3689 }
3690 }
3691 }
3692
3693 nvme_put32(nvme, NVME_REG_AQA, 0);
3694 nvme_put32(nvme, NVME_REG_ASQ, 0);
3695 nvme_put32(nvme, NVME_REG_ACQ, 0);
3696
3697 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3698 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
3699 }
3700
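/*
 * Request a normal controller shutdown via CC.SHN and wait up to one second
 * for CSTS.SHST to report that the shutdown has completed.
 */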
3701 static void
3702 nvme_shutdown(nvme_t *nvme, boolean_t quiesce)
3703 {
3704 nvme_reg_cc_t cc;
3705 nvme_reg_csts_t csts;
3706 int i;
3707
3708 /*
3709 * Do not try to interact with the device if it is gone. Since it is
3710 * not there, in some sense it must already be shut down anyway.
3711 */
3712 if (nvme_ctrl_is_gone(nvme)) {
3713 return;
3714 }
3715
3716 cc.r = nvme_get32(nvme, NVME_REG_CC);
3717 cc.b.cc_shn = NVME_CC_SHN_NORMAL;
3718 nvme_put32(nvme, NVME_REG_CC, cc.r);
3719
3720 for (i = 0; i < 10; i++) {
3721 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3722 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
3723 break;
3724
3725 if (quiesce) {
3726 drv_usecwait(100000);
3727 } else {
3728 delay(drv_usectohz(100000));
3729 }
3730 }
3731 }
3732
3733 /*
3734 * Return length of string without trailing spaces.
3735 */
3736 static size_t
3737 nvme_strlen(const char *str, size_t len)
3738 {
3739 if (len <= 0)
3740 return (0);
3741
3742 while (str[--len] == ' ')
3743 ;
3744
3745 return (++len);
3746 }
3747
3748 static void
3749 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
3750 {
3751 ulong_t bsize = 0;
3752 char *msg = "";
3753
3754 if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
3755 goto err;
3756
3757 if (!ISP2(bsize)) {
3758 msg = ": not a power of 2";
3759 goto err;
3760 }
3761
3762 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
3763 msg = ": too low";
3764 goto err;
3765 }
3766
3767 nvme->n_min_block_size = bsize;
3768 return;
3769
3770 err:
3771 dev_err(nvme->n_dip, CE_WARN,
3772 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
3773 "for model '%s'%s", val, model, msg);
3774
3775 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3776 }
3777
3778 static void
3779 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
3780 boolean_t *b)
3781 {
3782 if (strcmp(val, "on") == 0 ||
3783 strcmp(val, "true") == 0)
3784 *b = B_TRUE;
3785 else if (strcmp(val, "off") == 0 ||
3786 strcmp(val, "false") == 0)
3787 *b = B_FALSE;
3788 else
3789 dev_err(nvme->n_dip, CE_WARN,
3790 "!nvme-config-list: invalid value for %s '%s'"
3791 " for model '%s', ignoring", name, val, model);
3792 }
3793
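/*
 * Parse the optional nvme-config-list property from nvme.conf and apply any
 * name:value settings whose model and (optionally) firmware revision match
 * this controller.
 */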
3794 static void
3795 nvme_config_list(nvme_t *nvme)
3796 {
3797 char **config_list;
3798 uint_t nelem;
3799 int rv;
3800
3801 /*
3802 * We're following the pattern of 'sd-config-list' here, but extend it.
3803 * Instead of two we have three separate strings for "model", "fwrev",
3804 * and "name-value-list".
3805 */
3806 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
3807 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
3808
3809 if (rv != DDI_PROP_SUCCESS) {
3810 if (rv == DDI_PROP_CANNOT_DECODE) {
3811 dev_err(nvme->n_dip, CE_WARN,
3812 "!nvme-config-list: cannot be decoded");
3813 }
3814
3815 return;
3816 }
3817
3818 if ((nelem % 3) != 0) {
3819 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
3820 "triplets of <model>/<fwrev>/<name-value-list> strings ");
3821 goto out;
3822 }
3823
3824 for (uint_t i = 0; i < nelem; i += 3) {
3825 char *model = config_list[i];
3826 char *fwrev = config_list[i + 1];
3827 char *nvp, *save_nv;
3828 size_t id_model_len, id_fwrev_len;
3829
3830 id_model_len = nvme_strlen(nvme->n_idctl->id_model,
3831 sizeof (nvme->n_idctl->id_model));
3832
3833 if (strlen(model) != id_model_len)
3834 continue;
3835
3836 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
3837 continue;
3838
3839 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
3840 sizeof (nvme->n_idctl->id_fwrev));
3841
3842 if (strlen(fwrev) != 0) {
3843 boolean_t match = B_FALSE;
3844 char *fwr, *last_fw;
3845
3846 for (fwr = strtok_r(fwrev, ",", &last_fw);
3847 fwr != NULL;
3848 fwr = strtok_r(NULL, ",", &last_fw)) {
3849 if (strlen(fwr) != id_fwrev_len)
3850 continue;
3851
3852 if (strncmp(fwr, nvme->n_idctl->id_fwrev,
3853 id_fwrev_len) == 0)
3854 match = B_TRUE;
3855 }
3856
3857 if (!match)
3858 continue;
3859 }
3860
3861 /*
3862 * We should now have a comma-separated list of name:value
3863 * pairs.
3864 */
3865 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
3866 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
3867 char *name = nvp;
3868 char *val = strchr(nvp, ':');
3869
3870 if (val == NULL || name == val) {
3871 dev_err(nvme->n_dip, CE_WARN,
3872 "!nvme-config-list: <name-value-list> "
3873 "for model '%s' is malformed", model);
3874 goto out;
3875 }
3876
3877 /*
3878 * Null-terminate 'name', move 'val' past ':' sep.
3879 */
3880 *val++ = '\0';
3881
3882 /*
3883 * Process the name:val pairs that we know about.
3884 */
3885 if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
3886 nvme_config_boolean(nvme, model, name, val,
3887 &nvme->n_ignore_unknown_vendor_status);
3888 } else if (strcmp(name, "min-phys-block-size") == 0) {
3889 nvme_config_min_block_size(nvme, model, val);
3890 } else if (strcmp(name, "volatile-write-cache") == 0) {
3891 nvme_config_boolean(nvme, model, name, val,
3892 &nvme->n_write_cache_enabled);
3893 } else {
3894 /*
3895 * Unknown 'name'.
3896 */
3897 dev_err(nvme->n_dip, CE_WARN,
3898 "!nvme-config-list: unknown config '%s' "
3899 "for model '%s', ignoring", name, model);
3900 }
3901 }
3902 }
3903
3904 out:
3905 ddi_prop_free(config_list);
3906 }
3907
3908 static void
3909 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
3910 {
3911 /*
3912 * Section 7.7 of the spec describes how to get a unique ID for
3913 * the controller: the vendor ID, the model name and the serial
3914 * number shall be unique when combined.
3915 *
3916 * If a namespace has no EUI64 we use the above and add the hex
3917 * namespace ID to get a unique ID for the namespace.
3918 */
3919 char model[sizeof (nvme->n_idctl->id_model) + 1];
3920 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
3921
3922 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
3923 bcopy(nvme->n_idctl->id_serial, serial,
3924 sizeof (nvme->n_idctl->id_serial));
3925
3926 model[sizeof (nvme->n_idctl->id_model)] = '\0';
3927 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
3928
3929 nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
3930 nvme->n_idctl->id_vid, model, serial, nsid);
3931 }
3932
3933 static nvme_identify_nsid_list_t *
3934 nvme_update_nsid_list(nvme_t *nvme, int cns)
3935 {
3936 nvme_identify_nsid_list_t *nslist;
3937
3938 /*
3939 	 * We currently don't handle cases where there are more than 1024
3940 	 * namespaces in the list, which would require several IDENTIFY commands.
3941 */
3942 if (nvme_identify_int(nvme, 0, cns, (void **)&nslist))
3943 return (nslist);
3944
3945 return (NULL);
3946 }
3947
3948 nvme_namespace_t *
3949 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid)
3950 {
3951 ASSERT3U(nsid, !=, 0);
3952 ASSERT3U(nsid, <=, nvme->n_namespace_count);
3953 return (&nvme->n_ns[nsid - 1]);
3954 }
3955
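/*
 * Determine whether a namespace is currently allocated on the controller. If
 * namespace management isn't supported, every namespace is reported as
 * allocated.
 */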
3956 static boolean_t
3957 nvme_allocated_ns(nvme_namespace_t *ns)
3958 {
3959 nvme_t *nvme = ns->ns_nvme;
3960 uint32_t i;
3961
3962 ASSERT(nvme_mgmt_lock_held(nvme));
3963
3964 /*
3965 * If supported, update the list of allocated namespace IDs.
3966 */
3967 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) &&
3968 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
3969 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3970 NVME_IDENTIFY_NSID_ALLOC_LIST);
3971 boolean_t found = B_FALSE;
3972
3973 /*
3974 * When namespace management is supported, this really shouldn't
3975 * be NULL. Treat all namespaces as allocated if it is.
3976 */
3977 if (nslist == NULL)
3978 return (B_TRUE);
3979
3980 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3981 if (ns->ns_id == 0)
3982 break;
3983
3984 if (ns->ns_id == nslist->nl_nsid[i])
3985 found = B_TRUE;
3986 }
3987
3988 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3989 return (found);
3990 } else {
3991 /*
3992 * If namespace management isn't supported, report all
3993 * namespaces as allocated.
3994 */
3995 return (B_TRUE);
3996 }
3997 }
3998
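/*
 * Determine whether a namespace is active, using the active namespace ID list
 * on NVMe 1.1 and later, or by checking for non-zero IDENTIFY NAMESPACE data
 * on 1.0 devices.
 */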
3999 static boolean_t
4000 nvme_active_ns(nvme_namespace_t *ns)
4001 {
4002 nvme_t *nvme = ns->ns_nvme;
4003 uint64_t *ptr;
4004 uint32_t i;
4005
4006 ASSERT(nvme_mgmt_lock_held(nvme));
4007
4008 /*
4009 * If supported, update the list of active namespace IDs.
4010 */
4011 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) {
4012 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
4013 NVME_IDENTIFY_NSID_LIST);
4014 boolean_t found = B_FALSE;
4015
4016 /*
4017 		 * When the active namespace ID list is supported, this really
4018 		 * shouldn't be NULL. Treat all namespaces as active if it is.
4019 */
4020 if (nslist == NULL)
4021 return (B_TRUE);
4022
4023 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
4024 if (ns->ns_id == 0)
4025 break;
4026
4027 if (ns->ns_id == nslist->nl_nsid[i])
4028 found = B_TRUE;
4029 }
4030
4031 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
4032 return (found);
4033 }
4034
4035 /*
4036 * Workaround for revision 1.0:
4037 * Check whether the IDENTIFY NAMESPACE data is zero-filled.
4038 */
4039 for (ptr = (uint64_t *)ns->ns_idns;
4040 ptr != (uint64_t *)(ns->ns_idns + 1);
4041 ptr++) {
4042 if (*ptr != 0) {
4043 return (B_TRUE);
4044 }
4045 }
4046
4047 return (B_FALSE);
4048 }
4049
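/*
 * (Re)initialize the driver state for a single namespace: fetch its IDENTIFY
 * NAMESPACE data, work out its allocated/active/attached state, and derive
 * the block sizes and identifiers that blkdev will use.
 */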
4050 static int
4051 nvme_init_ns(nvme_t *nvme, uint32_t nsid)
4052 {
4053 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
4054 nvme_identify_nsid_t *idns;
4055 nvme_ns_state_t orig_state;
4056
4057 ns->ns_nvme = nvme;
4058
4059 ASSERT(nvme_mgmt_lock_held(nvme));
4060
4061 /*
4062 	 * Because we might rescan a namespace after boot, a failure to identify
4063 	 * it here would leave us in a bad spot. We need to do something about
4064 	 * this longer term, but it's not clear how exactly we would recover
4065 	 * right now.
4066 */
4067 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4068 (void **)&idns)) {
4069 dev_err(nvme->n_dip, CE_WARN,
4070 "!failed to identify namespace %d", nsid);
4071 return (DDI_FAILURE);
4072 }
4073
4074 if (ns->ns_idns != NULL)
4075 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
4076
4077 ns->ns_idns = idns;
4078 ns->ns_id = nsid;
4079
4080 /*
4081 * Save the current state so we can tell what changed. Look at the
4082 * current state of the device. We will flag active devices that should
4083 * be ignored after this.
4084 */
4085 orig_state = ns->ns_state;
4086 if (nvme_active_ns(ns)) {
4087 /*
4088 * If the device previously had blkdev active, then that is its
4089 * current state. Otherwise, we consider this an upgrade and
4090 * just set it to not ignored.
4091 */
4092 if (orig_state == NVME_NS_STATE_ATTACHED) {
4093 ns->ns_state = NVME_NS_STATE_ATTACHED;
4094 } else {
4095 ns->ns_state = NVME_NS_STATE_NOT_IGNORED;
4096 }
4097 } else if (nvme_allocated_ns(ns)) {
4098 ns->ns_state = NVME_NS_STATE_ALLOCATED;
4099 } else {
4100 ns->ns_state = NVME_NS_STATE_UNALLOCATED;
4101 }
4102
4103 ns->ns_block_count = idns->id_nsize;
4104 ns->ns_block_size =
4105 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
4106 ns->ns_best_block_size = ns->ns_block_size;
4107
4108 /*
4109 * Get the EUI64 if present.
4110 */
4111 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
4112 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
4113
4114 /*
4115 * Get the NGUID if present.
4116 */
4117 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
4118 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
4119
4120 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
4121 if (*(uint64_t *)ns->ns_eui64 == 0)
4122 nvme_prepare_devid(nvme, ns->ns_id);
4123
4124 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id);
4125
4126 /*
4127 * Find the LBA format with no metadata and the best relative
4128 * performance. A value of 3 means "degraded", 0 is best.
4129 */
4130 for (uint32_t j = 0, last_rp = 3; j <= idns->id_nlbaf; j++) {
4131 if (idns->id_lbaf[j].lbaf_lbads == 0)
4132 break;
4133 if (idns->id_lbaf[j].lbaf_ms != 0)
4134 continue;
4135 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
4136 continue;
4137 last_rp = idns->id_lbaf[j].lbaf_rp;
4138 ns->ns_best_block_size =
4139 1 << idns->id_lbaf[j].lbaf_lbads;
4140 }
4141
4142 if (ns->ns_best_block_size < nvme->n_min_block_size)
4143 ns->ns_best_block_size = nvme->n_min_block_size;
4144
4145 /*
4146 * We currently don't support namespaces that are inactive, or use
4147 * either:
4148 * - protection information
4149 * - illegal block size (< 512)
4150 */
4151 if (ns->ns_state >= NVME_NS_STATE_NOT_IGNORED) {
4152 if (idns->id_dps.dp_pinfo) {
4153 dev_err(nvme->n_dip, CE_WARN,
4154 "!ignoring namespace %d, unsupported feature: "
4155 "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
4156 ns->ns_state = NVME_NS_STATE_ACTIVE;
4157 }
4158
4159 if (ns->ns_block_size < 512) {
4160 dev_err(nvme->n_dip, CE_WARN,
4161 "!ignoring namespace %d, unsupported block size "
4162 "%"PRIu64, nsid, (uint64_t)ns->ns_block_size);
4163 ns->ns_state = NVME_NS_STATE_ACTIVE;
4164 }
4165 }
4166
4167 /*
4168 * If we were previously in a state where blkdev was active and suddenly
4169 * we think it should not be because ignore is set, then something has
4170 * gone behind our backs and this is not going to be recoverable.
4171 */
4172 if (orig_state == NVME_NS_STATE_ATTACHED &&
4173 ns->ns_state != NVME_NS_STATE_ATTACHED) {
4174 dev_err(nvme->n_dip, CE_PANIC, "namespace %u state "
4175 "unexpectedly changed and removed blkdev support!", nsid);
4176 }
4177
4178 /*
4179 * Keep a count of namespaces which are attachable.
4180 * See comments in nvme_bd_driveinfo() to understand its effect.
4181 */
4182 if (orig_state > NVME_NS_STATE_ACTIVE) {
4183 /*
4184 		 * Was attachable previously, but no longer is.
4185 * Discount it.
4186 */
4187 if (ns->ns_state < NVME_NS_STATE_NOT_IGNORED)
4188 nvme->n_namespaces_attachable--;
4189 } else if (ns->ns_state >= NVME_NS_STATE_NOT_IGNORED) {
4190 /*
4191 * Previously ignored, but now not. Count it.
4192 */
4193 nvme->n_namespaces_attachable++;
4194 }
4195
4196 return (DDI_SUCCESS);
4197 }
4198
4199 static boolean_t
4200 nvme_bd_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
4201 {
4202 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
4203 int ret;
4204
4205 ASSERT(nvme_mgmt_lock_held(nvme));
4206
4207 if (!nvme_ns_state_check(ns, com, nvme_bd_attach_states)) {
4208 return (B_FALSE);
4209 }
4210
4211 if (ns->ns_bd_hdl == NULL) {
4212 bd_ops_t ops = nvme_bd_ops;
4213
4214 if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
4215 ops.o_free_space = NULL;
4216
4217 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
4218 KM_SLEEP);
4219
4220 if (ns->ns_bd_hdl == NULL) {
4221 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
4222 "handle for namespace id %u", com->nioc_nsid);
4223 return (nvme_ioctl_error(com,
4224 NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0));
4225 }
4226 }
4227
4228 nvme_mgmt_bd_start(nvme);
4229 ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
4230 nvme_mgmt_bd_end(nvme);
4231 if (ret != DDI_SUCCESS) {
4232 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH,
4233 0, 0));
4234 }
4235
4236 ns->ns_state = NVME_NS_STATE_ATTACHED;
4237
4238 return (B_TRUE);
4239 }
4240
4241 static boolean_t
4242 nvme_bd_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
4243 {
4244 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
4245 int ret;
4246
4247 ASSERT(nvme_mgmt_lock_held(nvme));
4248
4249 if (!nvme_ns_state_check(ns, com, nvme_bd_detach_states)) {
4250 return (B_FALSE);
4251 }
4252
4253 nvme_mgmt_bd_start(nvme);
4254 ASSERT3P(ns->ns_bd_hdl, !=, NULL);
4255 ret = bd_detach_handle(ns->ns_bd_hdl);
4256 nvme_mgmt_bd_end(nvme);
4257
4258 if (ret != DDI_SUCCESS) {
4259 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0,
4260 0));
4261 }
4262
4263 ns->ns_state = NVME_NS_STATE_NOT_IGNORED;
4264 return (B_TRUE);
4265
4266 }
4267
4268 /*
4269 * Rescan the namespace information associated with the namespaces indicated by
4270 * ioc. They should not be attached to blkdev right now.
4271 */
4272 static void
4273 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid)
4274 {
4275 ASSERT(nvme_mgmt_lock_held(nvme));
4276 ASSERT3U(nsid, !=, 0);
4277
4278 if (nsid != NVME_NSID_BCAST) {
4279 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
4280
4281 ASSERT3U(ns->ns_state, <, NVME_NS_STATE_ATTACHED);
4282 (void) nvme_init_ns(nvme, nsid);
4283 return;
4284 }
4285
4286 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4287 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
4288
4289 ASSERT3U(ns->ns_state, <, NVME_NS_STATE_ATTACHED);
4290 (void) nvme_init_ns(nvme, i);
4291 }
4292 }
4293
4294 typedef struct nvme_quirk_table {
4295 uint16_t nq_vendor_id;
4296 uint16_t nq_device_id;
4297 nvme_quirk_t nq_quirks;
4298 } nvme_quirk_table_t;
4299
4300 static const nvme_quirk_table_t nvme_quirks[] = {
4301 { 0x1987, 0x5018, NVME_QUIRK_START_CID }, /* Phison E18 */
4302 };
4303
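/*
 * Apply any known device-specific quirks based on the controller's PCI vendor
 * and device IDs.
 */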
4304 static void
4305 nvme_detect_quirks(nvme_t *nvme)
4306 {
4307 for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) {
4308 const nvme_quirk_table_t *nqt = &nvme_quirks[i];
4309
4310 if (nqt->nq_vendor_id == nvme->n_vendor_id &&
4311 nqt->nq_device_id == nvme->n_device_id) {
4312 nvme->n_quirks = nqt->nq_quirks;
4313 return;
4314 }
4315 }
4316 }
4317
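/*
 * Bring up the controller: check its version and capabilities, reset and
 * re-enable it, set up the admin queue and interrupts, identify the controller
 * and its namespaces, and create the I/O queue pairs.
 */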
4318 static int
4319 nvme_init(nvme_t *nvme)
4320 {
4321 nvme_reg_cc_t cc = { 0 };
4322 nvme_reg_aqa_t aqa = { 0 };
4323 nvme_reg_asq_t asq = { 0 };
4324 nvme_reg_acq_t acq = { 0 };
4325 nvme_reg_cap_t cap;
4326 nvme_reg_vs_t vs;
4327 nvme_reg_csts_t csts;
4328 int i = 0;
4329 uint16_t nqueues;
4330 uint_t tq_threads;
4331 char model[sizeof (nvme->n_idctl->id_model) + 1];
4332 char *vendor, *product;
4333 uint32_t nsid;
4334
4335 /* Check controller version */
4336 vs.r = nvme_get32(nvme, NVME_REG_VS);
4337 nvme->n_version.v_major = vs.b.vs_mjr;
4338 nvme->n_version.v_minor = vs.b.vs_mnr;
4339 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d\n",
4340 nvme->n_version.v_major, nvme->n_version.v_minor);
4341
4342 if (nvme->n_version.v_major > nvme_version_major) {
4343 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
4344 nvme_version_major);
4345 if (nvme->n_strict_version)
4346 goto fail;
4347 }
4348
4349 /* retrieve controller configuration */
4350 cap.r = nvme_get64(nvme, NVME_REG_CAP);
4351
4352 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
4353 dev_err(nvme->n_dip, CE_WARN,
4354 "!NVM command set not supported by hardware");
4355 goto fail;
4356 }
4357
4358 nvme->n_nssr_supported = cap.b.cap_nssrs;
4359 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
4360 nvme->n_timeout = cap.b.cap_to;
4361 nvme->n_arbitration_mechanisms = cap.b.cap_ams;
4362 nvme->n_cont_queues_reqd = cap.b.cap_cqr;
4363 nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
4364
4365 /*
4366 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
4367 * the base page size of 4k (1<<12), so add 12 here to get the real
4368 * page size value.
4369 */
4370 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
4371 cap.b.cap_mpsmax + 12);
4372 nvme->n_pagesize = 1UL << (nvme->n_pageshift);
4373
4374 /*
4375 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
4376 */
4377 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
4378 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4379
4380 /*
4381 * Set up PRP DMA to transfer 1 page-aligned page at a time.
4382 * Maxxfer may be increased after we identified the controller limits.
4383 */
4384 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
4385 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4386 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
4387 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
4388
4389 /*
4390 * Reset controller if it's still in ready state.
4391 */
4392 if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
4393 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
4394 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4395 nvme->n_dead = B_TRUE;
4396 goto fail;
4397 }
4398
4399 /*
4400 * Create the cq array with one completion queue to be assigned
4401 * to the admin queue pair and a limited number of taskqs (4).
4402 */
4403 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
4404 DDI_SUCCESS) {
4405 dev_err(nvme->n_dip, CE_WARN,
4406 "!failed to pre-allocate admin completion queue");
4407 goto fail;
4408 }
4409 /*
4410 * Create the admin queue pair.
4411 */
4412 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
4413 != DDI_SUCCESS) {
4414 dev_err(nvme->n_dip, CE_WARN,
4415 "!unable to allocate admin qpair");
4416 goto fail;
4417 }
4418 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
4419 nvme->n_ioq[0] = nvme->n_adminq;
4420
4421 if (nvme->n_quirks & NVME_QUIRK_START_CID)
4422 nvme->n_adminq->nq_next_cmd++;
4423
4424 nvme->n_progress |= NVME_ADMIN_QUEUE;
4425
4426 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4427 "admin-queue-len", nvme->n_admin_queue_len);
4428
4429 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
4430 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
4431 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
4432
4433 ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
4434 ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
4435
4436 nvme_put32(nvme, NVME_REG_AQA, aqa.r);
4437 nvme_put64(nvme, NVME_REG_ASQ, asq);
4438 nvme_put64(nvme, NVME_REG_ACQ, acq);
4439
4440 cc.b.cc_ams = 0; /* use Round-Robin arbitration */
4441 cc.b.cc_css = 0; /* use NVM command set */
4442 cc.b.cc_mps = nvme->n_pageshift - 12;
4443 cc.b.cc_shn = 0; /* no shutdown in progress */
4444 cc.b.cc_en = 1; /* enable controller */
4445 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */
4446 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */
4447
4448 nvme_put32(nvme, NVME_REG_CC, cc.r);
4449
4450 /*
4451 * Wait for the controller to become ready.
4452 */
4453 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4454 if (csts.b.csts_rdy == 0) {
4455 for (i = 0; i != nvme->n_timeout * 10; i++) {
4456 delay(drv_usectohz(50000));
4457 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4458
4459 if (csts.b.csts_cfs == 1) {
4460 dev_err(nvme->n_dip, CE_WARN,
4461 "!controller fatal status at init");
4462 ddi_fm_service_impact(nvme->n_dip,
4463 DDI_SERVICE_LOST);
4464 nvme->n_dead = B_TRUE;
4465 goto fail;
4466 }
4467
4468 if (csts.b.csts_rdy == 1)
4469 break;
4470 }
4471 }
4472
4473 if (csts.b.csts_rdy == 0) {
4474 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
4475 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4476 nvme->n_dead = B_TRUE;
4477 goto fail;
4478 }
4479
4480 /*
4481 * Assume an abort command limit of 1. We'll destroy and re-init
4482 * that later when we know the true abort command limit.
4483 */
4484 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
4485
4486 /*
4487 * Set up initial interrupt for admin queue.
4488 */
4489 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
4490 != DDI_SUCCESS) &&
4491 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
4492 != DDI_SUCCESS) &&
4493 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
4494 != DDI_SUCCESS)) {
4495 dev_err(nvme->n_dip, CE_WARN,
4496 "!failed to set up initial interrupt");
4497 goto fail;
4498 }
4499
4500 /*
4501 * Initialize the failure status we should use if we mark the controller
4502 * dead. Do this ahead of issuing any commands.
4503 */
4504 nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD;
4505
4506 /*
4507 * Identify Controller
4508 */
4509 if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL,
4510 (void **)&nvme->n_idctl)) {
4511 dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller");
4512 goto fail;
4513 }
4514
4515 /*
4516 * Process nvme-config-list (if present) in nvme.conf.
4517 */
4518 nvme_config_list(nvme);
4519
4520 /*
4521 * Get Vendor & Product ID
4522 */
4523 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
4524 model[sizeof (nvme->n_idctl->id_model)] = '\0';
4525 sata_split_model(model, &vendor, &product);
4526
4527 if (vendor == NULL)
4528 nvme->n_vendor = strdup("NVMe");
4529 else
4530 nvme->n_vendor = strdup(vendor);
4531
4532 nvme->n_product = strdup(product);
4533
4534 /*
4535 * Get controller limits.
4536 */
4537 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
4538 MIN(nvme->n_admin_queue_len / 10,
4539 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
4540
4541 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4542 "async-event-limit", nvme->n_async_event_limit);
4543
4544 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
4545
4546 /*
4547 * Reinitialize the semaphore with the true abort command limit
4548 * supported by the hardware. It's not necessary to disable interrupts
4549 * as only command aborts use the semaphore, and no commands are
4550 * executed or aborted while we're here.
4551 */
4552 sema_destroy(&nvme->n_abort_sema);
4553 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
4554 SEMA_DRIVER, NULL);
4555
4556 nvme->n_progress |= NVME_CTRL_LIMITS;
4557
4558 if (nvme->n_idctl->id_mdts == 0)
4559 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
4560 else
4561 nvme->n_max_data_transfer_size =
4562 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
4563
4564 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
4565
4566 /*
4567 * Limit n_max_data_transfer_size to what we can handle in one PRP.
4568 * Chained PRPs are currently unsupported.
4569 *
4570 * This is a no-op on hardware which doesn't support a transfer size
4571 * big enough to require chained PRPs.
4572 */
4573 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
4574 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
4575
4576 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
4577
4578 /*
4579 	 * Make sure the minimum required queue entry size is no larger, and
4580 	 * the maximum supported size no smaller, than the sizes we use.
4581 */
4582
4583 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
4584 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
4585 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
4586 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
4587 goto fail;
4588
4589 /*
4590 * Check for the presence of a Volatile Write Cache. If present,
4591 * enable or disable based on the value of the property
4592 * volatile-write-cache-enable (default is enabled).
4593 */
4594 nvme->n_write_cache_present =
4595 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
4596
4597 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4598 "volatile-write-cache-present",
4599 nvme->n_write_cache_present ? 1 : 0);
4600
4601 if (!nvme->n_write_cache_present) {
4602 nvme->n_write_cache_enabled = B_FALSE;
4603 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
4604 != 0) {
4605 dev_err(nvme->n_dip, CE_WARN,
4606 "!failed to %sable volatile write cache",
4607 nvme->n_write_cache_enabled ? "en" : "dis");
4608 /*
4609 * Assume the cache is (still) enabled.
4610 */
4611 nvme->n_write_cache_enabled = B_TRUE;
4612 }
4613
4614 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4615 "volatile-write-cache-enable",
4616 nvme->n_write_cache_enabled ? 1 : 0);
4617
4618 /*
4619 * Get number of supported namespaces and allocate namespace array.
4620 */
4621 nvme->n_namespace_count = nvme->n_idctl->id_nn;
4622
4623 if (nvme->n_namespace_count == 0) {
4624 dev_err(nvme->n_dip, CE_WARN,
4625 "!controllers without namespaces are not supported");
4626 goto fail;
4627 }
4628
4629 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
4630 nvme->n_namespace_count, KM_SLEEP);
4631
4632 /*
4633 * Get the common namespace information if available. If not, we use the
4634 * information for nsid 1.
4635 */
4636 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
4637 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
4638 nsid = NVME_NSID_BCAST;
4639 } else {
4640 nsid = 1;
4641 }
4642
4643 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4644 (void **)&nvme->n_idcomns)) {
4645 dev_err(nvme->n_dip, CE_WARN, "!failed to identify common "
4646 "namespace information");
4647 goto fail;
4648 }
4649
4650 /*
4651 * Try to set up MSI/MSI-X interrupts.
4652 */
4653 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
4654 != 0) {
4655 nvme_release_interrupts(nvme);
4656
4657 nqueues = MIN(UINT16_MAX, ncpus);
4658
4659 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
4660 nqueues) != DDI_SUCCESS) &&
4661 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
4662 nqueues) != DDI_SUCCESS)) {
4663 dev_err(nvme->n_dip, CE_WARN,
4664 "!failed to set up MSI/MSI-X interrupts");
4665 goto fail;
4666 }
4667 }
4668
4669 /*
4670 * Create I/O queue pairs.
4671 */
4672
4673 if (nvme_set_nqueues(nvme) != 0) {
4674 dev_err(nvme->n_dip, CE_WARN,
4675 "!failed to set number of I/O queues to %d",
4676 nvme->n_intr_cnt);
4677 goto fail;
4678 }
4679
4680 /*
4681 * Reallocate I/O queue array
4682 */
4683 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
4684 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
4685 (nvme->n_submission_queues + 1), KM_SLEEP);
4686 nvme->n_ioq[0] = nvme->n_adminq;
4687
4688 /*
4689 * There should always be at least as many submission queues
4690 * as completion queues.
4691 */
4692 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
4693
4694 nvme->n_ioq_count = nvme->n_submission_queues;
4695
4696 nvme->n_io_squeue_len =
4697 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
4698
4699 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
4700 nvme->n_io_squeue_len);
4701
4702 /*
4703 * Pre-allocate completion queues.
4704 * When there are the same number of submission and completion
4705 * queues there is no value in having a larger completion
4706 * queue length.
4707 */
4708 if (nvme->n_submission_queues == nvme->n_completion_queues)
4709 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4710 nvme->n_io_squeue_len);
4711
4712 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4713 nvme->n_max_queue_entries);
4714
4715 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
4716 nvme->n_io_cqueue_len);
4717
4718 /*
4719 	 * Assign an equal number of taskq threads to each completion
4720 	 * queue, capping the total number of threads at the number
4721 	 * of CPUs.
4722 */
4723 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
4724
4725 /*
4726 * In case the calculation above is zero, we need at least one
4727 * thread per completion queue.
4728 */
4729 tq_threads = MAX(1, tq_threads);
4730
4731 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
4732 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
4733 dev_err(nvme->n_dip, CE_WARN,
4734 "!failed to pre-allocate completion queues");
4735 goto fail;
4736 }
4737
4738 /*
4739 	 * If we use fewer completion queues than interrupt vectors, return
4740 	 * some of the interrupt vectors back to the system.
4741 */
4742 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
4743 nvme_release_interrupts(nvme);
4744
4745 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
4746 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
4747 dev_err(nvme->n_dip, CE_WARN,
4748 "!failed to reduce number of interrupts");
4749 goto fail;
4750 }
4751 }
4752
4753 /*
4754 * Alloc & register I/O queue pairs
4755 */
4756
4757 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
4758 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
4759 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
4760 dev_err(nvme->n_dip, CE_WARN,
4761 "!unable to allocate I/O qpair %d", i);
4762 goto fail;
4763 }
4764
4765 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
4766 dev_err(nvme->n_dip, CE_WARN,
4767 "!unable to create I/O qpair %d", i);
4768 goto fail;
4769 }
4770 }
4771
4772 return (DDI_SUCCESS);
4773
4774 fail:
4775 (void) nvme_reset(nvme, B_FALSE);
4776 return (DDI_FAILURE);
4777 }
4778
4779 static uint_t
4780 nvme_intr(caddr_t arg1, caddr_t arg2)
4781 {
4782 nvme_t *nvme = (nvme_t *)arg1;
4783 int inum = (int)(uintptr_t)arg2;
4784 int ccnt = 0;
4785 int qnum;
4786
4787 if (inum >= nvme->n_intr_cnt)
4788 return (DDI_INTR_UNCLAIMED);
4789
4790 if (nvme->n_dead) {
4791 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
4792 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
4793 }
4794
4795 /*
4796 * The interrupt vector a queue uses is calculated as queue_idx %
4797 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
4798 * in steps of n_intr_cnt to process all queues using this vector.
4799 */
4800 for (qnum = inum;
4801 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
4802 qnum += nvme->n_intr_cnt) {
4803 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
4804 }
4805
4806 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
4807 }
4808
4809 static void
4810 nvme_release_interrupts(nvme_t *nvme)
4811 {
4812 int i;
4813
4814 for (i = 0; i < nvme->n_intr_cnt; i++) {
4815 if (nvme->n_inth[i] == NULL)
4816 break;
4817
4818 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4819 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
4820 else
4821 (void) ddi_intr_disable(nvme->n_inth[i]);
4822
4823 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
4824 (void) ddi_intr_free(nvme->n_inth[i]);
4825 }
4826
4827 kmem_free(nvme->n_inth, nvme->n_inth_sz);
4828 nvme->n_inth = NULL;
4829 nvme->n_inth_sz = 0;
4830
4831 nvme->n_progress &= ~NVME_INTERRUPTS;
4832 }
4833
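/*
 * Allocate and enable up to nqpairs interrupt vectors of the requested type,
 * registering nvme_intr() as the handler for each vector.
 */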
4834 static int
4835 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
4836 {
4837 int nintrs, navail, count;
4838 int ret;
4839 int i;
4840
4841 if (nvme->n_intr_types == 0) {
4842 ret = ddi_intr_get_supported_types(nvme->n_dip,
4843 &nvme->n_intr_types);
4844 if (ret != DDI_SUCCESS) {
4845 dev_err(nvme->n_dip, CE_WARN,
4846 "!%s: ddi_intr_get_supported types failed",
4847 __func__);
4848 return (ret);
4849 }
4850 #ifdef __x86
4851 if (get_hwenv() == HW_VMWARE)
4852 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
4853 #endif
4854 }
4855
4856 if ((nvme->n_intr_types & intr_type) == 0)
4857 return (DDI_FAILURE);
4858
4859 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
4860 if (ret != DDI_SUCCESS) {
4861 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
4862 __func__);
4863 return (ret);
4864 }
4865
4866 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
4867 if (ret != DDI_SUCCESS) {
4868 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
4869 __func__);
4870 return (ret);
4871 }
4872
4873 /* We want at most one interrupt per queue pair. */
4874 if (navail > nqpairs)
4875 navail = nqpairs;
4876
4877 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
4878 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
4879
4880 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
4881 &count, 0);
4882 if (ret != DDI_SUCCESS) {
4883 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
4884 __func__);
4885 goto fail;
4886 }
4887
4888 nvme->n_intr_cnt = count;
4889
4890 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
4891 if (ret != DDI_SUCCESS) {
4892 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
4893 __func__);
4894 goto fail;
4895 }
4896
4897 for (i = 0; i < count; i++) {
4898 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
4899 (void *)nvme, (void *)(uintptr_t)i);
4900 if (ret != DDI_SUCCESS) {
4901 dev_err(nvme->n_dip, CE_WARN,
4902 "!%s: ddi_intr_add_handler failed", __func__);
4903 goto fail;
4904 }
4905 }
4906
4907 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
4908
4909 for (i = 0; i < count; i++) {
4910 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4911 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
4912 else
4913 ret = ddi_intr_enable(nvme->n_inth[i]);
4914
4915 if (ret != DDI_SUCCESS) {
4916 dev_err(nvme->n_dip, CE_WARN,
4917 "!%s: enabling interrupt %d failed", __func__, i);
4918 goto fail;
4919 }
4920 }
4921
4922 nvme->n_intr_type = intr_type;
4923
4924 nvme->n_progress |= NVME_INTERRUPTS;
4925
4926 return (DDI_SUCCESS);
4927
4928 fail:
4929 nvme_release_interrupts(nvme);
4930
4931 return (ret);
4932 }
4933
4934 static int
4935 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
4936 {
4937 _NOTE(ARGUNUSED(arg));
4938
4939 pci_ereport_post(dip, fm_error, NULL);
4940 return (fm_error->fme_status);
4941 }
4942
4943 static void
4944 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
4945 void *b)
4946 {
4947 nvme_t *nvme = a;
4948
4949 nvme_ctrl_mark_dead(nvme, B_TRUE);
4950
4951 /*
4952 * Fail all outstanding commands, including those in the admin queue
4953 * (queue 0).
4954 */
4955 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
4956 nvme_qpair_t *qp = nvme->n_ioq[i];
4957
4958 mutex_enter(&qp->nq_mutex);
4959 for (size_t j = 0; j < qp->nq_nentry; j++) {
4960 nvme_cmd_t *cmd = qp->nq_cmd[j];
4961 nvme_cmd_t *u_cmd;
4962
4963 if (cmd == NULL) {
4964 continue;
4965 }
4966
4967 /*
4968 * Since we have the queue lock held the entire time we
4969 * iterate over it, it's not possible for the queue to
4970 * change underneath us. Thus, we don't need to check
4971 * that the return value of nvme_unqueue_cmd matches the
4972 * requested cmd to unqueue.
4973 */
4974 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
4975 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
4976 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
4977
4978 ASSERT3P(u_cmd, ==, cmd);
4979 }
4980 mutex_exit(&qp->nq_mutex);
4981 }
4982 }
4983
4984 /*
4985 * Open minor management
4986 */
4987 static int
4988 nvme_minor_comparator(const void *l, const void *r)
4989 {
4990 const nvme_minor_t *lm = l;
4991 const nvme_minor_t *rm = r;
4992
4993 if (lm->nm_minor > rm->nm_minor) {
4994 return (1);
4995 } else if (lm->nm_minor < rm->nm_minor) {
4996 return (-1);
4997 } else {
4998 return (0);
4999 }
5000 }
5001
5002 static void
5003 nvme_minor_free(nvme_minor_t *minor)
5004 {
5005 if (minor->nm_minor > 0) {
5006 ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN);
5007 id_free(nvme_open_minors, minor->nm_minor);
5008 minor->nm_minor = 0;
5009 }
5010 VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node));
5011 VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node));
5012 cv_destroy(&minor->nm_cv);
5013 kmem_free(minor, sizeof (nvme_minor_t));
5014 }
5015
5016 static nvme_minor_t *
5017 nvme_minor_find_by_dev(dev_t dev)
5018 {
5019 id_t id = (id_t)getminor(dev);
5020 nvme_minor_t search = { .nm_minor = id };
5021 nvme_minor_t *ret;
5022
5023 mutex_enter(&nvme_open_minors_mutex);
5024 ret = avl_find(&nvme_open_minors_avl, &search, NULL);
5025 mutex_exit(&nvme_open_minors_mutex);
5026
5027 return (ret);
5028 }
5029
5030 static int
5031 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
5032 {
5033 nvme_t *nvme;
5034 int instance;
5035 int nregs;
5036 off_t regsize;
5037 char name[32];
5038
5039 if (cmd != DDI_ATTACH)
5040 return (DDI_FAILURE);
5041
5042 instance = ddi_get_instance(dip);
5043
5044 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
5045 return (DDI_FAILURE);
5046
5047 nvme = ddi_get_soft_state(nvme_state, instance);
5048 ddi_set_driver_private(dip, nvme);
5049 nvme->n_dip = dip;
5050
5051 /*
5052 * Map PCI config space
5053 */
5054 if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) {
5055 dev_err(dip, CE_WARN, "!failed to map PCI config space");
5056 goto fail;
5057 }
5058 nvme->n_progress |= NVME_PCI_CONFIG;
5059
5060 /*
5061 * Get the various PCI IDs from config space
5062 */
5063 nvme->n_vendor_id =
5064 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID);
5065 nvme->n_device_id =
5066 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID);
5067 nvme->n_revision_id =
5068 pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID);
5069 nvme->n_subsystem_device_id =
5070 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID);
5071 nvme->n_subsystem_vendor_id =
5072 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID);
5073
5074 nvme_detect_quirks(nvme);
5075
5076 /*
5077 * Set up event handlers for hot removal. While npe(4D) supports the hot
5078 * removal event being injected for devices, the same is not true of all
5079 * of our possible parents (i.e. pci(4D) as of this writing). The most
5080 * common case where this shows up is in some virtualization environments.
5081 * We treat this as non-fatal so that devices still work, but we leave
5082 * this set up in such a way that if a nexus does grow support for this
5083 * we're good to go.
5084 */
5085 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
5086 &nvme->n_rm_cookie) == DDI_SUCCESS) {
5087 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
5088 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
5089 DDI_SUCCESS) {
5090 goto fail;
5091 }
5092 } else {
5093 nvme->n_ev_rm_cb_id = NULL;
5094 }
5095
5096 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
5097 nvme->n_progress |= NVME_MUTEX_INIT;
5098
5099 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5100 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
5101 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
5102 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
5103 B_TRUE : B_FALSE;
5104 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5105 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
5106 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5107 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
5108 /*
5109 * Double up the default for completion queues in case of
5110 * queue sharing.
5111 */
5112 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5113 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
5114 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5115 DDI_PROP_DONTPASS, "async-event-limit",
5116 NVME_DEFAULT_ASYNC_EVENT_LIMIT);
5117 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5118 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
5119 B_TRUE : B_FALSE;
5120 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5121 DDI_PROP_DONTPASS, "min-phys-block-size",
5122 NVME_DEFAULT_MIN_BLOCK_SIZE);
5123 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5124 DDI_PROP_DONTPASS, "max-submission-queues", -1);
5125 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
5126 DDI_PROP_DONTPASS, "max-completion-queues", -1);
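/*
 * A hypothetical nvme.conf fragment illustrating these tunables. The
 * property names are the ones read above; the values are arbitrary and
 * only meant as an example:
 *
 *	strict-version=1;
 *	admin-queue-len=256;
 *	io-squeue-len=1024;
 *	io-cqueue-len=2048;
 *	volatile-write-cache-enable=1;
 *	min-phys-block-size=4096;
 *	max-submission-queues=8;
 *	max-completion-queues=8;
 */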
5127
5128 if (!ISP2(nvme->n_min_block_size) ||
5129 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
5130 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
5131 "using default %d", ISP2(nvme->n_min_block_size) ?
5132 "too low" : "not a power of 2",
5133 NVME_DEFAULT_MIN_BLOCK_SIZE);
5134 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
5135 }
5136
5137 if (nvme->n_submission_queues != -1 &&
5138 (nvme->n_submission_queues < 1 ||
5139 nvme->n_submission_queues > UINT16_MAX)) {
5140 dev_err(dip, CE_WARN, "!\"max-submission-queues\"=%d is not "
5141 "valid. Must be [1..%d]", nvme->n_submission_queues,
5142 UINT16_MAX);
5143 nvme->n_submission_queues = -1;
5144 }
5145
5146 if (nvme->n_completion_queues != -1 &&
5147 (nvme->n_completion_queues < 1 ||
5148 nvme->n_completion_queues > UINT16_MAX)) {
5149 dev_err(dip, CE_WARN, "!\"max-completion-queues\"=%d is not "
5150 "valid. Must be [1..%d]", nvme->n_completion_queues,
5151 UINT16_MAX);
5152 nvme->n_completion_queues = -1;
5153 }
5154
5155 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
5156 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
5157 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
5158 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
5159
5160 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
5161 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
5162 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
5163 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
5164
5165 if (nvme->n_async_event_limit < 1)
5166 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
5167
5168 nvme->n_reg_acc_attr = nvme_reg_acc_attr;
5169 nvme->n_queue_dma_attr = nvme_queue_dma_attr;
5170 nvme->n_prp_dma_attr = nvme_prp_dma_attr;
5171 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
5172
5173 /*
5174 * Set up FMA support.
5175 */
5176 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
5177 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
5178 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
5179 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
5180
5181 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
5182
5183 if (nvme->n_fm_cap) {
5184 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
5185 nvme->n_reg_acc_attr.devacc_attr_access =
5186 DDI_FLAGERR_ACC;
5187
5188 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
5189 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
5190 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
5191 }
5192
5193 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
5194 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5195 pci_ereport_setup(dip);
5196
5197 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5198 ddi_fm_handler_register(dip, nvme_fm_errcb,
5199 (void *)nvme);
5200 }
5201
5202 nvme->n_progress |= NVME_FMA_INIT;
5203
5204 /*
5205 * The spec defines several register sets. Only the controller
5206 * registers (set 1) are currently used.
5207 */
5208 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
5209 nregs < 2 ||
5210 ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
5211 goto fail;
5212
5213 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
5214 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
5215 dev_err(dip, CE_WARN, "!failed to map regset 1");
5216 goto fail;
5217 }
5218
5219 nvme->n_progress |= NVME_REGS_MAPPED;
5220
5221 /*
5222 * Set up kstats
5223 */
5224 if (!nvme_stat_init(nvme)) {
5225 dev_err(dip, CE_WARN, "!failed to create device kstats");
5226 goto fail;
5227 }
5228 nvme->n_progress |= NVME_STAT_INIT;
5229
5230 /*
5231 * Create PRP DMA cache
5232 */
5233 (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
5234 ddi_driver_name(dip), ddi_get_instance(dip));
5235 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
5236 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
5237 NULL, (void *)nvme, NULL, 0);
5238
5239 if (nvme_init(nvme) != DDI_SUCCESS)
5240 goto fail;
5241
5242 /*
5243 * Initialize the driver with the UFM subsystem
5244 */
5245 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
5246 &nvme->n_ufmh, nvme) != 0) {
5247 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
5248 goto fail;
5249 }
5250 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
5251 ddi_ufm_update(nvme->n_ufmh);
5252 nvme->n_progress |= NVME_UFM_INIT;
5253
5254 nvme_mgmt_lock_init(&nvme->n_mgmt);
5255 nvme_lock_init(&nvme->n_lock);
5256 nvme->n_progress |= NVME_MGMT_INIT;
5257
5258 /*
5259 * Identify namespaces.
5260 */
5261 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
5262
5263 boolean_t minor_logged = B_FALSE;
5264 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5265 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5266
5267 nvme_lock_init(&ns->ns_lock);
5268 ns->ns_progress |= NVME_NS_LOCK;
5269
5270 /*
5271 * Namespaces start out in the active state. This is the
5272 * default state until we find out information about the
5273 * namespaces in more detail. nvme_init_ns() will go through and
5274 * determine what the proper state should be. It will also use
5275 * this state change to keep an accurate count of attachable
5276 * namespaces.
5277 */
5278 ns->ns_state = NVME_NS_STATE_ACTIVE;
5279 if (nvme_init_ns(nvme, i) != 0) {
5280 nvme_mgmt_unlock(nvme);
5281 goto fail;
5282 }
5283
5284 /*
5285 * We only create compat minor nodes for the namespace for the
5286 * first NVME_MINOR_MAX namespaces. Those that are beyond this
5287 * can only be accessed through the primary controller node,
5288 * which is generally fine as that's what libnvme uses and is
5289 * our preferred path. Not having a minor is better than not
5290 * having the namespace!
5291 */
5292 if (i > NVME_MINOR_MAX) {
5293 if (!minor_logged) {
5294 dev_err(dip, CE_WARN, "namespace minor "
5295 "creation limited to the first %u "
5296 "namespaces, device has %u",
5297 NVME_MINOR_MAX, nvme->n_namespace_count);
5298 minor_logged = B_TRUE;
5299 }
5300 continue;
5301 }
5302
5303 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
5304 NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
5305 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
5306 nvme_mgmt_unlock(nvme);
5307 dev_err(dip, CE_WARN,
5308 "!failed to create minor node for namespace %d", i);
5309 goto fail;
5310 }
5311 ns->ns_progress |= NVME_NS_MINOR;
5312 }
5313
5314 /*
5315 * Indicate that namespace initialization is complete so that the code
5316 * which marks the controller dead can evaluate every namespace lock.
5317 */
5318 nvme->n_progress |= NVME_NS_INIT;
5319
5320 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
5321 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) !=
5322 DDI_SUCCESS) {
5323 nvme_mgmt_unlock(nvme);
5324 dev_err(dip, CE_WARN, "nvme_attach: "
5325 "cannot create devctl minor node");
5326 goto fail;
5327 }
5328
5329 /*
5330 * Attempt to attach all namespaces that are in a reasonable state. This
5331 * should not fail attach.
5332 */
5333 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5334 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5335 nvme_ioctl_common_t com = { .nioc_nsid = i };
5336
5337 if (ns->ns_state < NVME_NS_STATE_NOT_IGNORED)
5338 continue;
5339
5340 if (!nvme_bd_attach_ns(nvme, &com) && com.nioc_drv_err !=
5341 NVME_IOCTL_E_UNSUP_ATTACH_NS) {
5342 dev_err(nvme->n_dip, CE_WARN, "!failed to attach "
5343 "namespace %d due to blkdev error (0x%x)", i,
5344 com.nioc_drv_err);
5345 }
5346 }
5347
5348 nvme_mgmt_unlock(nvme);
5349
5350 /*
5351 * As the last thing that we do, we finally go ahead and enable
5352 * asynchronous event notifications. Currently we rely upon whatever
5353 * defaults the device has for the events that we will receive. If we
5354 * enable this earlier, it's possible that we'll get events that we
5355 * cannot handle yet because all of our data structures are not valid.
5356 * The device will queue all asynchronous events on a per-log page basis
5357 * until we submit this. If the device is totally broken, it will have
5358 * likely failed our commands already. If we add support for configuring
5359 * which asynchronous events we would like to receive via the SET
5360 * FEATURES command, then we should do that as one of the first commands
5361 * we send in nvme_init().
5362 *
5363 * We start by assuming asynchronous events are supported. However, not
5364 * all devices (e.g. some versions of QEMU) support this, so we end up
5365 * tracking whether or not we think these actually work.
5366 */
5367 nvme->n_async_event_supported = B_TRUE;
5368 for (uint16_t i = 0; i < nvme->n_async_event_limit; i++) {
5369 nvme_async_event(nvme);
5370 }
5371
5372
5373 return (DDI_SUCCESS);
5374
5375 fail:
5376 /* attach successful anyway so that FMA can retire the device */
5377 if (nvme->n_dead)
5378 return (DDI_SUCCESS);
5379
5380 (void) nvme_detach(dip, DDI_DETACH);
5381
5382 return (DDI_FAILURE);
5383 }
5384
5385 static int
5386 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
5387 {
5388 int instance;
5389 nvme_t *nvme;
5390
5391 if (cmd != DDI_DETACH)
5392 return (DDI_FAILURE);
5393
5394 instance = ddi_get_instance(dip);
5395
5396 nvme = ddi_get_soft_state(nvme_state, instance);
5397
5398 if (nvme == NULL)
5399 return (DDI_FAILURE);
5400
5401 /*
5402 * Remove all minor nodes from the device regardless of the source in
5403 * one swoop.
5404 */
5405 ddi_remove_minor_node(dip, NULL);
5406
5407 /*
5408 * We need to remove the event handler as one of the first things that
5409 * we do. If we proceed with other teardown without removing the event
5410 * handler, we could end up in a very unfortunate race with ourselves.
5411 * The DDI does not serialize these with detach (just like timeout(9F)
5412 * and others).
5413 */
5414 if (nvme->n_ev_rm_cb_id != NULL) {
5415 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
5416 }
5417 nvme->n_ev_rm_cb_id = NULL;
5418
5419 /*
5420 * If the controller was marked dead, there is a slight chance that we
5421 * are asynchronously processing the removal taskq. Because we have
5422 * removed the callback handler above and all minor nodes and commands
5423 * are closed, there is no other way to get in here. As such, we wait on
5424 * the nvme_dead_taskq to complete so we can avoid tracking if it's
5425 * running or not.
5426 */
5427 taskq_wait(nvme_dead_taskq);
5428
5429 if (nvme->n_ns) {
5430 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5431 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5432
5433 if (ns->ns_bd_hdl) {
5434 (void) bd_detach_handle(ns->ns_bd_hdl);
5435 bd_free_handle(ns->ns_bd_hdl);
5436 }
5437
5438 if (ns->ns_idns)
5439 kmem_free(ns->ns_idns,
5440 sizeof (nvme_identify_nsid_t));
5441 if (ns->ns_devid)
5442 strfree(ns->ns_devid);
5443
5444 if ((ns->ns_progress & NVME_NS_LOCK) != 0)
5445 nvme_lock_fini(&ns->ns_lock);
5446 }
5447
5448 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
5449 nvme->n_namespace_count);
5450 }
5451
5452 if (nvme->n_progress & NVME_MGMT_INIT) {
5453 nvme_lock_fini(&nvme->n_lock);
5454 nvme_mgmt_lock_fini(&nvme->n_mgmt);
5455 }
5456
5457 if (nvme->n_progress & NVME_UFM_INIT) {
5458 ddi_ufm_fini(nvme->n_ufmh);
5459 mutex_destroy(&nvme->n_fwslot_mutex);
5460 }
5461
5462 if (nvme->n_progress & NVME_INTERRUPTS)
5463 nvme_release_interrupts(nvme);
5464
5465 for (uint_t i = 0; i < nvme->n_cq_count; i++) {
5466 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
5467 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
5468 }
5469
5470 if (nvme->n_progress & NVME_MUTEX_INIT) {
5471 mutex_destroy(&nvme->n_minor_mutex);
5472 }
5473
5474 if (nvme->n_ioq_count > 0) {
5475 for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) {
5476 if (nvme->n_ioq[i] != NULL) {
5477 /* TODO: send destroy queue commands */
5478 nvme_free_qpair(nvme->n_ioq[i]);
5479 }
5480 }
5481
5482 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
5483 (nvme->n_ioq_count + 1));
5484 }
5485
5486 if (nvme->n_prp_cache != NULL) {
5487 kmem_cache_destroy(nvme->n_prp_cache);
5488 }
5489
5490 if (nvme->n_progress & NVME_REGS_MAPPED) {
5491 nvme_shutdown(nvme, B_FALSE);
5492 (void) nvme_reset(nvme, B_FALSE);
5493 }
5494
5495 if (nvme->n_progress & NVME_CTRL_LIMITS)
5496 sema_destroy(&nvme->n_abort_sema);
5497
5498 if (nvme->n_progress & NVME_ADMIN_QUEUE)
5499 nvme_free_qpair(nvme->n_adminq);
5500
5501 if (nvme->n_cq_count > 0) {
5502 nvme_destroy_cq_array(nvme, 0);
5503 nvme->n_cq = NULL;
5504 nvme->n_cq_count = 0;
5505 }
5506
5507 if (nvme->n_idcomns)
5508 kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE);
5509
5510 if (nvme->n_idctl)
5511 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
5512
5513 if (nvme->n_progress & NVME_REGS_MAPPED)
5514 ddi_regs_map_free(&nvme->n_regh);
5515
5516 if (nvme->n_progress & NVME_STAT_INIT)
5517 nvme_stat_cleanup(nvme);
5518
5519 if (nvme->n_progress & NVME_FMA_INIT) {
5520 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5521 ddi_fm_handler_unregister(nvme->n_dip);
5522
5523 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
5524 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5525 pci_ereport_teardown(nvme->n_dip);
5526
5527 ddi_fm_fini(nvme->n_dip);
5528 }
5529
5530 if (nvme->n_progress & NVME_PCI_CONFIG)
5531 pci_config_teardown(&nvme->n_pcicfg_handle);
5532
5533 if (nvme->n_vendor != NULL)
5534 strfree(nvme->n_vendor);
5535
5536 if (nvme->n_product != NULL)
5537 strfree(nvme->n_product);
5538
5539 ddi_soft_state_free(nvme_state, instance);
5540
5541 return (DDI_SUCCESS);
5542 }
5543
5544 static int
5545 nvme_quiesce(dev_info_t *dip)
5546 {
5547 int instance;
5548 nvme_t *nvme;
5549
5550 instance = ddi_get_instance(dip);
5551
5552 nvme = ddi_get_soft_state(nvme_state, instance);
5553
5554 if (nvme == NULL)
5555 return (DDI_FAILURE);
5556
5557 nvme_shutdown(nvme, B_TRUE);
5558
5559 (void) nvme_reset(nvme, B_TRUE);
5560
5561 return (DDI_SUCCESS);
5562 }
5563
5564 static int
5565 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
5566 {
5567 nvme_t *nvme = cmd->nc_nvme;
5568 uint_t nprp_per_page, nprp;
5569 uint64_t *prp;
5570 const ddi_dma_cookie_t *cookie;
5571 uint_t idx;
5572 uint_t ncookies = ddi_dma_ncookies(dma);
5573
5574 if (ncookies == 0)
5575 return (DDI_FAILURE);
5576
5577 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
5578 return (DDI_FAILURE);
5579 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
5580
5581 if (ncookies == 1) {
5582 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5583 return (DDI_SUCCESS);
5584 } else if (ncookies == 2) {
5585 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
5586 return (DDI_FAILURE);
5587 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
5588 return (DDI_SUCCESS);
5589 }
5590
5591 /*
5592 * At this point, we're always operating on cookies at
5593 * index >= 1 and writing the addresses of those cookies
5594 * into a new page. The address of that page is stored
5595 * as the second PRP entry.
5596 */
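/*
 * As a concrete (illustrative) example: a transfer built from three
 * cookies places cookie 0 in d_prp[0] and writes the addresses of
 * cookies 1 and 2 into the PRP list page whose address is stored in
 * d_prp[1].
 */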
5597 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
5598 ASSERT(nprp_per_page > 0);
5599
5600 /*
5601 * We currently don't support chained PRPs and set up our DMA
5602 * attributes to reflect that. If we still get an I/O request
5603 * that needs a chained PRP something is very wrong. Account
5604 * for the first cookie here, which we've placed in d_prp[0].
5605 */
5606 nprp = howmany(ncookies - 1, nprp_per_page);
5607 VERIFY(nprp == 1);
5608
5609 /*
5610 * Allocate a page of pointers, in which we'll write the
5611 * addresses of cookies 1 to `ncookies`.
5612 */
5613 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
5614 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5615 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
5616
5617 prp = (uint64_t *)cmd->nc_prp->nd_memp;
5618 for (idx = 1; idx < ncookies; idx++) {
5619 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
5620 return (DDI_FAILURE);
5621 *prp++ = cookie->dmac_laddress;
5622 }
5623
5624 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5625 DDI_DMA_SYNC_FORDEV);
5626 return (DDI_SUCCESS);
5627 }
5628
5629 /*
5630 * The maximum number of requests supported for a deallocate request is
5631 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
5632 * unchanged through at least 1.4a). The definition of nvme_range_t is also
5633 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
5634 * a deallocate request will fit into the smallest supported namespace page
5635 * (4k).
5636 */
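/*
 * Concretely: each nvme_range_t is 16 bytes (4-byte context attributes,
 * 4-byte length, 8-byte starting LBA), so 256 * 16 == 4096, which is
 * what the CTASSERT below verifies.
 */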
5637 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
5638
5639 static int
5640 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
5641 int allocflag)
5642 {
5643 const dkioc_free_list_t *dfl = xfer->x_dfl;
5644 const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
5645 nvme_t *nvme = cmd->nc_nvme;
5646 nvme_range_t *ranges = NULL;
5647 uint_t i;
5648
5649 /*
5650 * The number of ranges in the request is zero-based (that is
5651 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
5652 * word10 == 255 -> 256 ranges). Therefore the allowed values are
5653 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
5654 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
5655 * in blkdev.
5656 */
5657 VERIFY3U(dfl->dfl_num_exts, >, 0);
5658 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
5659 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
5660
5661 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
5662
5663 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
5664 if (cmd->nc_prp == NULL)
5665 return (DDI_FAILURE);
5666
5667 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5668 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
5669
5670 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
5671 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5672
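/*
 * Convert each byte-based free extent from blkdev into a deallocate
 * range expressed as a starting LBA and a length in blocks.
 */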
5673 for (i = 0; i < dfl->dfl_num_exts; i++) {
5674 uint64_t lba, len;
5675
5676 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
5677 len = exts[i].dfle_length / blocksize;
5678
5679 VERIFY3U(len, <=, UINT32_MAX);
5680
5681 /* No context attributes for a deallocate request */
5682 ranges[i].nr_ctxattr = 0;
5683 ranges[i].nr_len = len;
5684 ranges[i].nr_lba = lba;
5685 }
5686
5687 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5688 DDI_DMA_SYNC_FORDEV);
5689
5690 return (DDI_SUCCESS);
5691 }
5692
5693 static nvme_cmd_t *
5694 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
5695 {
5696 nvme_t *nvme = ns->ns_nvme;
5697 nvme_cmd_t *cmd;
5698 int allocflag;
5699
5700 /*
5701 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
5702 */
5703 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
5704 cmd = nvme_alloc_cmd(nvme, allocflag);
5705
5706 if (cmd == NULL)
5707 return (NULL);
5708
5709 cmd->nc_sqe.sqe_opc = opc;
5710 cmd->nc_callback = nvme_bd_xfer_done;
5711 cmd->nc_xfer = xfer;
5712
5713 switch (opc) {
5714 case NVME_OPC_NVM_WRITE:
5715 case NVME_OPC_NVM_READ:
5716 VERIFY(xfer->x_nblks <= 0x10000);
5717
5718 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5719
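/*
 * cdw10/cdw11 hold the low and high 32 bits of the starting LBA;
 * cdw12 holds the 0-based number of logical blocks to transfer.
 */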
5720 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
5721 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
5722 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
5723
5724 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
5725 goto fail;
5726 break;
5727
5728 case NVME_OPC_NVM_FLUSH:
5729 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5730 break;
5731
5732 case NVME_OPC_NVM_DSET_MGMT:
5733 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5734
5735 if (nvme_fill_ranges(cmd, xfer,
5736 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
5737 goto fail;
5738 break;
5739
5740 default:
5741 goto fail;
5742 }
5743
5744 return (cmd);
5745
5746 fail:
5747 nvme_free_cmd(cmd);
5748 return (NULL);
5749 }
5750
5751 static void
5752 nvme_bd_xfer_done(void *arg)
5753 {
5754 nvme_cmd_t *cmd = arg;
5755 bd_xfer_t *xfer = cmd->nc_xfer;
5756 int error = 0;
5757
5758 error = nvme_check_cmd_status(cmd);
5759 nvme_free_cmd(cmd);
5760
5761 bd_xfer_done(xfer, error);
5762 }
5763
5764 static void
5765 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
5766 {
5767 nvme_namespace_t *ns = arg;
5768 nvme_t *nvme = ns->ns_nvme;
5769 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
5770
5771 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5772
5773 /*
5774 * Set the blkdev qcount to the number of submission queues.
5775 * It will then create one waitq/runq pair for each submission
5776 * queue and spread I/O requests across the queues.
5777 */
5778 drive->d_qcount = nvme->n_ioq_count;
5779
5780 /*
5781 * I/O activity to individual namespaces is distributed across
5782 * each of the d_qcount blkdev queues (which has been set to
5783 * the number of nvme submission queues). d_qsize is the number
5784 * of submitted and not completed I/Os within each queue that blkdev
5785 * will allow before it starts holding them in the waitq.
5786 *
5787 * Each namespace will create a child blkdev instance; for each one
5788 * we try to set the d_qsize so that each namespace gets an
5789 * equal portion of the submission queue.
5790 *
5791 * If, post instantiation of the nvme drive, n_namespaces_attachable
5792 * changes and a namespace is attached, it could calculate a
5793 * different d_qsize. It may even be that the sum of the d_qsizes is
5794 * now beyond the submission queue size. Should that be the case
5795 * and the I/O rate is such that blkdev attempts to submit more
5796 * I/Os than the size of the submission queue, the excess I/Os
5797 * will be held behind the semaphore nq_sema.
5798 */
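/*
 * As an illustrative example, with io-squeue-len=1024 and four
 * attachable namespaces, each child blkdev instance ends up with
 * d_qsize = 1024 / 4 = 256 outstanding I/Os.
 */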
5799 drive->d_qsize = nvme->n_io_squeue_len / ns_count;
5800
5801 /*
5802 * Don't let the queue size drop below the minimum, though.
5803 */
5804 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
5805
5806 /*
5807 * d_maxxfer is not set, which means the value is taken from the DMA
5808 * attributes specified to bd_alloc_handle.
5809 */
5810
5811 drive->d_removable = B_FALSE;
5812 drive->d_hotpluggable = B_FALSE;
5813
5814 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
5815 drive->d_target = ns->ns_id;
5816 drive->d_lun = 0;
5817
5818 drive->d_model = nvme->n_idctl->id_model;
5819 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
5820 drive->d_vendor = nvme->n_vendor;
5821 drive->d_vendor_len = strlen(nvme->n_vendor);
5822 drive->d_product = nvme->n_product;
5823 drive->d_product_len = strlen(nvme->n_product);
5824 drive->d_serial = nvme->n_idctl->id_serial;
5825 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
5826 drive->d_revision = nvme->n_idctl->id_fwrev;
5827 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
5828
5829 /*
5830 * If we support the dataset management command, the only restrictions
5831 * on a discard request are the maximum number of ranges (segments)
5832 * per single request.
5833 */
5834 if (nvme->n_idctl->id_oncs.on_dset_mgmt)
5835 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
5836
5837 nvme_mgmt_unlock(nvme);
5838 }
5839
5840 static int
5841 nvme_bd_mediainfo(void *arg, bd_media_t *media)
5842 {
5843 nvme_namespace_t *ns = arg;
5844 nvme_t *nvme = ns->ns_nvme;
5845
5846 if (nvme->n_dead) {
5847 return (EIO);
5848 }
5849
5850 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5851
5852 media->m_nblks = ns->ns_block_count;
5853 media->m_blksize = ns->ns_block_size;
5854 media->m_readonly = B_FALSE;
5855 media->m_solidstate = B_TRUE;
5856
5857 media->m_pblksize = ns->ns_best_block_size;
5858
5859 nvme_mgmt_unlock(nvme);
5860
5861 return (0);
5862 }
5863
5864 static int
5865 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
5866 {
5867 nvme_t *nvme = ns->ns_nvme;
5868 nvme_cmd_t *cmd;
5869 nvme_qpair_t *ioq;
5870 boolean_t poll;
5871 int ret;
5872
5873 if (nvme->n_dead) {
5874 return (EIO);
5875 }
5876
5877 cmd = nvme_create_nvm_cmd(ns, opc, xfer);
5878 if (cmd == NULL)
5879 return (ENOMEM);
5880
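/*
 * blkdev queue numbers are zero-based while I/O submission queue IDs
 * start at 1 (queue 0 is the admin queue), hence the +1 here.
 */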
5881 cmd->nc_sqid = xfer->x_qnum + 1;
5882 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
5883 ioq = nvme->n_ioq[cmd->nc_sqid];
5884
5885 /*
5886 * Get the polling flag before submitting the command. The command may
5887 * complete immediately after it was submitted, which means we must
5888 * treat both cmd and xfer as if they have been freed already.
5889 */
5890 poll = (xfer->x_flags & BD_XFER_POLL) != 0;
5891
5892 ret = nvme_submit_io_cmd(ioq, cmd);
5893
5894 if (ret != 0)
5895 return (ret);
5896
5897 if (!poll)
5898 return (0);
5899
5900 do {
5901 cmd = nvme_retrieve_cmd(nvme, ioq);
5902 if (cmd != NULL) {
5903 ASSERT0(cmd->nc_flags & NVME_CMD_F_USELOCK);
5904 cmd->nc_callback(cmd);
5905 } else {
5906 drv_usecwait(10);
5907 }
5908 } while (ioq->nq_active_cmds != 0);
5909
5910 return (0);
5911 }
5912
5913 static int
5914 nvme_bd_read(void *arg, bd_xfer_t *xfer)
5915 {
5916 nvme_namespace_t *ns = arg;
5917
5918 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
5919 }
5920
5921 static int
5922 nvme_bd_write(void *arg, bd_xfer_t *xfer)
5923 {
5924 nvme_namespace_t *ns = arg;
5925
5926 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
5927 }
5928
5929 static int
5930 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
5931 {
5932 nvme_namespace_t *ns = arg;
5933
5934 if (ns->ns_nvme->n_dead)
5935 return (EIO);
5936
5937 /*
5938 * If the volatile write cache is not present, flushing is unsupported
5939 * (ENOTSUP); if it is present but not enabled, FLUSH is a no-op (success).
5940 */
5941 if (!ns->ns_nvme->n_write_cache_present) {
5942 bd_xfer_done(xfer, ENOTSUP);
5943 return (0);
5944 }
5945
5946 if (!ns->ns_nvme->n_write_cache_enabled) {
5947 bd_xfer_done(xfer, 0);
5948 return (0);
5949 }
5950
5951 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
5952 }
5953
5954 static int
5955 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
5956 {
5957 nvme_namespace_t *ns = arg;
5958 nvme_t *nvme = ns->ns_nvme;
5959
5960 if (nvme->n_dead) {
5961 return (EIO);
5962 }
5963
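/*
 * Prefer a non-zero NGUID, then a non-zero EUI64, and finally fall back
 * to the namespace ID based devid string.
 */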
5964 if (*(uint64_t *)ns->ns_nguid != 0 ||
5965 *(uint64_t *)(ns->ns_nguid + 8) != 0) {
5966 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
5967 sizeof (ns->ns_nguid), ns->ns_nguid, devid));
5968 } else if (*(uint64_t *)ns->ns_eui64 != 0) {
5969 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
5970 sizeof (ns->ns_eui64), ns->ns_eui64, devid));
5971 } else {
5972 return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
5973 strlen(ns->ns_devid), ns->ns_devid, devid));
5974 }
5975 }
5976
5977 static int
5978 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
5979 {
5980 nvme_namespace_t *ns = arg;
5981
5982 if (xfer->x_dfl == NULL)
5983 return (EINVAL);
5984
5985 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
5986 return (ENOTSUP);
5987
5988 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
5989 }
5990
5991 static int
5992 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
5993 {
5994 #ifndef __lock_lint
5995 _NOTE(ARGUNUSED(cred_p));
5996 #endif
5997 nvme_t *nvme;
5998 nvme_minor_t *minor = NULL;
5999 uint32_t nsid;
6000 minor_t m = getminor(*devp);
6001 int rv = 0;
6002
6003 if (otyp != OTYP_CHR)
6004 return (EINVAL);
6005
6006 if (m >= NVME_OPEN_MINOR_MIN)
6007 return (ENXIO);
6008
6009 nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m));
6010 nsid = NVME_MINOR_NSID(m);
6011
6012 if (nvme == NULL)
6013 return (ENXIO);
6014
6015 if (nsid > MIN(nvme->n_namespace_count, NVME_MINOR_MAX))
6016 return (ENXIO);
6017
6018 if (nvme->n_dead)
6019 return (EIO);
6020
6021 /*
6022 * At this point, we're going to allow an open to proceed on this
6023 * device. We need to allocate a new instance for this (presuming one is
6024 * available).
6025 */
6026 minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY);
6027 if (minor == NULL) {
6028 return (ENOMEM);
6029 }
6030
6031 cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL);
6032 list_link_init(&minor->nm_ctrl_lock.nli_node);
6033 minor->nm_ctrl_lock.nli_nvme = nvme;
6034 minor->nm_ctrl_lock.nli_minor = minor;
6035 list_link_init(&minor->nm_ns_lock.nli_node);
6036 minor->nm_ns_lock.nli_nvme = nvme;
6037 minor->nm_ns_lock.nli_minor = minor;
6038 minor->nm_minor = id_alloc_nosleep(nvme_open_minors);
6039 if (minor->nm_minor == -1) {
6040 nvme_minor_free(minor);
6041 return (ENOSPC);
6042 }
6043
6044 minor->nm_ctrl = nvme;
6045 if (nsid != 0) {
6046 minor->nm_ns = nvme_nsid2ns(nvme, nsid);
6047 }
6048
6049 /*
6050 * Before we check for exclusive access and attempt a lock if requested,
6051 * ensure that this minor is persisted.
6052 */
6053 mutex_enter(&nvme_open_minors_mutex);
6054 avl_add(&nvme_open_minors_avl, minor);
6055 mutex_exit(&nvme_open_minors_mutex);
6056
6057 /*
6058 * A request to open this FEXCL is translated into a non-blocking
6059 * write lock of the appropriate entity. This honors the original
6060 * semantics here. In the future, we should see if we can remove this
6061 * and turn a request for FEXCL at open into ENOTSUP.
6062 */
6063 mutex_enter(&nvme->n_minor_mutex);
6064 if ((flag & FEXCL) != 0) {
6065 nvme_ioctl_lock_t lock = {
6066 .nil_level = NVME_LOCK_L_WRITE,
6067 .nil_flags = NVME_LOCK_F_DONT_BLOCK
6068 };
6069
6070 if (minor->nm_ns != NULL) {
6071 lock.nil_ent = NVME_LOCK_E_NS;
6072 lock.nil_common.nioc_nsid = nsid;
6073 } else {
6074 lock.nil_ent = NVME_LOCK_E_CTRL;
6075 }
6076 nvme_rwlock(minor, &lock);
6077 if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
6078 mutex_exit(&nvme->n_minor_mutex);
6079
6080 mutex_enter(&nvme_open_minors_mutex);
6081 avl_remove(&nvme_open_minors_avl, minor);
6082 mutex_exit(&nvme_open_minors_mutex);
6083
6084 nvme_minor_free(minor);
6085 return (EBUSY);
6086 }
6087 }
6088 mutex_exit(&nvme->n_minor_mutex);
6089
6090 *devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor);
6091 return (rv);
6092
6093 }
6094
6095 static int
6096 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused)
6097 {
6098 nvme_minor_t *minor;
6099 nvme_t *nvme;
6100
6101 if (otyp != OTYP_CHR) {
6102 return (ENXIO);
6103 }
6104
6105 minor = nvme_minor_find_by_dev(dev);
6106 if (minor == NULL) {
6107 return (ENXIO);
6108 }
6109
6110 mutex_enter(&nvme_open_minors_mutex);
6111 avl_remove(&nvme_open_minors_avl, minor);
6112 mutex_exit(&nvme_open_minors_mutex);
6113
6114 /*
6115 * When this device is being closed, we must ensure that any locks held
6116 * by this are dealt with.
6117 */
6118 nvme = minor->nm_ctrl;
6119 mutex_enter(&nvme->n_minor_mutex);
6120 ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
6121 ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
6122
6123 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
6124 VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL);
6125 nvme_rwunlock(&minor->nm_ctrl_lock,
6126 minor->nm_ctrl_lock.nli_lock);
6127 }
6128
6129 if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
6130 VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL);
6131 nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock);
6132 }
6133 mutex_exit(&nvme->n_minor_mutex);
6134
6135 nvme_minor_free(minor);
6136
6137 return (0);
6138 }
6139
6140 void
6141 nvme_ioctl_success(nvme_ioctl_common_t *ioc)
6142 {
6143 ioc->nioc_drv_err = NVME_IOCTL_E_OK;
6144 ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS;
6145 ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC;
6146 }
6147
6148 boolean_t
6149 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct,
6150 uint32_t sc)
6151 {
6152 ioc->nioc_drv_err = err;
6153 ioc->nioc_ctrl_sct = sct;
6154 ioc->nioc_ctrl_sc = sc;
6155
6156 return (B_FALSE);
6157 }
6158
6159 static int
6160 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode)
6161 {
6162 nvme_ioctl_common_t ioc;
6163
6164 ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR);
6165 bzero(&ioc, sizeof (ioc));
6166 if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t),
6167 mode & FKIOCTL) != 0) {
6168 return (EFAULT);
6169 }
6170 return (0);
6171 }
6172
6173 /*
6174 * The companion to the namespace checking. This occurs after any rewriting
6175 * occurs. This is the primary point that we attempt to enforce any operation's
6176 * exclusivity. Note, it is theoretically possible for an operation to be
6177 * ongoing and to have someone with an exclusive lock ask to unlock it for some
6178 * reason. The driver does not track how many such operations are in flight.
6179 * While perhaps this is leaving too much up to the user, by the same token we
6180 * don't try to stop them from issuing two different format NVM commands
6181 * targeting the whole device at the same time either, even though the
6182 * controller would really rather that didn't happen.
6183 */
6184 static boolean_t
6185 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
6186 const nvme_ioctl_check_t *check)
6187 {
6188 nvme_t *const nvme = minor->nm_ctrl;
6189 nvme_namespace_t *ns;
6190 boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl;
6191
6192 /*
6193 * If the command doesn't require anything, then we're done.
6194 */
6195 if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) {
6196 return (B_TRUE);
6197 }
6198
6199 if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) {
6200 ns = NULL;
6201 } else {
6202 ns = nvme_nsid2ns(nvme, ioc->nioc_nsid);
6203 }
6204
6205 mutex_enter(&nvme->n_minor_mutex);
6206 ctrl_is_excl = nvme->n_lock.nl_writer != NULL;
6207 have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock;
6208 if (ns != NULL) {
6209 /*
6210 * We explicitly test the namespace lock's writer versus asking
6211 * the minor because the minor's namespace lock may apply to a
6212 * different namespace.
6213 */
6214 ns_is_excl = ns->ns_lock.nl_writer != NULL;
6215 have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock;
6216 ASSERT0(have_ctrl && have_ns);
6217 #ifdef DEBUG
6218 if (have_ns) {
6219 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns);
6220 }
6221 #endif
6222 } else {
6223 ns_is_excl = B_FALSE;
6224 have_ns = B_FALSE;
6225 }
6226 ASSERT0(ctrl_is_excl && ns_is_excl);
6227 mutex_exit(&nvme->n_minor_mutex);
6228
6229 if (check->nck_excl == NVME_IOCTL_EXCL_CTRL) {
6230 if (have_ctrl) {
6231 return (B_TRUE);
6232 }
6233
6234 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NEED_CTRL_WRLOCK,
6235 0, 0));
6236 }
6237
6238 if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) {
6239 if (ns == NULL) {
6240 if (have_ctrl) {
6241 return (B_TRUE);
6242 }
6243 return (nvme_ioctl_error(ioc,
6244 NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0));
6245 } else {
6246 if (have_ctrl || have_ns) {
6247 return (B_TRUE);
6248 }
6249 return (nvme_ioctl_error(ioc,
6250 NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0));
6251 }
6252 }
6253
6254 /*
6255 * Now we have an operation that does not require exclusive access. We
6256 * can proceed as long as no one else has it or if someone does it is
6257 * us. Regardless of what we target, a controller lock will stop us.
6258 */
6259 if (ctrl_is_excl && !have_ctrl) {
6260 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0));
6261 }
6262
6263 /*
6264 * Only check namespace exclusivity if we are targeting one.
6265 */
6266 if (ns != NULL && ns_is_excl && !have_ns) {
6267 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0));
6268 }
6269
6270 return (B_TRUE);
6271 }
6272
6273 /*
6274 * Perform common checking as to whether or not an ioctl operation may proceed.
6275 * We check in this function various aspects of the namespace attributes that
6276 * it's calling on. Once the namespace attributes and any possible rewriting
6277 * have been performed, then we proceed to check whether or not the requisite
6278 * exclusive access is present in nvme_ioctl_excl_check().
6279 */
6280 static boolean_t
6281 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
6282 const nvme_ioctl_check_t *check)
6283 {
6284 /*
6285 * If the minor has a namespace pointer, then it is constrained to that
6286 * namespace. If a namespace is allowed, then there are only two valid
6287 * values that we can find. The first is matching the minor. The second
6288 * is our value zero, which will be transformed to the current
6289 * namespace.
6290 */
6291 if (minor->nm_ns != NULL) {
6292 if (!check->nck_ns_ok || !check->nck_ns_minor_ok) {
6293 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0,
6294 0));
6295 }
6296
6297 if (ioc->nioc_nsid == 0) {
6298 ioc->nioc_nsid = minor->nm_ns->ns_id;
6299 } else if (ioc->nioc_nsid != minor->nm_ns->ns_id) {
6300 return (nvme_ioctl_error(ioc,
6301 NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0));
6302 }
6303
6304 return (nvme_ioctl_excl_check(minor, ioc, check));
6305 }
6306
6307 /*
6308 * If we've been told to skip checking the controller, here's where we
6309 * do that. This should really only be for commands which use the
6310 * namespace ID for listing purposes and therefore can have
6311 * traditionally illegal values here.
6312 */
6313 if (check->nck_skip_ctrl) {
6314 return (nvme_ioctl_excl_check(minor, ioc, check));
6315 }
6316
6317 /*
6318 * At this point, we know that we're on the controller's node. We first
6319 * deal with the simple case, is a namespace allowed at all or not. If
6320 * it is not allowed, then the only acceptable value is zero.
6321 */
6322 if (!check->nck_ns_ok) {
6323 if (ioc->nioc_nsid != 0) {
6324 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0,
6325 0));
6326 }
6327
6328 return (nvme_ioctl_excl_check(minor, ioc, check));
6329 }
6330
6331 /*
6332 * At this point, we know that a controller is allowed to use a
6333 * namespace. If we haven't been given zero or the broadcast namespace,
6334 * check to see if it's actually a valid namespace ID. If it is outside the
6335 * valid range, then it is an error. Next, if we have been requested to
6336 * rewrite 0 (the 'this controller' indicator) as the broadcast namespace,
6337 * do so.
6338 *
6339 * While we validate that this namespace is within the valid range, we
6340 * do not check if it is active or inactive. That is left to our callers
6341 * to determine.
6342 */
6343 if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count &&
6344 ioc->nioc_nsid != NVME_NSID_BCAST) {
6345 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0));
6346 }
6347
6348 if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) {
6349 ioc->nioc_nsid = NVME_NSID_BCAST;
6350 }
6351
6352 /*
6353 * Finally, see if we have ended up with a broadcast namespace ID
6354 * whether through specification or rewriting. If that is not allowed,
6355 * then that is an error.
6356 */
6357 if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) {
6358 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
6359 }
6360
6361 return (nvme_ioctl_excl_check(minor, ioc, check));
6362 }
6363
6364 static int
6365 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode,
6366 cred_t *cred_p)
6367 {
6368 nvme_t *const nvme = minor->nm_ctrl;
6369 nvme_ioctl_ctrl_info_t *info;
6370 nvme_reg_cap_t cap = { 0 };
6371 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL };
6372 void *idbuf;
6373
6374 if ((mode & FREAD) == 0)
6375 return (EBADF);
6376
6377 info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY);
6378 if (info == NULL) {
6379 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6380 mode));
6381 }
6382
6383 if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t),
6384 mode & FKIOCTL) != 0) {
6385 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6386 return (EFAULT);
6387 }
6388
6389 if (!nvme_ioctl_check(minor, &info->nci_common,
6390 &nvme_check_ctrl_info)) {
6391 goto copyout;
6392 }
6393
6394 /*
6395 * We explicitly do not use the identify controller copy in the kernel
6396 * right now so that way we can get a snapshot of the controller's
6397 * current capacity and values. While it's tempting to try to use this
6398 * to refresh the kernel's copy, we don't, simply to keep the rest of
6399 * the driver simpler for now.
6400 */
6401 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6402 info->nci_common = id.nid_common;
6403 goto copyout;
6404 }
6405 bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t));
6406 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6407
6408 /*
6409 * Use the kernel's cached common namespace information for this.
6410 */
6411 bcopy(nvme->n_idcomns, &info->nci_common_ns,
6412 sizeof (nvme_identify_nsid_t));
6413
6414 info->nci_vers = nvme->n_version;
6415
6416 /*
6417 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
6418 * specify the base page size of 4k (1<<12), so add 12 here to
6419 * get the real page size value.
6420 */
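/*
 * For example (illustrative values): cap_mpsmin == 0 yields 4 KiB
 * (1 << 12) and cap_mpsmax == 4 yields 64 KiB (1 << 16).
 */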
6421 cap.r = nvme_get64(nvme, NVME_REG_CAP);
6422 info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax);
6423 info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin);
6424
6425 info->nci_nintrs = (uint32_t)nvme->n_intr_cnt;
6426
6427 copyout:
6428 if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t),
6429 mode & FKIOCTL) != 0) {
6430 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6431 return (EFAULT);
6432 }
6433
6434 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6435 return (0);
6436 }
6437
6438 static int
6439 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6440 {
6441 nvme_t *const nvme = minor->nm_ctrl;
6442 nvme_ioctl_ns_info_t *ns_info;
6443 nvme_namespace_t *ns;
6444 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID };
6445 void *idbuf;
6446
6447 if ((mode & FREAD) == 0)
6448 return (EBADF);
6449
6450 ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY);
6451 if (ns_info == NULL) {
6452 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6453 mode));
6454 }
6455
6456 if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t),
6457 mode & FKIOCTL) != 0) {
6458 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6459 return (EFAULT);
6460 }
6461
6462 if (!nvme_ioctl_check(minor, &ns_info->nni_common,
6463 &nvme_check_ns_info)) {
6464 goto copyout;
6465 }
6466
6467 ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0);
6468 ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid);
6469
6470 /*
6471 * First fetch a fresh copy of the namespace information. Most callers
6472 * are using this because they will want a mostly accurate snapshot of
6473 * capacity and utilization.
6474 */
6475 id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid;
6476 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6477 ns_info->nni_common = id.nid_common;
6478 goto copyout;
6479 }
6480 bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t));
6481 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6482
6483 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6484 ns_info->nni_state = ns->ns_state;
6485 if (ns->ns_state >= NVME_NS_STATE_ATTACHED) {
6486 const char *addr;
6487
6488 ns_info->nni_state = NVME_NS_STATE_ATTACHED;
6489 addr = bd_address(ns->ns_bd_hdl);
6490 if (strlcpy(ns_info->nni_addr, addr,
6491 sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) {
6492 nvme_mgmt_unlock(nvme);
6493 (void) nvme_ioctl_error(&ns_info->nni_common,
6494 NVME_IOCTL_E_BD_ADDR_OVER, 0, 0);
6495 goto copyout;
6496 }
6497 }
6498 nvme_mgmt_unlock(nvme);
6499
6500 copyout:
6501 if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t),
6502 mode & FKIOCTL) != 0) {
6503 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6504 return (EFAULT);
6505 }
6506
6507 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6508 return (0);
6509 }
6510
6511 static int
6512 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6513 {
6514 _NOTE(ARGUNUSED(cred_p));
6515 nvme_t *const nvme = minor->nm_ctrl;
6516 void *idctl;
6517 uint_t model;
6518 nvme_ioctl_identify_t id;
6519 #ifdef _MULTI_DATAMODEL
6520 nvme_ioctl_identify32_t id32;
6521 #endif
6522 boolean_t ns_minor;
6523
6524 if ((mode & FREAD) == 0)
6525 return (EBADF);
6526
6527 model = ddi_model_convert_from(mode);
6528 switch (model) {
6529 #ifdef _MULTI_DATAMODEL
6530 case DDI_MODEL_ILP32:
6531 bzero(&id, sizeof (id));
6532 if (ddi_copyin((void *)arg, &id32, sizeof (id32),
6533 mode & FKIOCTL) != 0) {
6534 return (EFAULT);
6535 }
6536 id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid;
6537 id.nid_cns = id32.nid_cns;
6538 id.nid_ctrlid = id32.nid_ctrlid;
6539 id.nid_data = id32.nid_data;
6540 break;
6541 #endif /* _MULTI_DATAMODEL */
6542 case DDI_MODEL_NONE:
6543 if (ddi_copyin((void *)arg, &id, sizeof (id),
6544 mode & FKIOCTL) != 0) {
6545 return (EFAULT);
6546 }
6547 break;
6548 default:
6549 return (ENOTSUP);
6550 }
6551
6552 if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) {
6553 goto copyout;
6554 }
6555
6556 ns_minor = minor->nm_ns != NULL;
6557 if (!nvme_validate_identify(nvme, &id, ns_minor)) {
6558 goto copyout;
6559 }
6560
6561 if (nvme_identify(nvme, B_TRUE, &id, &idctl)) {
6562 int ret = ddi_copyout(idctl, (void *)id.nid_data,
6563 NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL);
6564 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
6565 if (ret != 0) {
6566 (void) nvme_ioctl_error(&id.nid_common,
6567 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6568 goto copyout;
6569 }
6570
6571 nvme_ioctl_success(&id.nid_common);
6572 }
6573
6574 copyout:
6575 switch (model) {
6576 #ifdef _MULTI_DATAMODEL
6577 case DDI_MODEL_ILP32:
6578 id32.nid_common = id.nid_common;
6579
6580 if (ddi_copyout(&id32, (void *)arg, sizeof (id32),
6581 mode & FKIOCTL) != 0) {
6582 return (EFAULT);
6583 }
6584 break;
6585 #endif /* _MULTI_DATAMODEL */
6586 case DDI_MODEL_NONE:
6587 if (ddi_copyout(&id, (void *)arg, sizeof (id),
6588 mode & FKIOCTL) != 0) {
6589 return (EFAULT);
6590 }
6591 break;
6592 default:
6593 return (ENOTSUP);
6594 }
6595
6596 return (0);
6597 }
6598
6599 /*
6600 * Execute commands on behalf of the various ioctls.
6601 *
6602 * If this returns true then the command completed successfully. Otherwise error
6603 * information is returned in the nvme_ioctl_common_t arguments.
6604 */
6605 typedef struct {
6606 nvme_sqe_t *ica_sqe;
6607 void *ica_data;
6608 uint32_t ica_data_len;
6609 uint_t ica_dma_flags;
6610 int ica_copy_flags;
6611 uint32_t ica_timeout;
6612 uint32_t ica_cdw0;
6613 } nvme_ioc_cmd_args_t;
6614
6615 static boolean_t
6616 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args)
6617 {
6618 nvme_cmd_t *cmd;
6619 boolean_t ret = B_FALSE;
6620
6621 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
6622 cmd->nc_sqid = 0;
6623
6624 /*
6625 * This function is used to facilitate requests from
6626 * userspace, so don't panic if the command fails. This
6627 * is especially true for admin passthru commands, where
6628 * the actual command data structure is entirely defined
6629 * by userspace.
6630 */
6631 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
6632
6633 cmd->nc_callback = nvme_wakeup_cmd;
6634 cmd->nc_sqe = *args->ica_sqe;
6635
6636 if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) {
6637 if (args->ica_data == NULL) {
6638 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM,
6639 0, 0);
6640 goto free_cmd;
6641 }
6642
6643 if (nvme_zalloc_dma(nvme, args->ica_data_len,
6644 args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) !=
6645 DDI_SUCCESS) {
6646 dev_err(nvme->n_dip, CE_WARN,
6647 "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
6648 ret = nvme_ioctl_error(ioc,
6649 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6650 goto free_cmd;
6651 }
6652
6653 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
6654 ret = nvme_ioctl_error(ioc,
6655 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6656 goto free_cmd;
6657 }
6658
6659 if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 &&
6660 ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp,
6661 args->ica_data_len, args->ica_copy_flags) != 0) {
6662 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA,
6663 0, 0);
6664 goto free_cmd;
6665 }
6666 }
6667
6668 nvme_admin_cmd(cmd, args->ica_timeout);
6669
6670 if (!nvme_check_cmd_status_ioctl(cmd, ioc)) {
6671 ret = B_FALSE;
6672 goto free_cmd;
6673 }
6674
6675 args->ica_cdw0 = cmd->nc_cqe.cqe_dw0;
6676
6677 if ((args->ica_dma_flags & DDI_DMA_READ) != 0 &&
6678 ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data,
6679 args->ica_data_len, args->ica_copy_flags) != 0) {
6680 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6681 goto free_cmd;
6682 }
6683
6684 ret = B_TRUE;
6685 nvme_ioctl_success(ioc);
6686
6687 free_cmd:
6688 nvme_free_cmd(cmd);
6689
6690 return (ret);
6691 }
6692
6693 static int
6694 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode,
6695 cred_t *cred_p)
6696 {
6697 nvme_t *const nvme = minor->nm_ctrl;
6698 void *buf;
6699 nvme_ioctl_get_logpage_t log;
6700 uint_t model;
6701 #ifdef _MULTI_DATAMODEL
6702 nvme_ioctl_get_logpage32_t log32;
6703 #endif
6704
6705 if ((mode & FREAD) == 0) {
6706 return (EBADF);
6707 }
6708
6709 model = ddi_model_convert_from(mode);
6710 switch (model) {
6711 #ifdef _MULTI_DATAMODEL
6712 case DDI_MODEL_ILP32:
6713 bzero(&log, sizeof (log));
6714 if (ddi_copyin((void *)arg, &log32, sizeof (log32),
6715 mode & FKIOCTL) != 0) {
6716 return (EFAULT);
6717 }
6718
6719 log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid;
6720 log.nigl_csi = log32.nigl_csi;
6721 log.nigl_lid = log32.nigl_lid;
6722 log.nigl_lsp = log32.nigl_lsp;
6723 log.nigl_len = log32.nigl_len;
6724 log.nigl_offset = log32.nigl_offset;
6725 log.nigl_data = log32.nigl_data;
6726 break;
6727 #endif /* _MULTI_DATAMODEL */
6728 case DDI_MODEL_NONE:
6729 if (ddi_copyin((void *)arg, &log, sizeof (log),
6730 mode & FKIOCTL) != 0) {
6731 return (EFAULT);
6732 }
6733 break;
6734 default:
6735 return (ENOTSUP);
6736 }
6737
6738 /*
6739 * Eventually we'd like to take a soft lock to keep the namespaces from
6740 * changing out from under us during this operation, but we haven't
6741 * implemented that yet.
6742 */
6743 if (!nvme_ioctl_check(minor, &log.nigl_common,
6744 &nvme_check_get_logpage)) {
6745 goto copyout;
6746 }
6747
6748 if (!nvme_validate_logpage(nvme, &log)) {
6749 goto copyout;
6750 }
6751
6752 if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) {
6753 int copy;
6754
6755 copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len,
6756 mode & FKIOCTL);
6757 kmem_free(buf, log.nigl_len);
6758 if (copy != 0) {
6759 (void) nvme_ioctl_error(&log.nigl_common,
6760 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6761 goto copyout;
6762 }
6763
6764 nvme_ioctl_success(&log.nigl_common);
6765 }
6766
6767 copyout:
6768 switch (model) {
6769 #ifdef _MULTI_DATAMODEL
6770 case DDI_MODEL_ILP32:
6771 bzero(&log32, sizeof (log32));
6772
6773 log32.nigl_common = log.nigl_common;
6774 log32.nigl_csi = log.nigl_csi;
6775 log32.nigl_lid = log.nigl_lid;
6776 log32.nigl_lsp = log.nigl_lsp;
6777 log32.nigl_len = log.nigl_len;
6778 log32.nigl_offset = log.nigl_offset;
6779 log32.nigl_data = log.nigl_data;
6780 if (ddi_copyout(&log32, (void *)arg, sizeof (log32),
6781 mode & FKIOCTL) != 0) {
6782 return (EFAULT);
6783 }
6784 break;
6785 #endif /* _MULTI_DATAMODEL */
6786 case DDI_MODEL_NONE:
6787 if (ddi_copyout(&log, (void *)arg, sizeof (log),
6788 mode & FKIOCTL) != 0) {
6789 return (EFAULT);
6790 }
6791 break;
6792 default:
6793 return (ENOTSUP);
6794 }
6795
6796 return (0);
6797 }
6798
6799 static int
6800 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode,
6801 cred_t *cred_p)
6802 {
6803 nvme_t *const nvme = minor->nm_ctrl;
6804 nvme_ioctl_get_feature_t feat;
6805 uint_t model;
6806 #ifdef _MULTI_DATAMODEL
6807 nvme_ioctl_get_feature32_t feat32;
6808 #endif
6809 nvme_get_features_dw10_t gf_dw10 = { 0 };
6810 nvme_ioc_cmd_args_t args = { NULL };
6811 nvme_sqe_t sqe = {
6812 .sqe_opc = NVME_OPC_GET_FEATURES
6813 };
6814
6815 if ((mode & FREAD) == 0) {
6816 return (EBADF);
6817 }
6818
6819 model = ddi_model_convert_from(mode);
6820 switch (model) {
6821 #ifdef _MULTI_DATAMODEL
6822 case DDI_MODEL_ILP32:
6823 bzero(&feat, sizeof (feat));
6824 if (ddi_copyin((void *)arg, &feat32, sizeof (feat32),
6825 mode & FKIOCTL) != 0) {
6826 return (EFAULT);
6827 }
6828
6829 feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid;
6830 feat.nigf_fid = feat32.nigf_fid;
6831 feat.nigf_sel = feat32.nigf_sel;
6832 feat.nigf_cdw11 = feat32.nigf_cdw11;
6833 feat.nigf_data = feat32.nigf_data;
6834 feat.nigf_len = feat32.nigf_len;
6835 break;
6836 #endif /* _MULTI_DATAMODEL */
6837 case DDI_MODEL_NONE:
6838 if (ddi_copyin((void *)arg, &feat, sizeof (feat),
6839 mode & FKIOCTL) != 0) {
6840 return (EFAULT);
6841 }
6842 break;
6843 default:
6844 return (ENOTSUP);
6845 }
6846
6847 if (!nvme_ioctl_check(minor, &feat.nigf_common,
6848 &nvme_check_get_feature)) {
6849 goto copyout;
6850 }
6851
6852 if (!nvme_validate_get_feature(nvme, &feat)) {
6853 goto copyout;
6854 }
6855
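/*
 * Build the Get Features submission entry: the feature identifier and
 * select field are packed into CDW10, the caller's CDW11 is passed
 * through, and any output buffer is set up as a device-to-host (read)
 * transfer.
 */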
6856 gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0);
6857 gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0);
6858 sqe.sqe_cdw10 = gf_dw10.r;
6859 sqe.sqe_cdw11 = feat.nigf_cdw11;
6860 sqe.sqe_nsid = feat.nigf_common.nioc_nsid;
6861
6862 args.ica_sqe = &sqe;
6863 if (feat.nigf_len != 0) {
6864 args.ica_data = (void *)feat.nigf_data;
6865 args.ica_data_len = feat.nigf_len;
6866 args.ica_dma_flags = DDI_DMA_READ;
6867 }
6868 args.ica_copy_flags = mode;
6869 args.ica_timeout = nvme_admin_cmd_timeout;
6870
6871 if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) {
6872 goto copyout;
6873 }
6874
6875 feat.nigf_cdw0 = args.ica_cdw0;
6876
6877 copyout:
6878 switch (model) {
6879 #ifdef _MULTI_DATAMODEL
6880 case DDI_MODEL_ILP32:
6881 bzero(&feat32, sizeof (feat32));
6882
6883 feat32.nigf_common = feat.nigf_common;
6884 feat32.nigf_fid = feat.nigf_fid;
6885 feat32.nigf_sel = feat.nigf_sel;
6886 feat32.nigf_cdw11 = feat.nigf_cdw11;
6887 feat32.nigf_data = feat.nigf_data;
6888 feat32.nigf_len = feat.nigf_len;
6889 feat32.nigf_cdw0 = feat.nigf_cdw0;
6890 if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32),
6891 mode & FKIOCTL) != 0) {
6892 return (EFAULT);
6893 }
6894 break;
6895 #endif /* _MULTI_DATAMODEL */
6896 case DDI_MODEL_NONE:
6897 if (ddi_copyout(&feat, (void *)arg, sizeof (feat),
6898 mode & FKIOCTL) != 0) {
6899 return (EFAULT);
6900 }
6901 break;
6902 default:
6903 return (ENOTSUP);
6904 }
6905
6906 return (0);
6907 }
6908
6909 static int
6910 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6911 {
6912 nvme_t *const nvme = minor->nm_ctrl;
6913 nvme_ioctl_format_t ioc;
6914
6915 if ((mode & FWRITE) == 0)
6916 return (EBADF);
6917
6918 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6919 return (EPERM);
6920
6921 if (ddi_copyin((void *)(uintptr_t)arg, &ioc,
6922 sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0)
6923 return (EFAULT);
6924
6925 if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) {
6926 goto copyout;
6927 }
6928
6929 if (!nvme_validate_format(nvme, &ioc)) {
6930 goto copyout;
6931 }
6932
6933 /*
6934 * A format of the broadcast namespace affects all namespaces attached
6935 * to the controller, i.e. the active namespaces. However, a targeted
6936 * format can impact any allocated namespace, even one not attached. As
6937 * such, we need different checks for each situation.
6938 */
6939 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6940 if (ioc.nif_common.nioc_nsid == NVME_NSID_BCAST) {
6941 if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) {
6942 nvme_mgmt_unlock(nvme);
6943 (void) nvme_ioctl_error(&ioc.nif_common,
6944 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
6945 goto copyout;
6946 }
6947 } else {
6948 nvme_namespace_t *ns = nvme_nsid2ns(nvme,
6949 ioc.nif_common.nioc_nsid);
6950
6951 if (!nvme_ns_state_check(ns, &ioc.nif_common,
6952 nvme_format_nvm_states)) {
6953 nvme_mgmt_unlock(nvme);
6954 goto copyout;
6955 }
6956 }
6957
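/*
 * Issue the format and, if it succeeds, rescan the target namespace(s) so
 * that our cached namespace state is refreshed.
 */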
6958 if (nvme_format_nvm(nvme, &ioc)) {
6959 nvme_ioctl_success(&ioc.nif_common);
6960 nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid);
6961 }
6962 nvme_mgmt_unlock(nvme);
6963
6964 copyout:
6965 if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc),
6966 mode & FKIOCTL) != 0) {
6967 return (EFAULT);
6968 }
6969
6970 return (0);
6971 }
6972
6973 static int
6974 nvme_ioctl_bd_detach(nvme_minor_t *minor, intptr_t arg, int mode,
6975 cred_t *cred_p)
6976 {
6977 nvme_t *const nvme = minor->nm_ctrl;
6978 nvme_ioctl_common_t com;
6979
6980 if ((mode & FWRITE) == 0)
6981 return (EBADF);
6982
6983 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6984 return (EPERM);
6985
6986 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6987 mode & FKIOCTL) != 0) {
6988 return (EFAULT);
6989 }
6990
6991 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6992 goto copyout;
6993 }
6994
6995 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6996 if (nvme_bd_detach_ns(nvme, &com)) {
6997 nvme_ioctl_success(&com);
6998 }
6999 nvme_mgmt_unlock(nvme);
7000
7001 copyout:
7002 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7003 mode & FKIOCTL) != 0) {
7004 return (EFAULT);
7005 }
7006
7007 return (0);
7008 }
7009
7010 static int
7011 nvme_ioctl_bd_attach(nvme_minor_t *minor, intptr_t arg, int mode,
7012 cred_t *cred_p)
7013 {
7014 nvme_t *const nvme = minor->nm_ctrl;
7015 nvme_ioctl_common_t com;
7016 nvme_namespace_t *ns;
7017
7018 if ((mode & FWRITE) == 0)
7019 return (EBADF);
7020
7021 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7022 return (EPERM);
7023
7024 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7025 mode & FKIOCTL) != 0) {
7026 return (EFAULT);
7027 }
7028
7029 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
7030 goto copyout;
7031 }
7032
7033 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7034 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7035
7036 /*
7037 * Strictly speaking we shouldn't need to call nvme_init_ns() here, as
7038 * we should be properly refreshing the internal state when we issue
7039 * commands that change things. However, we opt to do so anyway as a
7040 * safety check, lest we hand the kernel something bad or a vendor
7041 * unique command somehow changed things behind our back.
7042 */
7043 if (ns->ns_state < NVME_NS_STATE_ATTACHED) {
7044 nvme_rescan_ns(nvme, com.nioc_nsid);
7045 }
7046
7047 if (nvme_bd_attach_ns(nvme, &com)) {
7048 nvme_ioctl_success(&com);
7049 }
7050 nvme_mgmt_unlock(nvme);
7051
7052 copyout:
7053 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7054 mode & FKIOCTL) != 0) {
7055 return (EFAULT);
7056 }
7057
7058 return (0);
7059 }
7060
7061 /*
7062 * Attach a controller to or detach it from the specified namespace. While
7063 * this in theory allows multiple controllers to be specified, currently we
7064 * only support using the controller that this ioctl was issued on. In the
7065 * future, when we have better ways to test dual-attached controllers, this
7066 * should be extended to take the controller list from userland.
7067 */
7068 static boolean_t
7069 nvme_ctrl_attach_detach_ns(nvme_t *nvme, nvme_namespace_t *ns,
7070 nvme_ioctl_common_t *ioc, boolean_t attach)
7071 {
7072 nvme_ioc_cmd_args_t args = { NULL };
7073 nvme_sqe_t sqe;
7074 nvme_ns_mgmt_dw10_t dw10;
7075 uint16_t ctrlids[2];
7076
7077 ASSERT(nvme_mgmt_lock_held(nvme));
7078
7079 bzero(&sqe, sizeof (sqe));
7080 sqe.sqe_nsid = ioc->nioc_nsid;
7081 sqe.sqe_opc = NVME_OPC_NS_ATTACH;
7082
7083 dw10.r = 0;
7084 dw10.b.nsm_sel = attach ? NVME_NS_ATTACH_CTRL_ATTACH :
7085 NVME_NS_ATTACH_CTRL_DETACH;
7086 sqe.sqe_cdw10 = dw10.r;
7087
7088 /*
7089 * As we only support sending our current controller's ID along, we can
7090 * simplify this and don't need to bother allocating a full
7091 * nvme_identify_ctrl_list_t for just two entries.
7092 */
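/*
 * ctrlids[0] is the number of entries (one); ctrlids[1] is the controller
 * ID itself.
 */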
7093 ctrlids[0] = 1;
7094 ctrlids[1] = nvme->n_idctl->id_cntlid;
7095
7096 args.ica_sqe = &sqe;
7097 args.ica_data = ctrlids;
7098 args.ica_data_len = sizeof (ctrlids);
7099 args.ica_dma_flags = DDI_DMA_WRITE;
7100 args.ica_copy_flags = FKIOCTL;
7101 args.ica_timeout = nvme_admin_cmd_timeout;
7102
7103 return (nvme_ioc_cmd(nvme, ioc, &args));
7104 }
7105
7106 static int
7107 nvme_ioctl_ctrl_detach(nvme_minor_t *minor, intptr_t arg, int mode,
7108 cred_t *cred_p)
7109 {
7110 nvme_t *const nvme = minor->nm_ctrl;
7111 nvme_ioctl_common_t com;
7112 nvme_namespace_t *ns;
7113
7114 if ((mode & FWRITE) == 0)
7115 return (EBADF);
7116
7117 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7118 return (EPERM);
7119
7120 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7121 mode & FKIOCTL) != 0) {
7122 return (EFAULT);
7123 }
7124
7125 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
7126 goto copyout;
7127 }
7128
7129 if (!nvme_validate_ctrl_attach_detach_ns(nvme, &com)) {
7130 goto copyout;
7131 }
7132
7133 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7134 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7135
7136 if (nvme_ns_state_check(ns, &com, nvme_ctrl_detach_states)) {
7137 if (nvme_ctrl_attach_detach_ns(nvme, ns, &com, B_FALSE)) {
7138 nvme_rescan_ns(nvme, com.nioc_nsid);
7139 nvme_ioctl_success(&com);
7140 }
7141 }
7142 nvme_mgmt_unlock(nvme);
7143
7144 copyout:
7145 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7146 mode & FKIOCTL) != 0) {
7147 return (EFAULT);
7148 }
7149
7150 return (0);
7151 }
7152
7153 static int
7154 nvme_ioctl_ns_create(nvme_minor_t *minor, intptr_t arg, int mode,
7155 cred_t *cred_p)
7156 {
7157 nvme_t *const nvme = minor->nm_ctrl;
7158 nvme_ioctl_ns_create_t create;
7159
7160 if ((mode & FWRITE) == 0)
7161 return (EBADF);
7162
7163 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7164 return (EPERM);
7165
7166 if (ddi_copyin((void *)(uintptr_t)arg, &create, sizeof (create),
7167 mode & FKIOCTL) != 0) {
7168 return (EFAULT);
7169 }
7170
7171 if (!nvme_ioctl_check(minor, &create.nnc_common,
7172 &nvme_check_ns_create)) {
7173 goto copyout;
7174 }
7175
7176 if (!nvme_validate_ns_create(nvme, &create)) {
7177 goto copyout;
7178 }
7179
7180 /*
7181 * Now that we've validated this, proceed to build up the actual data
7182 * request. We need to fill out the relevant identify namespace data
7183 * structure fields.
7184 */
7185 nvme_identify_nsid_t *idns = kmem_zalloc(sizeof (nvme_identify_nsid_t),
7186 KM_NOSLEEP_LAZY);
7187 if (idns == NULL) {
7188 (void) nvme_ioctl_error(&create.nnc_common,
7189 NVME_IOCTL_E_NO_KERN_MEM, 0, 0);
7190 goto copyout;
7191 }
7192
7193 idns->id_nsize = create.nnc_nsze;
7194 idns->id_ncap = create.nnc_ncap;
7195 idns->id_flbas.lba_format = create.nnc_flbas;
7196 idns->id_nmic.nm_shared = bitx32(create.nnc_nmic, 0, 0);
7197
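/*
 * Namespace management (create): the create selection goes in CDW10, the
 * command set identifier in CDW11, and the identify structure above is
 * sent as host-to-device data. On success the controller returns the new
 * namespace ID in completion queue entry dword 0.
 */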
7198 nvme_ioc_cmd_args_t args = { NULL };
7199 nvme_sqe_t sqe;
7200 nvme_ns_mgmt_dw10_t dw10;
7201 nvme_ns_mgmt_dw11_t dw11;
7202
7203 bzero(&sqe, sizeof (sqe));
7204 sqe.sqe_nsid = create.nnc_common.nioc_nsid;
7205 sqe.sqe_opc = NVME_OPC_NS_MGMT;
7206
7207 dw10.r = 0;
7208 dw10.b.nsm_sel = NVME_NS_MGMT_NS_CREATE;
7209 sqe.sqe_cdw10 = dw10.r;
7210
7211 dw11.r = 0;
7212 dw11.b.nsm_csi = create.nnc_csi;
7213 sqe.sqe_cdw11 = dw11.r;
7214
7215 args.ica_sqe = &sqe;
7216 args.ica_data = idns;
7217 args.ica_data_len = sizeof (nvme_identify_nsid_t);
7218 args.ica_dma_flags = DDI_DMA_WRITE;
7219 args.ica_copy_flags = FKIOCTL;
7220 args.ica_timeout = nvme_format_cmd_timeout;
7221
7222 /*
7223 * This command manipulates our understanding of a namespace's state.
7224 * While we don't need to check anything before we proceed, we still
7225 * logically require the lock.
7226 */
7227 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7228 if (nvme_ioc_cmd(nvme, &create.nnc_common, &args)) {
7229 create.nnc_nsid = args.ica_cdw0;
7230 nvme_rescan_ns(nvme, create.nnc_nsid);
7231 nvme_ioctl_success(&create.nnc_common);
7232 }
7233 nvme_mgmt_unlock(nvme);
7234 kmem_free(idns, sizeof (nvme_identify_nsid_t));
7235
7236 copyout:
7237 if (ddi_copyout(&create, (void *)(uintptr_t)arg, sizeof (create),
7238 mode & FKIOCTL) != 0) {
7239 return (EFAULT);
7240 }
7241
7242 return (0);
7243
7244 }
7245
7246 static int
7247 nvme_ioctl_ns_delete(nvme_minor_t *minor, intptr_t arg, int mode,
7248 cred_t *cred_p)
7249 {
7250 nvme_t *const nvme = minor->nm_ctrl;
7251 nvme_ioctl_common_t com;
7252
7253 if ((mode & FWRITE) == 0)
7254 return (EBADF);
7255
7256 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7257 return (EPERM);
7258
7259 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7260 mode & FKIOCTL) != 0) {
7261 return (EFAULT);
7262 }
7263
7264 if (!nvme_ioctl_check(minor, &com, &nvme_check_ns_delete)) {
7265 goto copyout;
7266 }
7267
7268 if (!nvme_validate_ns_delete(nvme, &com)) {
7269 goto copyout;
7270 }
7271
7272 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7273 if (com.nioc_nsid == NVME_NSID_BCAST) {
7274 if (!nvme_no_blkdev_attached(nvme, com.nioc_nsid)) {
7275 nvme_mgmt_unlock(nvme);
7276 (void) nvme_ioctl_error(&com,
7277 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
7278 goto copyout;
7279 }
7280 } else {
7281 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7282
7283 if (!nvme_ns_state_check(ns, &com, nvme_ns_delete_states)) {
7284 nvme_mgmt_unlock(nvme);
7285 goto copyout;
7286 }
7287 }
7288
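/*
 * Namespace management (delete) carries no data payload; only the delete
 * selection in CDW10 and the target (possibly broadcast) namespace ID
 * matter.
 */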
7289 nvme_ioc_cmd_args_t args = { NULL };
7290 nvme_sqe_t sqe;
7291 nvme_ns_mgmt_dw10_t dw10;
7292
7293 bzero(&sqe, sizeof (sqe));
7294 sqe.sqe_nsid = com.nioc_nsid;
7295 sqe.sqe_opc = NVME_OPC_NS_MGMT;
7296
7297 dw10.r = 0;
7298 dw10.b.nsm_sel = NVME_NS_MGMT_NS_DELETE;
7299 sqe.sqe_cdw10 = dw10.r;
7300
7301 args.ica_sqe = &sqe;
7302 args.ica_data = NULL;
7303 args.ica_data_len = 0;
7304 args.ica_dma_flags = 0;
7305 args.ica_copy_flags = 0;
7306 args.ica_timeout = nvme_format_cmd_timeout;
7307
7308 if (nvme_ioc_cmd(nvme, &com, &args)) {
7309 nvme_rescan_ns(nvme, com.nioc_nsid);
7310 nvme_ioctl_success(&com);
7311 }
7312 nvme_mgmt_unlock(nvme);
7313
7314 copyout:
7315 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7316 mode & FKIOCTL) != 0) {
7317 return (EFAULT);
7318 }
7319
7320 return (0);
7321 }
7322
7323 static int
7324 nvme_ioctl_ctrl_attach(nvme_minor_t *minor, intptr_t arg, int mode,
7325 cred_t *cred_p)
7326 {
7327 nvme_t *const nvme = minor->nm_ctrl;
7328 nvme_ioctl_common_t com;
7329 nvme_namespace_t *ns;
7330
7331 if ((mode & FWRITE) == 0)
7332 return (EBADF);
7333
7334 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7335 return (EPERM);
7336
7337 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
7338 mode & FKIOCTL) != 0) {
7339 return (EFAULT);
7340 }
7341
7342 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
7343 goto copyout;
7344 }
7345
7346 if (!nvme_validate_ctrl_attach_detach_ns(nvme, &com)) {
7347 goto copyout;
7348 }
7349
7350 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7351 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
7352
7353 if (nvme_ns_state_check(ns, &com, nvme_ctrl_attach_states)) {
7354 if (nvme_ctrl_attach_detach_ns(nvme, ns, &com, B_TRUE)) {
7355 nvme_rescan_ns(nvme, com.nioc_nsid);
7356 nvme_ioctl_success(&com);
7357 }
7358 }
7359 nvme_mgmt_unlock(nvme);
7360
7361 copyout:
7362 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
7363 mode & FKIOCTL) != 0) {
7364 return (EFAULT);
7365 }
7366
7367 return (0);
7368 }
7369
7370 static void
7371 nvme_ufm_update(nvme_t *nvme)
7372 {
7373 mutex_enter(&nvme->n_fwslot_mutex);
7374 ddi_ufm_update(nvme->n_ufmh);
7375 if (nvme->n_fwslot != NULL) {
7376 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
7377 nvme->n_fwslot = NULL;
7378 }
7379 mutex_exit(&nvme->n_fwslot_mutex);
7380 }
7381
7382 /*
7383 * Download new firmware to the device's internal staging area. We do not call
7384 * nvme_ufm_update() here because after a firmware download, there has been no
7385 * change to any of the actual persistent firmware data. That requires a
7386 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot
7387 * or to activate a slot.
7388 */
7389 static int
7390 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode,
7391 cred_t *cred_p)
7392 {
7393 nvme_t *const nvme = minor->nm_ctrl;
7394 nvme_ioctl_fw_load_t fw;
7395 uint64_t len, maxcopy;
7396 offset_t offset;
7397 uint32_t gran;
7398 nvme_valid_ctrl_data_t data;
7399 uintptr_t buf;
7400 nvme_sqe_t sqe = {
7401 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD
7402 };
7403
7404 if ((mode & FWRITE) == 0)
7405 return (EBADF);
7406
7407 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7408 return (EPERM);
7409
7410 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
7411 mode & FKIOCTL) != 0) {
7412 return (EFAULT);
7413 }
7414
7415 if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) {
7416 goto copyout;
7417 }
7418
7419 if (!nvme_validate_fw_load(nvme, &fw)) {
7420 goto copyout;
7421 }
7422
7423 len = fw.fwl_len;
7424 offset = fw.fwl_off;
7425 buf = fw.fwl_buf;
7426
7427 /*
7428 * We need to determine the minimum and maximum amount of data that we
7429 * will send to the device in a single transfer. Starting in NVMe 1.3
7430 * this must be a multiple of the firmware update granularity (FWUG),
7431 * but must not exceed the maximum data transfer size that we've set.
7432 * Many devices don't report anything here, which means we'll end up
7433 * using our default value. Our policy is fairly simple: if the maximum
7434 * data transfer size is evenly divisible by the granularity, then use
7435 * it. Otherwise we use the granularity itself. The granularity is
7436 * always in page sized units, so trying to find another optimum point
7437 * isn't worth it. If we encounter a contradiction between the two, then
7438 * we will have to error out.
7439 */
7440 data.vcd_vers = &nvme->n_version;
7441 data.vcd_id = nvme->n_idctl;
7442 gran = nvme_fw_load_granularity(&data);
7443
7444 if ((nvme->n_max_data_transfer_size % gran) == 0) {
7445 maxcopy = nvme->n_max_data_transfer_size;
7446 } else if (gran <= nvme->n_max_data_transfer_size) {
7447 maxcopy = gran;
7448 } else {
7449 (void) nvme_ioctl_error(&fw.fwl_common,
7450 NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0);
7451 goto copyout;
7452 }
7453
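/*
 * Send the image in chunks of at most maxcopy bytes. CDW10 holds the
 * chunk length as a zeroes-based dword count and CDW11 the dword offset
 * of the chunk within the overall download.
 */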
7454 while (len > 0) {
7455 nvme_ioc_cmd_args_t args = { NULL };
7456 uint64_t copylen = MIN(maxcopy, len);
7457
7458 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
7459 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
7460
7461 args.ica_sqe = &sqe;
7462 args.ica_data = (void *)buf;
7463 args.ica_data_len = copylen;
7464 args.ica_dma_flags = DDI_DMA_WRITE;
7465 args.ica_copy_flags = mode;
7466 args.ica_timeout = nvme_admin_cmd_timeout;
7467
7468 if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) {
7469 break;
7470 }
7471
7472 buf += copylen;
7473 offset += copylen;
7474 len -= copylen;
7475 }
7476
7477 copyout:
7478 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
7479 mode & FKIOCTL) != 0) {
7480 return (EFAULT);
7481 }
7482
7483 return (0);
7484 }
7485
7486 static int
7487 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode,
7488 cred_t *cred_p)
7489 {
7490 nvme_t *const nvme = minor->nm_ctrl;
7491 nvme_ioctl_fw_commit_t fw;
7492 nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
7493 nvme_ioc_cmd_args_t args = { NULL };
7494 nvme_sqe_t sqe = {
7495 .sqe_opc = NVME_OPC_FW_ACTIVATE
7496 };
7497
7498 if ((mode & FWRITE) == 0)
7499 return (EBADF);
7500
7501 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7502 return (EPERM);
7503
7504 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
7505 mode & FKIOCTL) != 0) {
7506 return (EFAULT);
7507 }
7508
7509 if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) {
7510 goto copyout;
7511 }
7512
7513 if (!nvme_validate_fw_commit(nvme, &fw)) {
7514 goto copyout;
7515 }
7516
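/* The commit action and target firmware slot are packed into CDW10. */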
7517 fc_dw10.b.fc_slot = fw.fwc_slot;
7518 fc_dw10.b.fc_action = fw.fwc_action;
7519 sqe.sqe_cdw10 = fc_dw10.r;
7520
7521 args.ica_sqe = &sqe;
7522 args.ica_timeout = nvme_commit_save_cmd_timeout;
7523
7524 /*
7525 * There are no conditional actions to take based on this succeeding or
7526 * failing. A failure is recorded in the ioctl structure returned to the
7527 * user.
7528 */
7529 (void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args);
7530
7531 /*
7532 * Let the DDI UFM subsystem know that the firmware information for
7533 * this device has changed. We perform this unconditionally as an
7534 * invalidation doesn't particularly hurt us.
7535 */
7536 nvme_ufm_update(nvme);
7537
7538 copyout:
7539 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
7540 mode & FKIOCTL) != 0) {
7541 return (EFAULT);
7542 }
7543
7544 return (0);
7545 }
7546
7547 /*
7548 * Helper to copy in a passthru command from userspace, handling
7549 * different data models.
7550 */
7551 static int
7552 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode)
7553 {
7554 switch (ddi_model_convert_from(mode & FMODELS)) {
7555 #ifdef _MULTI_DATAMODEL
7556 case DDI_MODEL_ILP32: {
7557 nvme_ioctl_passthru32_t cmd32;
7558
7559 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0)
7560 return (EFAULT);
7561
7562 bzero(cmd, sizeof (nvme_ioctl_passthru_t));
7563
7564 cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid;
7565 cmd->npc_opcode = cmd32.npc_opcode;
7566 cmd->npc_timeout = cmd32.npc_timeout;
7567 cmd->npc_flags = cmd32.npc_flags;
7568 cmd->npc_impact = cmd32.npc_impact;
7569 cmd->npc_cdw12 = cmd32.npc_cdw12;
7570 cmd->npc_cdw13 = cmd32.npc_cdw13;
7571 cmd->npc_cdw14 = cmd32.npc_cdw14;
7572 cmd->npc_cdw15 = cmd32.npc_cdw15;
7573 cmd->npc_buflen = cmd32.npc_buflen;
7574 cmd->npc_buf = cmd32.npc_buf;
7575 break;
7576 }
7577 #endif /* _MULTI_DATAMODEL */
7578 case DDI_MODEL_NONE:
7579 if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t),
7580 mode) != 0) {
7581 return (EFAULT);
7582 }
7583 break;
7584 default:
7585 return (ENOTSUP);
7586 }
7587
7588 return (0);
7589 }
7590
7591 /*
7592 * Helper to copy out a passthru command result to userspace, handling
7593 * different data models.
7594 */
7595 static int
7596 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode)
7597 {
7598 switch (ddi_model_convert_from(mode & FMODELS)) {
7599 #ifdef _MULTI_DATAMODEL
7600 case DDI_MODEL_ILP32: {
7601 nvme_ioctl_passthru32_t cmd32;
7602
7603 bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t));
7604
7605 cmd32.npc_common = cmd->npc_common;
7606 cmd32.npc_opcode = cmd->npc_opcode;
7607 cmd32.npc_timeout = cmd->npc_timeout;
7608 cmd32.npc_flags = cmd->npc_flags;
7609 cmd32.npc_impact = cmd->npc_impact;
7610 cmd32.npc_cdw0 = cmd->npc_cdw0;
7611 cmd32.npc_cdw12 = cmd->npc_cdw12;
7612 cmd32.npc_cdw13 = cmd->npc_cdw13;
7613 cmd32.npc_cdw14 = cmd->npc_cdw14;
7614 cmd32.npc_cdw15 = cmd->npc_cdw15;
7615 cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
7616 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
7617 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
7618 return (EFAULT);
7619 break;
7620 }
7621 #endif /* _MULTI_DATAMODEL */
7622 case DDI_MODEL_NONE:
7623 if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t),
7624 mode) != 0) {
7625 return (EFAULT);
7626 }
7627 break;
7628 default:
7629 return (ENOTSUP);
7630 }
7631 return (0);
7632 }
7633
7634 /*
7635 * Run an arbitrary vendor-specific admin command on the device.
7636 */
7637 static int
7638 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
7639 {
7640 nvme_t *const nvme = minor->nm_ctrl;
7641 int rv;
7642 nvme_ioctl_passthru_t pass;
7643 nvme_sqe_t sqe;
7644 nvme_ioc_cmd_args_t args = { NULL };
7645
7646 /*
7647 * Basic checks: permissions, data model, argument size.
7648 */
7649 if ((mode & FWRITE) == 0)
7650 return (EBADF);
7651
7652 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7653 return (EPERM);
7654
7655 if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass,
7656 mode)) != 0) {
7657 return (rv);
7658 }
7659
7660 if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) {
7661 goto copyout;
7662 }
7663
7664 if (!nvme_validate_vuc(nvme, &pass)) {
7665 goto copyout;
7666 }
7667
7668 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7669 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7670 /*
7671 * We've been told this command has namespace impact. Right now we force
7672 * that to mean every namespace until we have more use cases and reason
7673 * to trust the nsid field.
7674 */
7675 if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) {
7676 nvme_mgmt_unlock(nvme);
7677 (void) nvme_ioctl_error(&pass.npc_common,
7678 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
7679 goto copyout;
7680 }
7681 }
7682
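/*
 * Build the vendor-unique submission entry from the user's request: CDW10
 * is the transfer length in dwords and CDW12-15 are passed through
 * unchanged.
 */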
7683 bzero(&sqe, sizeof (sqe));
7684
7685 sqe.sqe_opc = pass.npc_opcode;
7686 sqe.sqe_nsid = pass.npc_common.nioc_nsid;
7687 sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT);
7688 sqe.sqe_cdw12 = pass.npc_cdw12;
7689 sqe.sqe_cdw13 = pass.npc_cdw13;
7690 sqe.sqe_cdw14 = pass.npc_cdw14;
7691 sqe.sqe_cdw15 = pass.npc_cdw15;
7692
7693 args.ica_sqe = &sqe;
7694 args.ica_data = (void *)pass.npc_buf;
7695 args.ica_data_len = pass.npc_buflen;
7696 args.ica_copy_flags = mode;
7697 args.ica_timeout = pass.npc_timeout;
7698
7699 if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0)
7700 args.ica_dma_flags |= DDI_DMA_READ;
7701 else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0)
7702 args.ica_dma_flags |= DDI_DMA_WRITE;
7703
7704 if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) {
7705 pass.npc_cdw0 = args.ica_cdw0;
7706 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7707 nvme_rescan_ns(nvme, NVME_NSID_BCAST);
7708 }
7709 }
7710 nvme_mgmt_unlock(nvme);
7711
7712 copyout:
7713 rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg,
7714 mode);
7715
7716 return (rv);
7717 }
7718
7719 static int
7720 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode,
7721 cred_t *cred_p)
7722 {
7723 nvme_ioctl_lock_t lock;
7724 const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK;
7725 nvme_t *nvme = minor->nm_ctrl;
7726
7727 if ((mode & FWRITE) == 0)
7728 return (EBADF);
7729
7730 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7731 return (EPERM);
7732
7733 if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock),
7734 mode & FKIOCTL) != 0) {
7735 return (EFAULT);
7736 }
7737
7738 if (lock.nil_ent != NVME_LOCK_E_CTRL &&
7739 lock.nil_ent != NVME_LOCK_E_NS) {
7740 (void) nvme_ioctl_error(&lock.nil_common,
7741 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7742 goto copyout;
7743 }
7744
7745 if (lock.nil_level != NVME_LOCK_L_READ &&
7746 lock.nil_level != NVME_LOCK_L_WRITE) {
7747 (void) nvme_ioctl_error(&lock.nil_common,
7748 NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0);
7749 goto copyout;
7750 }
7751
7752 if ((lock.nil_flags & ~all_flags) != 0) {
7753 (void) nvme_ioctl_error(&lock.nil_common,
7754 NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0);
7755 goto copyout;
7756 }
7757
7758 if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) {
7759 goto copyout;
7760 }
7761
7762 /*
7763 * If we're on a namespace, confirm that we're not asking for the
7764 * controller.
7765 */
7766 if (lock.nil_common.nioc_nsid != 0 &&
7767 lock.nil_ent == NVME_LOCK_E_CTRL) {
7768 (void) nvme_ioctl_error(&lock.nil_common,
7769 NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0);
7770 goto copyout;
7771 }
7772
7773 /*
7774 * We've reached the point where we can no longer actually check things
7775 * without serializing state. First, we need to check to make sure that
7776 * none of our invariants are being broken for locking:
7777 *
7778 * 1) The caller isn't already blocking for a lock operation to
7779 * complete.
7780 *
7781 * 2) The caller is attempting to grab a lock that they already have.
7782 * While there are other rule violations that this might create, we opt
7783 * to check this ahead of it so we can have slightly better error
7784 * messages for our callers.
7785 *
7786 * 3) The caller is trying to grab a controller lock, while holding a
7787 * namespace lock.
7788 *
7789 * 4) The caller has a controller write lock and is trying to get a
7790 * namespace lock. For now, we disallow this case. Holding a controller
7791 * read lock is allowed, but the write lock allows you to operate on all
7792 * namespaces anyways. In addition, this simplifies the locking logic;
7793 * however, this constraint may be loosened in the future.
7794 *
7795 * 5) The caller is trying to acquire a second namespace lock when they
7796 * already have one.
7797 */
7798 mutex_enter(&nvme->n_minor_mutex);
7799 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED ||
7800 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) {
7801 (void) nvme_ioctl_error(&lock.nil_common,
7802 NVME_IOCTL_E_LOCK_PENDING, 0, 0);
7803 mutex_exit(&nvme->n_minor_mutex);
7804 goto copyout;
7805 }
7806
7807 if ((lock.nil_ent == NVME_LOCK_E_CTRL &&
7808 minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) ||
7809 (lock.nil_ent == NVME_LOCK_E_NS &&
7810 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7811 minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) {
7812 (void) nvme_ioctl_error(&lock.nil_common,
7813 NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0);
7814 mutex_exit(&nvme->n_minor_mutex);
7815 goto copyout;
7816 }
7817
7818 if (lock.nil_ent == NVME_LOCK_E_CTRL &&
7819 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7820 (void) nvme_ioctl_error(&lock.nil_common,
7821 NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0);
7822 mutex_exit(&nvme->n_minor_mutex);
7823 goto copyout;
7824 }
7825
7826 if (lock.nil_ent == NVME_LOCK_E_NS &&
7827 (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7828 minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) {
7829 (void) nvme_ioctl_error(&lock.nil_common,
7830 NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0);
7831 mutex_exit(&nvme->n_minor_mutex);
7832 goto copyout;
7833 }
7834
7835 if (lock.nil_ent == NVME_LOCK_E_NS &&
7836 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7837 (void) nvme_ioctl_error(&lock.nil_common,
7838 NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0);
7839 mutex_exit(&nvme->n_minor_mutex);
7840 goto copyout;
7841 }
7842
7843 #ifdef DEBUG
7844 /*
7845 * This is a big block of sanity checks to make sure that we haven't
7846 * allowed anything bad to happen.
7847 */
7848 if (lock.nil_ent == NVME_LOCK_E_NS) {
7849 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7850 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7851 NVME_LOCK_STATE_UNLOCKED);
7852 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7853 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7854
7855 if (minor->nm_ns != NULL) {
7856 ASSERT3U(minor->nm_ns->ns_id, ==,
7857 lock.nil_common.nioc_nsid);
7858 }
7859
7860 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7861 } else {
7862 ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL);
7863 ASSERT3U(minor->nm_ctrl_lock.nli_state, ==,
7864 NVME_LOCK_STATE_UNLOCKED);
7865 ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0);
7866 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7867 ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node));
7868
7869 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7870 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7871 NVME_LOCK_STATE_UNLOCKED);
7872 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7873 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7874 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7875 }
7876 #endif /* DEBUG */
7877
7878 /*
7879 * At this point we should actually attempt a locking operation.
7880 */
7881 nvme_rwlock(minor, &lock);
7882 mutex_exit(&nvme->n_minor_mutex);
7883
7884 copyout:
7885 if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock),
7886 mode & FKIOCTL) != 0) {
7887 return (EFAULT);
7888 }
7889
7890 return (0);
7891 }
7892
7893 static int
7894 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
7895 cred_t *cred_p)
7896 {
7897 nvme_ioctl_unlock_t unlock;
7898 nvme_t *const nvme = minor->nm_ctrl;
7899 boolean_t is_ctrl;
7900 nvme_lock_t *lock;
7901 nvme_minor_lock_info_t *info;
7902
7903 /*
7904 * Note, we explicitly don't check for privileges on unlock. The idea
7905 * is that if you hold the lock, that's what matters. If you don't
7906 * hold the lock, it doesn't matter what privileges you have at
7907 * all.
7908 */
7909 if ((mode & FWRITE) == 0)
7910 return (EBADF);
7911
7912 if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
7913 mode & FKIOCTL) != 0) {
7914 return (EFAULT);
7915 }
7916
7917 if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
7918 unlock.niu_ent != NVME_LOCK_E_NS) {
7919 (void) nvme_ioctl_error(&unlock.niu_common,
7920 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7921 goto copyout;
7922 }
7923
7924 if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) {
7925 goto copyout;
7926 }
7927
7928 /*
7929 * If we're on a namespace, confirm that we're not asking for the
7930 * controller.
7931 */
7932 if (unlock.niu_common.nioc_nsid != 0 &&
7933 unlock.niu_ent == NVME_LOCK_E_CTRL) {
7934 (void) nvme_ioctl_error(&unlock.niu_common,
7935 NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
7936 goto copyout;
7937 }
7938
7939 mutex_enter(&nvme->n_minor_mutex);
7940 if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
7941 if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7942 mutex_exit(&nvme->n_minor_mutex);
7943 (void) nvme_ioctl_error(&unlock.niu_common,
7944 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7945 goto copyout;
7946 }
7947 } else {
7948 if (minor->nm_ns_lock.nli_ns == NULL) {
7949 mutex_exit(&nvme->n_minor_mutex);
7950 (void) nvme_ioctl_error(&unlock.niu_common,
7951 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7952 goto copyout;
7953 }
7954
7955 /*
7956 * Check that our unlock request corresponds to the namespace ID
7957 * that is currently locked. A mismatch can happen if we're using
7958 * the controller node and the caller specified a valid, but not
7959 * locked, namespace ID.
7960 */
7961 if (minor->nm_ns_lock.nli_ns->ns_id !=
7962 unlock.niu_common.nioc_nsid) {
7963 mutex_exit(&nvme->n_minor_mutex);
7964 ASSERT3P(minor->nm_ns, ==, NULL);
7965 (void) nvme_ioctl_error(&unlock.niu_common,
7966 NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
7967 goto copyout;
7968 }
7969
7970 if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7971 mutex_exit(&nvme->n_minor_mutex);
7972 (void) nvme_ioctl_error(&unlock.niu_common,
7973 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7974 goto copyout;
7975 }
7976 }
7977
7978 /*
7979 * Finally, perform the unlock.
7980 */
7981 is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
7982 if (is_ctrl) {
7983 lock = &nvme->n_lock;
7984 info = &minor->nm_ctrl_lock;
7985 } else {
7986 nvme_namespace_t *ns;
7987 const uint32_t nsid = unlock.niu_common.nioc_nsid;
7988
7989 ns = nvme_nsid2ns(nvme, nsid);
7990 lock = &ns->ns_lock;
7991 info = &minor->nm_ns_lock;
7992 VERIFY3P(ns, ==, info->nli_ns);
7993 }
7994 nvme_rwunlock(info, lock);
7995 mutex_exit(&nvme->n_minor_mutex);
7996 nvme_ioctl_success(&unlock.niu_common);
7997
7998 copyout:
7999 if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
8000 mode & FKIOCTL) != 0) {
8001 return (EFAULT);
8002 }
8003
8004 return (0);
8005 }
8006
8007 static int
8008 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
8009 int *rval_p)
8010 {
8011 #ifndef __lock_lint
8012 _NOTE(ARGUNUSED(rval_p));
8013 #endif
8014 int ret;
8015 nvme_minor_t *minor;
8016 nvme_t *nvme;
8017
8018 minor = nvme_minor_find_by_dev(dev);
8019 if (minor == NULL) {
8020 return (ENXIO);
8021 }
8022
8023 nvme = minor->nm_ctrl;
8024 if (nvme == NULL)
8025 return (ENXIO);
8026
8027 if (IS_DEVCTL(cmd))
8028 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
8029
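/*
 * A dead controller still accepts blkdev detach and unlock requests so
 * that cleanup can proceed; all other NVMe ioctls get the saved death
 * status copied out, while non-NVMe ioctls simply get EIO.
 */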
8030 if (nvme->n_dead && (cmd != NVME_IOC_BD_DETACH && cmd !=
8031 NVME_IOC_UNLOCK)) {
8032 if (IS_NVME_IOC(cmd) == 0) {
8033 return (EIO);
8034 }
8035
8036 return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
8037 mode));
8038 }
8039
8040 /*
8041 * ioctls that are no longer using the original ioctl structure.
8042 */
8043 switch (cmd) {
8044 case NVME_IOC_CTRL_INFO:
8045 ret = nvme_ioctl_ctrl_info(minor, arg, mode, cred_p);
8046 break;
8047 case NVME_IOC_IDENTIFY:
8048 ret = nvme_ioctl_identify(minor, arg, mode, cred_p);
8049 break;
8050 case NVME_IOC_GET_LOGPAGE:
8051 ret = nvme_ioctl_get_logpage(minor, arg, mode, cred_p);
8052 break;
8053 case NVME_IOC_GET_FEATURE:
8054 ret = nvme_ioctl_get_feature(minor, arg, mode, cred_p);
8055 break;
8056 case NVME_IOC_BD_DETACH:
8057 ret = nvme_ioctl_bd_detach(minor, arg, mode, cred_p);
8058 break;
8059 case NVME_IOC_BD_ATTACH:
8060 ret = nvme_ioctl_bd_attach(minor, arg, mode, cred_p);
8061 break;
8062 case NVME_IOC_FORMAT:
8063 ret = nvme_ioctl_format(minor, arg, mode, cred_p);
8064 break;
8065 case NVME_IOC_FIRMWARE_DOWNLOAD:
8066 ret = nvme_ioctl_firmware_download(minor, arg, mode, cred_p);
8067 break;
8068 case NVME_IOC_FIRMWARE_COMMIT:
8069 ret = nvme_ioctl_firmware_commit(minor, arg, mode, cred_p);
8070 break;
8071 case NVME_IOC_NS_INFO:
8072 ret = nvme_ioctl_ns_info(minor, arg, mode, cred_p);
8073 break;
8074 case NVME_IOC_PASSTHRU:
8075 ret = nvme_ioctl_passthru(minor, arg, mode, cred_p);
8076 break;
8077 case NVME_IOC_LOCK:
8078 ret = nvme_ioctl_lock(minor, arg, mode, cred_p);
8079 break;
8080 case NVME_IOC_UNLOCK:
8081 ret = nvme_ioctl_unlock(minor, arg, mode, cred_p);
8082 break;
8083 case NVME_IOC_CTRL_DETACH:
8084 ret = nvme_ioctl_ctrl_detach(minor, arg, mode, cred_p);
8085 break;
8086 case NVME_IOC_CTRL_ATTACH:
8087 ret = nvme_ioctl_ctrl_attach(minor, arg, mode, cred_p);
8088 break;
8089 case NVME_IOC_NS_CREATE:
8090 ret = nvme_ioctl_ns_create(minor, arg, mode, cred_p);
8091 break;
8092 case NVME_IOC_NS_DELETE:
8093 ret = nvme_ioctl_ns_delete(minor, arg, mode, cred_p);
8094 break;
8095 default:
8096 ret = ENOTTY;
8097 break;
8098 }
8099
8100 ASSERT(!nvme_mgmt_lock_held(nvme));
8101 return (ret);
8102 }
8103
8104 /*
8105 * DDI UFM Callbacks
8106 */
8107 static int
8108 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
8109 ddi_ufm_image_t *img)
8110 {
8111 nvme_t *nvme = arg;
8112
8113 if (imgno != 0)
8114 return (EINVAL);
8115
8116 ddi_ufm_image_set_desc(img, "Firmware");
8117 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
8118
8119 return (0);
8120 }
8121
8122 /*
8123 * Fill out firmware slot information for the requested slot. The firmware
8124 * slot information is gathered by requesting the Firmware Slot Information log
8125 * page. The format of the page is described in section 5.10.1.3.
8126 *
8127 * We lazily cache the log page on the first call and then invalidate the cache
8128 * data after a successful firmware download or firmware commit command.
8129 * The cached data is protected by a mutex as the state can change
8130 * asynchronously to this callback.
8131 */
8132 static int
8133 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
8134 uint_t slotno, ddi_ufm_slot_t *slot)
8135 {
8136 nvme_t *nvme = arg;
8137 void *log = NULL;
8138 size_t bufsize;
8139 ddi_ufm_attr_t attr = 0;
8140 char fw_ver[NVME_FWVER_SZ + 1];
8141
8142 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
8143 return (EINVAL);
8144
8145 mutex_enter(&nvme->n_fwslot_mutex);
8146 if (nvme->n_fwslot == NULL) {
8147 if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
8148 NVME_LOGPAGE_FWSLOT) ||
8149 bufsize != sizeof (nvme_fwslot_log_t)) {
8150 if (log != NULL)
8151 kmem_free(log, bufsize);
8152 mutex_exit(&nvme->n_fwslot_mutex);
8153 return (EIO);
8154 }
8155 nvme->n_fwslot = (nvme_fwslot_log_t *)log;
8156 }
8157
8158 /*
8159 * NVMe numbers firmware slots starting at 1
8160 */
8161 if (slotno == (nvme->n_fwslot->fw_afi - 1))
8162 attr |= DDI_UFM_ATTR_ACTIVE;
8163
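/*
 * The first slot is writable only if the controller doesn't report it as
 * read-only; all other slots are always writable.
 */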
8164 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
8165 attr |= DDI_UFM_ATTR_WRITEABLE;
8166
8167 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
8168 attr |= DDI_UFM_ATTR_EMPTY;
8169 } else {
8170 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
8171 NVME_FWVER_SZ);
8172 fw_ver[NVME_FWVER_SZ] = '\0';
8173 ddi_ufm_slot_set_version(slot, fw_ver);
8174 }
8175 mutex_exit(&nvme->n_fwslot_mutex);
8176
8177 ddi_ufm_slot_set_attrs(slot, attr);
8178
8179 return (0);
8180 }
8181
8182 static int
8183 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
8184 {
8185 *caps = DDI_UFM_CAP_REPORT;
8186 return (0);
8187 }
8188
8189 boolean_t
8190 nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
8191 {
8192 return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
8193 }
8194