/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2019 Unix Software Ltd.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2020 Racktop Systems.
 * Copyright 2024 Oxide Computer Company.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
 * devices. Features are added to the driver as we encounter devices that
 * require them and as our needs dictate, so some commands or log pages may not
 * take advantage of newer features that devices support at this time. When you
 * encounter such a case, it is generally fine to add that support to the
 * driver as long as you take care to ensure that the requisite device version
 * is met before using it.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to use
 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
 * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
 * the admin queue will share an interrupt with one I/O queue. The interrupt
 * handler will retrieve completed commands from all queues sharing an
 * interrupt vector and will post them to a taskq for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array. The
 * array index is used as command identifier (CID) in the submission queue
 * entry.
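 *
 * As a purely illustrative sketch of that bookkeeping (the real logic,
 * including the slot search described next, lives in nvme_submit_cmd_common()
 * later in this file):
 *
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;	(array index becomes the CID)
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;	(remember the inflight command)
 *	qp->nq_active_cmds++;			(bump the active counter)
 *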
 * Some commands may take a very long time to complete, and if the queue wraps
 * around in that time a submission may find the next array slot to still be
 * used by a long-running command. In this case the array is sequentially
 * searched for the next free slot. The length of the command array is the same
 * as the configured queue length. Queue overrun is prevented by the semaphore,
 * so a command submission may block if the queue is full.
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts
 * are turned off while dumping the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support protection information. This driver does not support any of this
 * and ignores namespaces that have these attributes.
 *
 * As of NVMe 1.1 namespaces can have a 64-bit Extended Unique Identifier
 * (EUI64), and NVMe 1.2 introduced an additional 128-bit Namespace Globally
 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64,
 * if present, to generate the devid, and passes the EUI64 to blkdev to use it
 * in the device node names.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces while
 * fitting within the constraints of MAXMIN32, aka a 32-bit device number which
 * only has 18 bits for the minor number. See the minor node section for more
 * information.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller
 * and one minor node for each namespace. The only operations supported by
 * those minor nodes are open(9E), close(9E), and ioctl(9E). This serves as
 * the primary control interface for the devices. The character device is a
 * private interface and we attempt stability through libnvme and, more so,
 * nvmeadm.
 *
 * The controller minor node is much more flexible than the namespace minor
 * node and should be preferred. The controller node allows one to target any
 * namespace that the device has, while the namespace minor is limited in what
 * it can acquire. While the namespace minor exists, it should not be relied
 * upon and is not relied upon by libnvme.
 *
 * The minor number space is split in two. We use the lower part to support
 * the controller and namespaces as described above in the 'Namespace Support'
 * section. The second set is used for cloning opens. We set aside one million
 * minors for this purpose. We utilize a cloning open so that we can have
 * per-file_t state. This is how we end up implementing and tracking locking
 * state and related information.
 *
 * When we have this cloned open, then we allocate a new nvme_minor_t which
 * gets its minor number from the nvme_open_minors id_space_t and is stored in
 * the nvme_open_minors_avl. While someone calls open on a controller or
 * namespace minor, everything else occurs in the context of one of these
 * ephemeral minors.
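 *
 * As a minimal sketch, assuming the helper names used later in this file
 * (id_alloc_nosleep() is the stock id_space interface; the surrounding steps
 * are paraphrased), the cloning open conceptually does:
 *
 *	id_t id = id_alloc_nosleep(nvme_open_minors);
 *	*devp = makedevice(getmajor(*devp), (minor_t)id);
 *	(allocate an nvme_minor_t, record it in nvme_open_minors_avl)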
 *
 *
 * ioctls, Errors, and Exclusive Access:
 *
 * All of the logical commands that one can issue are driven through the
 * ioctl(9E) interface. All of our ioctls have a similar shape where they
 * all include the 'nvme_ioctl_common_t' as their first member.
 *
 * This common ioctl structure is used to communicate the namespace that should
 * be targeted. When the namespace is left as 0, then that indicates that it
 * should target whatever the default is of the minor node. For a namespace
 * minor, that will be transparently rewritten to the namespace's namespace id.
 *
 * In addition, the nvme_ioctl_common_t structure also has a standard error
 * return. Our goal in our ioctl path is to ensure that we have useful semantic
 * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead,
 * as long as we can copy in our structure, we will set a semantic error. If
 * we have an error from the controller, then that will be included there.
 *
 * Each command has a specific policy that controls whether or not it is
 * allowed on the namespace or controller minor, whether the broadcast
 * namespace is allowed, various settings around what kind of exclusive access
 * is allowed, and more. Each of these is wrapped up in a bit of policy
 * described by the 'nvme_ioctl_check_t' structure.
 *
 * The device provides a form of exclusion in the form of both a
 * controller-level and namespace-level read and write lock. Most operations
 * do not require a lock (e.g. get log page, identify, etc.), but a few do
 * (e.g. format nvm, firmware related activity, etc.). A read lock guarantees
 * that you can complete your operation without interference, but read locks
 * are not required. If you don't take a read lock and someone comes in with a
 * write lock, then subsequent operations will fail with a semantic error
 * indicating that you were blocked due to this.
 *
 * Here are some of the rules that govern our locks:
 *
 * 1. Writers starve readers. Any readers are allowed to finish when there is
 *    a pending writer; however, all subsequent readers will be blocked upon
 *    that writer.
 * 2. A controller write lock takes priority over all other locks. Put
 *    differently a controller writer not only starves subsequent controller
 *    readers, but also all namespace read and write locks.
 * 3. Each namespace lock is independent.
 * 4. At most a single namespace lock may be owned.
 * 5. If you own a namespace lock, you may not take a controller lock (to help
 *    with lock ordering).
 * 6. In a similar spirit, if you own a controller write lock, you may not
 *    take any namespace lock. Someone with the controller write lock can
 *    perform any operations that they need to. However, if you have a
 *    controller read lock you may take any namespace lock.
 * 7. There is no ability to upgrade a read lock to a write lock.
 * 8. There is no recursive locking.
 *
 * While there's a lot there to keep track of, the goal of these rules is to
 * constrain things so as to avoid deadlock. This is more complex than the
 * original implementation in the driver which only allowed for an exclusive
 * open that was tied to the thread. The first issue with tying this to the
 * thread was that it didn't work well for software that utilized thread
 * pools, like complex daemons.
 * The second issue is that we want the ability for daemons, such as a FRU
 * monitor, to be able to retain a file descriptor to the device without
 * blocking others from taking action except during critical periods.
 *
 * In particular to enable something like libnvme, we didn't want someone to
 * have to open and close the file descriptor to change what kind of exclusive
 * access they desired.
 *
 * There are two different sets of data structures that we employ for tracking
 * locking information:
 *
 * 1) The nvme_lock_t structure is contained in both the nvme_t and the
 * nvme_namespace_t and tracks the current writer, readers, and pending writers
 * and readers. These lists and the writer pointer all refer to our second
 * data structure.
 *
 * When a lock is owned by a single writer, then the nl_writer field is set to
 * a specific minor's lock data structure. If instead readers are present, then
 * the nl_readers list_t is not empty. An invariant of the system is that if
 * nl_writer is non-NULL, nl_readers must be empty and conversely, if
 * nl_readers is not empty, nl_writer must be NULL.
 *
 * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one
 * information structure which represents the minor's controller lock and a
 * second one that represents the minor's namespace lock. The members of this
 * are broken into tracking what the current lock is and what it targets. It
 * also has several members that are intended for debugging (nli_last_change,
 * nli_acq_kthread, etc.).
 *
 * While the minor has two different lock information structures, our rules
 * ensure that only one of the two can be pending and that they shouldn't
 * result in a deadlock. When a lock is pending, the caller is sleeping on the
 * minor's nm_cv member.
 *
 * These relationships are represented in the following image which shows a
 * controller write lock being held with pending readers on the controller
 * lock and pending writers on one of the controller's namespaces.
 *
 *  +---------+
 *  | nvme_t  |
 *  |         |
 *  | n_lock -|-------+
 *  | n_ns -+ |       |                        +-----------------------------+
 *  +-------|-+       +-----------------+      | nvme_minor_t                |
 *          |         | nvme_lock_t     |      |                             |
 *          |         |                 |      | +------------------------+  |
 *          |         | writer        --|------>| nvme_minor_lock_info_t |  |
 *          |         | reader list     |      | | nm_ctrl_lock           |  |
 *          |         | pending writers |      | +------------------------+  |
 *          |         | pending readers |---+  | +------------------------+  |
 *          |         +-----------------+   |  | | nvme_minor_lock_info_t |  |
 *          |                               |  | | nm_ns_lock             |  |
 *          |                               |  | +------------------------+  |
 *          |                               |  +-----------------------------+
 *  +------------------+                    |  +-----------------+
 *  | nvme_namespace_t |                    |  | nvme_minor_t    |
 *  |                  |                    |  |                 |
 *  |  ns_lock ---+    |                    |  | +-------------+ |
 *  +------------|-----+                    +--|>|nm_ctrl_lock | |
 *               |                             | +-------------+ |
 *               v                             +-----------------+
 *  +------------------+                               ...
 *  | nvme_lock_t      |                     +-----------------+
 *  |                  |                     | nvme_minor_t    |
 *  | writer           |                     |                 |
 *  | reader list      |                     | +-------------+ |
 *  | pending writers -|---------------+     | |nm_ctrl_lock | |
 *  | pending readers  |               |     | +-------------+ |
 *  +------------------+               |     +-----------------+
 *  +-----------------------------+    |    +-----------------------------+
 *  | nvme_minor_t                |    |    | nvme_minor_t                |
 *  |                             |    |    |                             |
 *  | +------------------------+  |    |    | +------------------------+  |
 *  | | nvme_minor_lock_info_t |  |    |    | | nvme_minor_lock_info_t |  |
 *  | | nm_ctrl_lock           |  |    |    | | nm_ctrl_lock           |  |
 *  | +------------------------+  |    |    | +------------------------+  |
 *  | +------------------------+  |    v    | +------------------------+  |
 *  | | nvme_minor_lock_info_t |-|-----|-->| nvme_minor_lock_info_t |    |
 *  | | nm_ns_lock             |  |         | | nm_ns_lock             |  |
 *  | +------------------------+  |         | +------------------------+  |
 *  +-----------------------------+         +-----------------------------+
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with presenting
 * a disk device to the system. As a result, the processing of I/O requests is
 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
 * setup, and splitting of transfers into manageable chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the namespace
 * format back to blkdev as physical block size to support partition and block
 * alignment. The devid is either based on the namespace GUID or EUI64, if
 * present, or composed using the device vendor ID, model number, serial
 * number, and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off,
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests. Before
 * this limit is known the driver assumes it is at least 1 and posts a single
 * asynchronous request. Later when the limit is known more asynchronous event
 * requests are posted to allow quicker reception of error information. When an
 * asynchronous event is posted by the hardware the driver will parse the error
 * status fields and log information or fault the device, depending on the
 * severity of the asynchronous event. The asynchronous event request is then
 * reused and posted to the admin queue again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware appears
 * to be healthy the driver attempts to abort the command.
The abort command 319 * timeout is a separate tunable but the original command timeout will be used 320 * if it is greater. If the abort times out too the driver assumes the device 321 * to be dead, fences it off, and calls FMA to retire it. In all other cases 322 * the aborted command should return immediately with a status indicating it 323 * was aborted, and the driver will wait indefinitely for that to happen. No 324 * timeout handling of normal I/O commands is presently done. 325 * 326 * Any command that times out due to the controller dropping dead will be put on 327 * nvme_lost_cmds list if it references DMA memory. This will prevent the DMA 328 * memory being reused by the system and later being written to by a "dead" 329 * NVMe controller. 330 * 331 * 332 * Locking: 333 * 334 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held 335 * when accessing shared state and submission queue registers, ncq_mutex 336 * is held when accessing completion queue state and registers. 337 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while 338 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both 339 * mutexes themselves. 340 * 341 * Each command also has its own nc_mutex, which is associated with the 342 * condition variable nc_cv. It is only used on admin commands which are run 343 * synchronously. In that case it must be held across calls to 344 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by 345 * nvme_admin_cmd(). It must also be held whenever the completion state of the 346 * command is changed or while an admin command timeout is handled. 347 * 348 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first. 349 * More than one nc_mutex may only be held when aborting commands. In this case, 350 * the nc_mutex of the command to be aborted must be held across the call to 351 * nvme_abort_cmd() to prevent the command from completing while the abort is in 352 * progress. 353 * 354 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be 355 * acquired first. More than one nq_mutex is never held by a single thread. 356 * The ncq_mutex is only held by nvme_retrieve_cmd() and 357 * nvme_process_iocq(). nvme_process_iocq() is only called from the 358 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the 359 * mutex is non-contentious but is required for implementation completeness 360 * and safety. 361 * 362 * Each nvme_t has an n_admin_stat_mutex that protects the admin command 363 * statistics structure. If this is taken in conjunction with any other locks, 364 * then it must be taken last. 365 * 366 * There is one mutex n_minor_mutex which protects all open flags nm_open and 367 * exclusive-open thread pointers nm_oexcl of each minor node associated with a 368 * controller and its namespaces. 369 * 370 * In addition, there is a logical namespace management mutex which protects the 371 * data about namespaces. When interrogating the metadata of any namespace, this 372 * lock must be held. This gets tricky as we need to call into blkdev, which may 373 * issue callbacks into us which want this and it is illegal to hold locks 374 * across those blkdev calls as otherwise they might lead to deadlock (blkdev 375 * leverages ndi_devi_enter()). 376 * 377 * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev 378 * read-only. The idea is that most callers will use the NVME level which says 379 * this is a full traditional mutex operation. 
 * The BDRO level is used by blkdev callback functions and is a promise to
 * only read the data. When a blkdev operation starts, the lock holder will
 * use nvme_mgmt_bd_start(). This strictly speaking drops the mutex, but
 * records that the lock is logically held by the thread that did the start()
 * operation.
 *
 * During this time, other threads (or even the same one) may end up calling
 * into nvme_mgmt_lock(). Only one person may still hold the lock at any time;
 * however, the BDRO level will be allowed to proceed during this time. This
 * allows us to make consistent progress and honor the blkdev lock ordering
 * requirements, albeit it is not as straightforward as a simple mutex.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry point
 * is still provided which is used to send a shutdown notification to the
 * device.
 *
 *
 * NVMe Hotplug:
 *
 * The driver supports hot removal. The driver uses the NDI event framework
 * to register a callback, nvme_remove_callback, to clean up when a disk is
 * removed. In particular, the driver will unqueue outstanding I/O commands and
 * set n_dead on the softstate to true so that other operations, such as ioctls
 * and command submissions, fail as well.
 *
 * While the callback registration relies on the NDI event framework, the
 * removal event itself is kicked off in the PCIe hotplug framework, when the
 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
 * device was removed from the slot.
 *
 * The NVMe driver instance itself will remain until the final close of the
 * device.
 *
 *
 * DDI UFM Support
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to be
 *   posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 *   cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues.
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared.
 *
 * In addition to the above properties, some device-specific tunables can be
 * configured using the nvme-config-list global property. The value of this
 * property is a list of triplets.
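 * As a purely hypothetical example (the model string, firmware revision, and
 * value below are made up for illustration), a single triplet set from
 * /etc/driver.conf could look like:
 *
 *	nvme-config-list = "SAMPLE MODEL X", "1.0", "min-phys-block-size:4096";
 *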
 * The formal syntax is:
 *
 *   nvme-config-list ::= <triplet> [, <triplet>]* ;
 *   <triplet>        ::= "<model>" , "<rev-list>" , "<tuple-list>"
 *   <rev-list>       ::= [ <fwrev> [, <fwrev>]*]
 *   <tuple-list>     ::= <tunable> [, <tunable>]*
 *   <tunable>        ::= <name> : <value>
 *
 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
 * contains one or more tunables to apply to all controllers that match the
 * specified model number and optionally firmware revision. Each <tunable> is a
 * <name> : <value> pair. Supported tunables are:
 *
 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 *
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 *
 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
 *   volatile write cache, if present
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>
#include <sys/dkio.h>
#include <sys/pci.h>
#include <sys/mkdev.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"

/*
 * Assertions to make sure that we've properly captured various aspects of the
 * packed structures and haven't broken them during updates.
 */
CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);

CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);

CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);

CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);

CTASSERT(sizeof (nvme_nschange_list_t) == 4096);

/* NVMe spec version supported */
static const int nvme_version_major = 2;

/* Tunable for FORMAT NVM command timeout in seconds, default is 600s */
uint32_t nvme_format_cmd_timeout = 600;

/* Tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
uint32_t nvme_commit_save_cmd_timeout = 15;

/*
 * Tunable for the admin command timeout used for commands other than those
 * with their own timeouts defined above; in seconds. While most commands are
 * expected to complete very quickly (sub-second), experience has shown that
 * some controllers can occasionally be a bit slower, and not always consistent
 * in the time taken - times of up to around 4.2s have been observed. Setting
 * this to 15s by default provides headroom.
 */
uint32_t nvme_admin_cmd_timeout = 15;

/*
 * Tunable for abort command timeout in seconds, default is 60s. This timeout
 * is used when issuing an abort command, currently only in response to a
 * different admin command timing out. Aborts always complete after the command
 * that they are attempting to abort, so we need to allow enough time for the
 * controller to process the long-running command that we are attempting to
 * abort. The abort timeout here is only used if it is greater than the timeout
 * for the command that is being aborted.
 */
uint32_t nvme_abort_cmd_timeout = 60;

/*
 * Tunable for the size of arbitrary vendor specific admin commands,
 * default is 16MiB.
 */
uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;

/*
 * Tunable for the max timeout of arbitrary vendor specific admin commands,
 * default is 60s.
 */
uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;

/*
 * This ID space, AVL, and lock are used for keeping track of minor state
 * across opens between different devices.
 */
static id_space_t *nvme_open_minors;
static avl_tree_t nvme_open_minors_avl;
kmutex_t nvme_open_minors_mutex;

/*
 * Removal taskq used for n_dead callback processing.
 */
taskq_t *nvme_dead_taskq;

/*
 * This enumeration is used in tandem with nvme_mgmt_lock() to describe which
 * form of the lock is being taken. See the theory statement for more context.
 */
typedef enum {
	/*
	 * This is the primary form of taking the management lock and indicates
	 * that the user intends to do a read/write of it. This should always
	 * be used for any ioctl paths or truly anything other than a blkdev
	 * information operation.
	 */
	NVME_MGMT_LOCK_NVME,
	/*
	 * This is a subordinate form of the lock whereby the user is in blkdev
	 * callback context and will only intend to read the namespace data.
	 */
	NVME_MGMT_LOCK_BDRO
} nvme_mgmt_lock_level_t;

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);
static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
    nvme_ioctl_common_t *);

static int nvme_abort_cmd(nvme_cmd_t *, const uint32_t);
static void nvme_async_event(nvme_t *);
static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
    uint8_t);
static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
    void **);
static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static int nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
static int nvme_bd_free_space(void *, bd_xfer_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

/* DDI UFM callbacks */
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

static int nvme_init_ns(nvme_t *, uint32_t);
static boolean_t nvme_attach_ns(nvme_t *, nvme_ioctl_common_t *);
static boolean_t nvme_detach_ns(nvme_t *, nvme_ioctl_common_t *);

static int nvme_minor_comparator(const void *, const void *);

static ddi_ufm_ops_t nvme_ufm_ops = {
	NULL,
	nvme_ufm_fill_image,
	nvme_ufm_fill_slot,
	nvme_ufm_getcaps
};

/*
 * Minor numbers are split amongst those used for controllers and for device
 * opens. The number of controller minors is limited based upon MAXMIN32 per
 * the theory statement. We allocate 1 million minors as a total guess at a
 * number that'll probably be enough. The starting point of the open minors can
 * be shifted to accommodate future expansion of the NVMe device minors.
 */
#define	NVME_MINOR_INST_SHIFT	9
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)

#define	NVME_OPEN_NMINORS		(1024 * 1024)
#define	NVME_OPEN_MINOR_MIN		(MAXMIN32 + 1)
#define	NVME_OPEN_MINOR_MAX_EXCL	(NVME_OPEN_MINOR_MIN + \
	NVME_OPEN_NMINORS)

#define	NVME_BUMP_STAT(nvme, stat)	\
	atomic_inc_64(&nvme->n_device_stat.nds_ ## stat.value.ui64)

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
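 *
 * As a worked example: a full-sized 65536-entry submission queue of 64-byte
 * entries spans 65536 * 64 = 4 MiB, which is where the
 * (UINT16_MAX + 1) * sizeof (nvme_sqe_t) values used for dma_attr_count_max
 * and dma_attr_maxxfer below come from.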
 */
static const ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= 1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It
 * uses a 64-bit base address aligned to this page size. There is no limitation
 * on chaining PRPs together for arbitrarily large DMA transfers. These DMA
 * attributes will be copied into the nvme_t during nvme_attach() and the
 * dma_attr_maxxfer will be updated.
 */
static const ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xfff,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= 0x1000,
	.dma_attr_seg		= 0xfff,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * A SGL entry describes a chunk of DMA memory using a 64-bit base address and
 * a 32-bit length field. SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied
 * into the nvme_t, they are not currently used for any I/O.
 */
static const ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xffffffffUL,
	.dma_attr_align		= 1,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x10,
	.dma_attr_maxxfer	= 0xfffffffffULL,
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 0x10,
	.dma_attr_flags		= 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
};

/*
 * ioctl validation policies. These are policies that determine which
 * namespaces are allowed or disallowed for various operations. Note, all
 * policy items should be explicitly listed here to help make it clear what
 * our intent is. That is also why some of these are identical or repeated
 * when they cover different ioctls.
 */

/*
 * The controller information ioctl generally contains read-only information
 * about the controller that is sourced from multiple different pieces of
 * information. This does not operate on a namespace and none are accepted.
 */
static const nvme_ioctl_check_t nvme_check_ctrl_info = {
	.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
};

/*
 * The kernel namespace information requires a namespace ID to be specified.
 * It does not allow for the broadcast ID to be specified.
 */
static const nvme_ioctl_check_t nvme_check_ns_info = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
};

/*
 * Identify commands are allowed to operate on a namespace minor.
 * Unfortunately, the namespace field in identify commands is a bit weird. In
 * particular, some commands need a valid namespace, while others are
 * namespace listing operations, which means illegal namespaces like zero are
 * allowed.
 */
static const nvme_ioctl_check_t nvme_check_identify = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};

/*
 * The get log page command requires the ability to specify namespaces. When
 * targeting the controller, one must use the broadcast NSID.
 */
static const nvme_ioctl_check_t nvme_check_get_logpage = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};

/*
 * When getting a feature, we do not want rewriting behavior as most features
 * do not require a namespace to be specified. Specific instances are checked
 * in nvme_validate_get_feature().
 */
static const nvme_ioctl_check_t nvme_check_get_feature = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};

/*
 * Format commands must target a namespace. The broadcast namespace must be
 * used when referring to the controller.
 */
static const nvme_ioctl_check_t nvme_check_format = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};

/*
 * Attach and detach must always target a minor. However, the broadcast
 * namespace is not allowed. We still perform rewriting so that specifying
 * the controller node with 0 will be caught.
 */
static const nvme_ioctl_check_t nvme_check_attach_detach = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};

/*
 * Firmware operations must not target a namespace and are only allowed from
 * the controller.
 */
static const nvme_ioctl_check_t nvme_check_firmware = {
	.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};

/*
 * Passthru commands are an odd set. We only allow them from the primary
 * controller; however, we allow a namespace to be specified in them and allow
 * the broadcast namespace. We do not perform rewriting because we don't know
 * what the semantics are. We explicitly exempt passthru commands from needing
 * an exclusive lock and leave it up to them to tell us the impact of the
 * command and semantics. As this is a privileged interface and the semantics
 * are arbitrary, there's not much we can do without some assistance from the
 * consumer.
 */
static const nvme_ioctl_check_t nvme_check_passthru = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};

/*
 * Lock operations are allowed to target a namespace, but must not be
 * rewritten. There is no support for the broadcast namespace. This is the
 * only ioctl that should skip exclusive checking as it's used to grant it.
 */
static const nvme_ioctl_check_t nvme_check_locking = {
	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
};

static struct cb_ops nvme_cb_ops = {
	.cb_open	= nvme_open,
	.cb_close	= nvme_close,
	.cb_strategy	= nodev,
	.cb_print	= nodev,
	.cb_dump	= nodev,
	.cb_read	= nodev,
	.cb_write	= nodev,
	.cb_ioctl	= nvme_ioctl,
	.cb_devmap	= nodev,
	.cb_mmap	= nodev,
	.cb_segmap	= nodev,
	.cb_chpoll	= nochpoll,
	.cb_prop_op	= ddi_prop_op,
	.cb_str		= 0,
	.cb_flag	= D_NEW | D_MP,
	.cb_rev		= CB_REV,
	.cb_aread	= nodev,
	.cb_awrite	= nodev
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev	= DEVO_REV,
	.devo_refcnt	= 0,
	.devo_getinfo	= ddi_no_info,
	.devo_identify	= nulldev,
	.devo_probe	= nulldev,
	.devo_attach	= nvme_attach,
	.devo_detach	= nvme_detach,
	.devo_reset	= nodev,
	.devo_cb_ops	= &nvme_cb_ops,
	.devo_bus_ops	= NULL,
	.devo_power	= NULL,
	.devo_quiesce	= nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops	= &mod_driverops,
	.drv_linkinfo	= "NVMe driver",
	.drv_dev_ops	= &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev		= MODREV_1,
	.ml_linkage	= { &nvme_modldrv, NULL }
};

static bd_ops_t nvme_bd_ops = {
	.o_version	= BD_OPS_CURRENT_VERSION,
	.o_drive_info	= nvme_bd_driveinfo,
	.o_media_info	= nvme_bd_mediainfo,
	.o_devid_init	= nvme_bd_devid,
	.o_sync_cache	= nvme_bd_sync,
	.o_read		= nvme_bd_read,
	.o_write	= nvme_bd_write,
	.o_free_space	= nvme_bd_free_space,
};

/*
 * This list will hold commands that have timed out and couldn't be aborted.
 * As we don't know what the hardware may still do with the DMA memory we can't
 * free them, so we'll keep them forever on this list where we can easily look
 * at them with mdb.
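 *
 * A hypothetical mdb session (::walk and ::print are standard dcmds; the
 * fields printed are merely illustrative) could inspect the list with:
 *
 *	> nvme_lost_cmds::walk list | ::print nvme_cmd_t nc_sqid nc_sqe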
 */
static struct list nvme_lost_cmds;
static kmutex_t nvme_lc_mutex;

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	if ((nvme_open_minors = id_space_create("nvme_open_minors",
	    NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
		ddi_soft_state_fini(&nvme_state);
		return (ENOMEM);
	}

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
	    offsetof(nvme_cmd_t, nc_list));

	mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
	avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
	    sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));

	nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
	    TASKQ_PREPOPULATE);

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		id_space_destroy(nvme_open_minors);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
		mutex_destroy(&nvme_open_minors_mutex);
		avl_destroy(&nvme_open_minors_avl);
		taskq_destroy(nvme_dead_taskq);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	if (!list_is_empty(&nvme_lost_cmds))
		return (DDI_FAILURE);

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		id_space_destroy(nvme_open_minors);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
		mutex_destroy(&nvme_open_minors_mutex);
		avl_destroy(&nvme_open_minors_avl);
		taskq_destroy(nvme_dead_taskq);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}

static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static void
nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
{
	ASSERT3U(lock->nml_bd_own, ==, 0);
	mutex_destroy(&lock->nml_lock);
	cv_destroy(&lock->nml_cv);
}

static void
nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
{
	mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
	lock->nml_bd_own = 0;
}

static void
nvme_mgmt_unlock(nvme_t *nvme)
{
	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;

	cv_broadcast(&lock->nml_cv);
	mutex_exit(&lock->nml_lock);
}

#ifdef DEBUG
static boolean_t
nvme_mgmt_lock_held(nvme_t *nvme)
{
	return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
}
#endif	/* DEBUG */

static void
nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
{
	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
	mutex_enter(&lock->nml_lock);
	while (lock->nml_bd_own != 0) {
		if (level == NVME_MGMT_LOCK_BDRO)
			break;
		cv_wait(&lock->nml_cv, &lock->nml_lock);
	}
}

/*
 * This and nvme_mgmt_bd_end() are used to indicate that the driver is going
 * to be calling into a re-entrant blkdev related function. We cannot hold the
 * lock across such an operation and therefore must indicate that this is
 * logically held, while allowing other operations to proceed.
 * nvme_mgmt_bd_end() may only be called by a thread that already holds the
 * nvme_mgmt_lock().
 */
static void
nvme_mgmt_bd_start(nvme_t *nvme)
{
	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;

	VERIFY(MUTEX_HELD(&lock->nml_lock));
	VERIFY3U(lock->nml_bd_own, ==, 0);
	lock->nml_bd_own = (uintptr_t)curthread;
	mutex_exit(&lock->nml_lock);
}

static void
nvme_mgmt_bd_end(nvme_t *nvme)
{
	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;

	mutex_enter(&lock->nml_lock);
	VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
	lock->nml_bd_own = 0;
}

/*
 * This is a central clearing house for marking an NVMe controller dead and/or
 * removed. This takes care of setting the flag, taking care of outstanding
 * blocked locks, and sending a DDI FMA impact. This is called from a
 * precarious place where locking is suspect. The only guarantee we have is
 * that the nvme_t is valid and won't disappear until we return.
 *
 * This should only be used after attach has been called.
 */
static void
nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
{
	boolean_t was_dead;

	/*
	 * See if we win the race to set things up here. If someone beat us to
	 * it, we do not do anything.
	 */
	was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
	    B_TRUE);
	if (was_dead) {
		return;
	}

	/*
	 * If this was removed, there is no reason to change the service
	 * impact. However, then we need to change our default return code
	 * that we use here to indicate that it was gone versus that it is
	 * dead.
	 */
	if (removed) {
		nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
	} else {
		ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
	}

	taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
	    TQ_NOSLEEP, &nvme->n_dead_tqent);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static void
nvme_free_dma_common(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	nvme_free_dma_common(dma);
	kmem_free(dma, sizeof (*dma));
}

static void
nvme_prp_dma_destructor(void *buf, void *private __unused)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;

	nvme_free_dma_common(dma);
}

static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
    size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR which
		 * indicates a driver bug which should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		NVME_BUMP_STAT(nvme, dma_bind_err);
		nvme_free_dma_common(dma);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
	    DDI_SUCCESS) {
		*ret = NULL;
		kmem_free(dma, sizeof (nvme_dma_t));
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}

static int
nvme_prp_dma_constructor(void *buf, void *private, int flags __unused)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;
	nvme_t *nvme = (nvme_t *)private;

	dma->nd_dmah = NULL;
	dma->nd_acch = NULL;

	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
		return (-1);
	}

	ASSERT(dma->nd_ncookie == 1);

	dma->nd_cached = B_TRUE;

	return (0);
}

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}

static void
nvme_free_cq(nvme_cq_t *cq)
{
	mutex_destroy(&cq->ncq_mutex);

	if (cq->ncq_cmd_taskq != NULL)
		taskq_destroy(cq->ncq_cmd_taskq);

	if (cq->ncq_dma != NULL)
		nvme_free_dma(cq->ncq_dma);

	kmem_free(cq, sizeof (*cq));
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);
	sema_destroy(&qp->nq_sema);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

/*
 * Destroy the pre-allocated cq array, but only free individual completion
 * queues from the given starting index.
1454 */ 1455 static void 1456 nvme_destroy_cq_array(nvme_t *nvme, uint_t start) 1457 { 1458 uint_t i; 1459 1460 for (i = start; i < nvme->n_cq_count; i++) 1461 if (nvme->n_cq[i] != NULL) 1462 nvme_free_cq(nvme->n_cq[i]); 1463 1464 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count); 1465 } 1466 1467 static int 1468 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx, 1469 uint_t nthr) 1470 { 1471 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP); 1472 char name[64]; /* large enough for the taskq name */ 1473 1474 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER, 1475 DDI_INTR_PRI(nvme->n_intr_pri)); 1476 1477 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), 1478 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS) 1479 goto fail; 1480 1481 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp; 1482 cq->ncq_nentry = nentry; 1483 cq->ncq_id = idx; 1484 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx); 1485 1486 /* 1487 * Each completion queue has its own command taskq. 1488 */ 1489 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u", 1490 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx); 1491 1492 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX, 1493 TASKQ_PREPOPULATE); 1494 1495 if (cq->ncq_cmd_taskq == NULL) { 1496 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd " 1497 "taskq for cq %u", idx); 1498 goto fail; 1499 } 1500 1501 *cqp = cq; 1502 return (DDI_SUCCESS); 1503 1504 fail: 1505 nvme_free_cq(cq); 1506 *cqp = NULL; 1507 1508 return (DDI_FAILURE); 1509 } 1510 1511 /* 1512 * Create the n_cq array big enough to hold "ncq" completion queues. 1513 * If the array already exists it will be re-sized (but only larger). 1514 * The admin queue is included in this array, which boosts the 1515 * max number of entries to UINT16_MAX + 1. 1516 */ 1517 static int 1518 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr) 1519 { 1520 nvme_cq_t **cq; 1521 uint_t i, cq_count; 1522 1523 ASSERT3U(ncq, >, nvme->n_cq_count); 1524 1525 cq = nvme->n_cq; 1526 cq_count = nvme->n_cq_count; 1527 1528 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP); 1529 nvme->n_cq_count = ncq; 1530 1531 for (i = 0; i < cq_count; i++) 1532 nvme->n_cq[i] = cq[i]; 1533 1534 for (; i < nvme->n_cq_count; i++) 1535 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) != 1536 DDI_SUCCESS) 1537 goto fail; 1538 1539 if (cq != NULL) 1540 kmem_free(cq, sizeof (*cq) * cq_count); 1541 1542 return (DDI_SUCCESS); 1543 1544 fail: 1545 nvme_destroy_cq_array(nvme, cq_count); 1546 /* 1547 * Restore the original array 1548 */ 1549 nvme->n_cq_count = cq_count; 1550 nvme->n_cq = cq; 1551 1552 return (DDI_FAILURE); 1553 } 1554 1555 static int 1556 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, 1557 uint_t idx) 1558 { 1559 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP); 1560 uint_t cq_idx; 1561 1562 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER, 1563 DDI_INTR_PRI(nvme->n_intr_pri)); 1564 1565 /* 1566 * The NVMe spec defines that a full queue has one empty (unused) slot; 1567 * initialize the semaphore accordingly. 1568 */ 1569 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL); 1570 1571 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), 1572 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS) 1573 goto fail; 1574 1575 /* 1576 * idx == 0 is adminq, those above 0 are shared io completion queues. 1577 */ 1578 cq_idx = idx == 0 ? 
0 : 1 + (idx - 1) % (nvme->n_cq_count - 1); 1579 qp->nq_cq = nvme->n_cq[cq_idx]; 1580 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp; 1581 qp->nq_nentry = nentry; 1582 1583 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx); 1584 1585 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP); 1586 qp->nq_next_cmd = 0; 1587 1588 *nqp = qp; 1589 return (DDI_SUCCESS); 1590 1591 fail: 1592 nvme_free_qpair(qp); 1593 *nqp = NULL; 1594 1595 return (DDI_FAILURE); 1596 } 1597 1598 /* 1599 * One might reasonably consider that the nvme_cmd_cache should have a cache 1600 * constructor and destructor that takes care of the mutex/cv init/destroy, and 1601 * that nvme_free_cmd should reset more fields such that allocation becomes 1602 * simpler. This is not currently implemented as: 1603 * - nvme_cmd_cache is a global cache, shared across nvme instances and 1604 * therefore there is no easy access to the corresponding nvme_t in the 1605 * constructor to determine the required interrupt priority. 1606 * - Most fields in nvme_cmd_t would need to be zeroed in nvme_free_cmd while 1607 * preserving the mutex/cv. It is easier to be able to zero the entire 1608 * structure and then init the mutex/cv only in the unlikely event that we 1609 * want an admin command. 1610 */ 1611 static nvme_cmd_t * 1612 nvme_alloc_cmd(nvme_t *nvme, int kmflag) 1613 { 1614 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag); 1615 1616 if (cmd != NULL) { 1617 bzero(cmd, sizeof (nvme_cmd_t)); 1618 cmd->nc_nvme = nvme; 1619 } 1620 1621 return (cmd); 1622 } 1623 1624 static nvme_cmd_t * 1625 nvme_alloc_admin_cmd(nvme_t *nvme, int kmflag) 1626 { 1627 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, kmflag); 1628 1629 if (cmd != NULL) { 1630 cmd->nc_flags |= NVME_CMD_F_USELOCK; 1631 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER, 1632 DDI_INTR_PRI(nvme->n_intr_pri)); 1633 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL); 1634 } 1635 1636 return (cmd); 1637 } 1638 1639 static void 1640 nvme_free_cmd(nvme_cmd_t *cmd) 1641 { 1642 /* Don't free commands on the lost commands list. */ 1643 if (list_link_active(&cmd->nc_list)) 1644 return; 1645 1646 if (cmd->nc_dma) { 1647 nvme_free_dma(cmd->nc_dma); 1648 cmd->nc_dma = NULL; 1649 } 1650 1651 if (cmd->nc_prp) { 1652 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp); 1653 cmd->nc_prp = NULL; 1654 } 1655 1656 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) { 1657 cv_destroy(&cmd->nc_cv); 1658 mutex_destroy(&cmd->nc_mutex); 1659 } 1660 1661 kmem_cache_free(nvme_cmd_cache, cmd); 1662 } 1663 1664 static void 1665 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp) 1666 { 1667 sema_p(&qp->nq_sema); 1668 nvme_submit_cmd_common(qp, cmd, qtimeoutp); 1669 } 1670 1671 static int 1672 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1673 { 1674 if (cmd->nc_nvme->n_dead) { 1675 return (EIO); 1676 } 1677 1678 if (sema_tryp(&qp->nq_sema) == 0) 1679 return (EAGAIN); 1680 1681 nvme_submit_cmd_common(qp, cmd, NULL); 1682 return (0); 1683 } 1684 1685 /* 1686 * Common command submission routine. If `qtimeoutp` is not NULL then it will 1687 * be set to the sum of the timeouts of any active commands ahead of the one 1688 * being submitted. 1689 */ 1690 static void 1691 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp) 1692 { 1693 nvme_reg_sqtdbl_t tail = { 0 }; 1694 1695 /* 1696 * We don't need to take a lock on cmd since it is not yet enqueued.
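 * No other thread can observe the command until it is inserted into the
 * nq_cmd array further below, and that insertion only happens while
 * nq_mutex is held.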
1697 */ 1698 cmd->nc_submit_ts = gethrtime(); 1699 cmd->nc_state = NVME_CMD_SUBMITTED; 1700 1701 mutex_enter(&qp->nq_mutex); 1702 1703 /* 1704 * Now that we hold the queue pair lock, we must check whether or not 1705 * the controller has been listed as dead (e.g. was removed due to 1706 * hotplug). This is necessary as otherwise we could race with 1707 * nvme_remove_callback(). Because this has not been enqueued, we don't 1708 * call nvme_unqueue_cmd(), which is why we must manually decrement the 1709 * semaphore. 1710 */ 1711 if (cmd->nc_nvme->n_dead) { 1712 cmd->nc_queue_ts = gethrtime(); 1713 cmd->nc_state = NVME_CMD_QUEUED; 1714 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback, 1715 cmd, TQ_NOSLEEP, &cmd->nc_tqent); 1716 sema_v(&qp->nq_sema); 1717 mutex_exit(&qp->nq_mutex); 1718 return; 1719 } 1720 1721 /* 1722 * Try to insert the cmd into the active cmd array at the nq_next_cmd 1723 * slot. If the slot is already occupied advance to the next slot and 1724 * try again. This can happen for long running commands like async event 1725 * requests. 1726 */ 1727 while (qp->nq_cmd[qp->nq_next_cmd] != NULL) 1728 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1729 qp->nq_cmd[qp->nq_next_cmd] = cmd; 1730 1731 /* 1732 * We keep track of the number of active commands in this queue, and 1733 * the sum of the timeouts for those active commands. 1734 */ 1735 qp->nq_active_cmds++; 1736 if (qtimeoutp != NULL) 1737 *qtimeoutp = qp->nq_active_timeout; 1738 qp->nq_active_timeout += cmd->nc_timeout; 1739 1740 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd; 1741 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t)); 1742 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah, 1743 sizeof (nvme_sqe_t) * qp->nq_sqtail, 1744 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV); 1745 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1746 1747 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry; 1748 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r); 1749 1750 mutex_exit(&qp->nq_mutex); 1751 } 1752 1753 static nvme_cmd_t * 1754 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid) 1755 { 1756 nvme_cmd_t *cmd; 1757 1758 ASSERT(mutex_owned(&qp->nq_mutex)); 1759 ASSERT3S(cid, <, qp->nq_nentry); 1760 1761 cmd = qp->nq_cmd[cid]; 1762 /* 1763 * Some controllers will erroneously add things to the completion queue 1764 * for which there is no matching outstanding command. If this happens, 1765 * it is almost certainly a controller firmware bug since nq_mutex 1766 * is held across command submission and ringing the queue doorbell, 1767 * and is also held in this function. 1768 * 1769 * If we see such an unexpected command, there is not much we can do. 1770 * These will be logged and counted in nvme_get_completed(), but 1771 * otherwise ignored. 1772 */ 1773 if (cmd == NULL) 1774 return (NULL); 1775 qp->nq_cmd[cid] = NULL; 1776 ASSERT3U(qp->nq_active_cmds, >, 0); 1777 qp->nq_active_cmds--; 1778 ASSERT3U(qp->nq_active_timeout, >=, cmd->nc_timeout); 1779 qp->nq_active_timeout -= cmd->nc_timeout; 1780 sema_v(&qp->nq_sema); 1781 1782 ASSERT3P(cmd, !=, NULL); 1783 ASSERT3P(cmd->nc_nvme, ==, nvme); 1784 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid); 1785 1786 return (cmd); 1787 } 1788 1789 /* 1790 * This is called when an admin abort has failed to complete, once for the 1791 * original command and once for the abort itself. At this point the controller 1792 * has been marked dead. 
The commands are considered lost, de-queued if 1793 * possible, and placed on a global lost commands list so that they cannot be 1794 * freed and so that any DMA memory they have is not re-used. 1795 */ 1796 static void 1797 nvme_lost_cmd(nvme_t *nvme, nvme_cmd_t *cmd) 1798 { 1799 ASSERT(mutex_owned(&cmd->nc_mutex)); 1800 1801 switch (cmd->nc_state) { 1802 case NVME_CMD_SUBMITTED: { 1803 nvme_qpair_t *qp = nvme->n_ioq[cmd->nc_sqid]; 1804 1805 /* 1806 * The command is still in the submitted state, meaning that we 1807 * have not processed a completion queue entry for it. De-queue 1808 * should be successful and if the hardware does later report 1809 * completion we'll skip it as a command for which we aren't 1810 * expecting a response (see nvme_unqueue_cmd()). 1811 */ 1812 mutex_enter(&qp->nq_mutex); 1813 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 1814 mutex_exit(&qp->nq_mutex); 1815 } 1816 case NVME_CMD_ALLOCATED: 1817 case NVME_CMD_COMPLETED: 1818 /* 1819 * If the command has not been submitted, or has completed, 1820 * there is nothing to do here. In the event of an abort 1821 * command timeout, we can end up here in the process of 1822 * "losing" the original command. It's possible that command 1823 * has actually completed (or been queued on the taskq) in the 1824 * interim. 1825 */ 1826 break; 1827 case NVME_CMD_QUEUED: 1828 /* 1829 * The command is on the taskq, awaiting callback. This should 1830 * be fairly rapid so wait for completion. 1831 */ 1832 while (cmd->nc_state != NVME_CMD_COMPLETED) 1833 cv_wait(&cmd->nc_cv, &cmd->nc_mutex); 1834 break; 1835 case NVME_CMD_LOST: 1836 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1837 "%s: command %p already lost", __func__, (void *)cmd); 1838 break; 1839 } 1840 1841 cmd->nc_state = NVME_CMD_LOST; 1842 1843 mutex_enter(&nvme_lc_mutex); 1844 list_insert_head(&nvme_lost_cmds, cmd); 1845 mutex_exit(&nvme_lc_mutex); 1846 } 1847 1848 /* 1849 * Get the command tied to the next completed cqe and bump along the completion 1850 * queue head counter. 1851 */ 1852 static nvme_cmd_t * 1853 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq) 1854 { 1855 nvme_qpair_t *qp; 1856 nvme_cqe_t *cqe; 1857 nvme_cmd_t *cmd; 1858 1859 ASSERT(mutex_owned(&cq->ncq_mutex)); 1860 1861 retry: 1862 cqe = &cq->ncq_cq[cq->ncq_head]; 1863 1864 /* Check phase tag of CQE. Hardware inverts it for new entries. */ 1865 if (cqe->cqe_sf.sf_p == cq->ncq_phase) 1866 return (NULL); 1867 1868 qp = nvme->n_ioq[cqe->cqe_sqid]; 1869 1870 mutex_enter(&qp->nq_mutex); 1871 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid); 1872 mutex_exit(&qp->nq_mutex); 1873 1874 qp->nq_sqhead = cqe->cqe_sqhd; 1875 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry; 1876 1877 /* Toggle phase on wrap-around. */ 1878 if (cq->ncq_head == 0) 1879 cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1; 1880 1881 if (cmd == NULL) { 1882 dev_err(nvme->n_dip, CE_WARN, 1883 "!received completion for unknown cid 0x%x", cqe->cqe_cid); 1884 NVME_BUMP_STAT(nvme, unknown_cid); 1885 /* 1886 * We want to ignore this unexpected completion entry as it 1887 * is most likely a result of a bug in the controller firmware. 1888 * However, if we return NULL, then callers will assume there 1889 * are no more pending commands for this wakeup. Retry to keep 1890 * enumerating commands until the phase tag indicates there are 1891 * no more and we are really done.
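 * The retry is bounded: ncq_head was already advanced past the
 * spurious entry above, so each pass consumes one CQE until the phase
 * tag check terminates the loop.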
1892 */ 1893 goto retry; 1894 } 1895 1896 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid); 1897 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); 1898 1899 return (cmd); 1900 } 1901 1902 /* 1903 * Process all completed commands on the io completion queue. 1904 */ 1905 static uint_t 1906 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq) 1907 { 1908 nvme_reg_cqhdbl_t head = { 0 }; 1909 nvme_cmd_t *cmd; 1910 uint_t completed = 0; 1911 1912 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1913 DDI_SUCCESS) 1914 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1915 __func__); 1916 1917 mutex_enter(&cq->ncq_mutex); 1918 1919 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1920 /* 1921 * NVME_CMD_F_USELOCK is applied to all commands which are 1922 * going to be waited for by another thread in nvme_wait_cmd 1923 * and indicates that the lock should be taken before modifying 1924 * protected fields, and that the mutex has been initialised. 1925 * Commands which do not require the mutex to be held have not 1926 * initialised it (to reduce overhead). 1927 */ 1928 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) { 1929 mutex_enter(&cmd->nc_mutex); 1930 /* 1931 * The command could have been de-queued as lost while 1932 * we waited on the lock, in which case we drop it. 1933 */ 1934 if (cmd->nc_state == NVME_CMD_LOST) { 1935 mutex_exit(&cmd->nc_mutex); 1936 completed++; 1937 continue; 1938 } 1939 } 1940 cmd->nc_queue_ts = gethrtime(); 1941 cmd->nc_state = NVME_CMD_QUEUED; 1942 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) 1943 mutex_exit(&cmd->nc_mutex); 1944 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd, 1945 TQ_NOSLEEP, &cmd->nc_tqent); 1946 1947 completed++; 1948 } 1949 1950 if (completed > 0) { 1951 /* 1952 * Update the completion queue head doorbell. 
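 * The doorbell is written once per batch of completions rather than
 * once per command, avoiding an MMIO write for every CQE processed.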
1953 */ 1954 head.b.cqhdbl_cqh = cq->ncq_head; 1955 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1956 } 1957 1958 mutex_exit(&cq->ncq_mutex); 1959 1960 return (completed); 1961 } 1962 1963 static nvme_cmd_t * 1964 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) 1965 { 1966 nvme_cq_t *cq = qp->nq_cq; 1967 nvme_reg_cqhdbl_t head = { 0 }; 1968 nvme_cmd_t *cmd; 1969 1970 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1971 DDI_SUCCESS) 1972 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1973 __func__); 1974 1975 mutex_enter(&cq->ncq_mutex); 1976 1977 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1978 head.b.cqhdbl_cqh = cq->ncq_head; 1979 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1980 } 1981 1982 mutex_exit(&cq->ncq_mutex); 1983 1984 return (cmd); 1985 } 1986 1987 static int 1988 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd) 1989 { 1990 nvme_cqe_t *cqe = &cmd->nc_cqe; 1991 1992 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1993 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1994 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1995 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1996 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1997 1998 if (cmd->nc_xfer != NULL) 1999 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2000 2001 if (cmd->nc_nvme->n_strict_version) { 2002 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2003 } 2004 2005 return (EIO); 2006 } 2007 2008 static int 2009 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd) 2010 { 2011 nvme_cqe_t *cqe = &cmd->nc_cqe; 2012 2013 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 2014 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 2015 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 2016 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 2017 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 2018 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) { 2019 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2020 } 2021 2022 return (EIO); 2023 } 2024 2025 static int 2026 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd) 2027 { 2028 nvme_cqe_t *cqe = &cmd->nc_cqe; 2029 2030 switch (cqe->cqe_sf.sf_sc) { 2031 case NVME_CQE_SC_INT_NVM_WRITE: 2032 /* write fail */ 2033 /* TODO: post ereport */ 2034 if (cmd->nc_xfer != NULL) 2035 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 2036 return (EIO); 2037 2038 case NVME_CQE_SC_INT_NVM_READ: 2039 /* read fail */ 2040 /* TODO: post ereport */ 2041 if (cmd->nc_xfer != NULL) 2042 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 2043 return (EIO); 2044 2045 default: 2046 return (nvme_check_unknown_cmd_status(cmd)); 2047 } 2048 } 2049 2050 static int 2051 nvme_check_generic_cmd_status(nvme_cmd_t *cmd) 2052 { 2053 nvme_cqe_t *cqe = &cmd->nc_cqe; 2054 2055 switch (cqe->cqe_sf.sf_sc) { 2056 case NVME_CQE_SC_GEN_SUCCESS: 2057 return (0); 2058 2059 /* 2060 * Errors indicating a bug in the driver should cause a panic. 
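 * The exception is commands flagged with NVME_CMD_F_DONTPANIC, as set
 * for user-initiated commands (e.g. ioctls and format), which return
 * an error instead.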
2061 */ 2062 case NVME_CQE_SC_GEN_INV_OPC: 2063 /* Invalid Command Opcode */ 2064 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmd_err); 2065 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) { 2066 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 2067 "programming error: invalid opcode in cmd %p", 2068 (void *)cmd); 2069 } 2070 return (EINVAL); 2071 2072 case NVME_CQE_SC_GEN_INV_FLD: 2073 /* Invalid Field in Command */ 2074 NVME_BUMP_STAT(cmd->nc_nvme, inv_field_err); 2075 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) { 2076 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 2077 "programming error: invalid field in cmd %p", 2078 (void *)cmd); 2079 } 2080 return (EIO); 2081 2082 case NVME_CQE_SC_GEN_ID_CNFL: 2083 /* Command ID Conflict */ 2084 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 2085 "cmd ID conflict in cmd %p", (void *)cmd); 2086 return (0); 2087 2088 case NVME_CQE_SC_GEN_INV_NS: 2089 /* Invalid Namespace or Format */ 2090 NVME_BUMP_STAT(cmd->nc_nvme, inv_nsfmt_err); 2091 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) { 2092 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 2093 "programming error: invalid NS/format in cmd %p", 2094 (void *)cmd); 2095 } 2096 return (EINVAL); 2097 2098 case NVME_CQE_SC_GEN_CMD_SEQ_ERR: 2099 /* 2100 * Command Sequence Error 2101 * 2102 * This can be generated normally by user log page requests that 2103 * come out of order (e.g. getting the persistent event log 2104 * without establishing the context). If the kernel manages this 2105 * on its own then that's problematic. 2106 */ 2107 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmdseq_err); 2108 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) { 2109 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 2110 "programming error: command sequencing error %p", 2111 (void *)cmd); 2112 } 2113 return (EINVAL); 2114 2115 case NVME_CQE_SC_GEN_NVM_LBA_RANGE: 2116 /* LBA Out Of Range */ 2117 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 2118 "LBA out of range in cmd %p", (void *)cmd); 2119 return (0); 2120 2121 /* 2122 * Non-fatal errors, handle gracefully. 2123 */ 2124 case NVME_CQE_SC_GEN_DATA_XFR_ERR: 2125 /* Data Transfer Error (DMA) */ 2126 /* TODO: post ereport */ 2127 NVME_BUMP_STAT(cmd->nc_nvme, data_xfr_err); 2128 if (cmd->nc_xfer != NULL) 2129 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 2130 return (EIO); 2131 2132 case NVME_CQE_SC_GEN_INTERNAL_ERR: 2133 /* 2134 * Internal Error. The spec (v1.0, section 4.5.1.2) says 2135 * detailed error information is returned as async event, 2136 * so we pretty much ignore the error here and handle it 2137 * in the async event handler. 2138 */ 2139 NVME_BUMP_STAT(cmd->nc_nvme, internal_err); 2140 if (cmd->nc_xfer != NULL) 2141 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 2142 return (EIO); 2143 2144 case NVME_CQE_SC_GEN_ABORT_REQUEST: 2145 /* 2146 * Command Abort Requested. This normally happens only when a 2147 * command times out. 2148 */ 2149 /* TODO: post ereport or change blkdev to handle this? 
*/ 2150 NVME_BUMP_STAT(cmd->nc_nvme, abort_rq_err); 2151 return (ECANCELED); 2152 2153 case NVME_CQE_SC_GEN_ABORT_PWRLOSS: 2154 /* Command Aborted due to Power Loss Notification */ 2155 NVME_BUMP_STAT(cmd->nc_nvme, abort_pwrloss_err); 2156 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2157 return (EIO); 2158 2159 case NVME_CQE_SC_GEN_ABORT_SQ_DEL: 2160 /* Command Aborted due to SQ Deletion */ 2161 NVME_BUMP_STAT(cmd->nc_nvme, abort_sq_del); 2162 return (EIO); 2163 2164 case NVME_CQE_SC_GEN_NVM_CAP_EXC: 2165 /* Capacity Exceeded */ 2166 NVME_BUMP_STAT(cmd->nc_nvme, nvm_cap_exc); 2167 if (cmd->nc_xfer != NULL) 2168 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 2169 return (EIO); 2170 2171 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY: 2172 /* Namespace Not Ready */ 2173 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_notrdy); 2174 if (cmd->nc_xfer != NULL) 2175 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 2176 return (EIO); 2177 2178 case NVME_CQE_SC_GEN_NVM_FORMATTING: 2179 /* Format in progress (1.2) */ 2180 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2)) 2181 return (nvme_check_unknown_cmd_status(cmd)); 2182 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_formatting); 2183 if (cmd->nc_xfer != NULL) 2184 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 2185 return (EIO); 2186 2187 default: 2188 return (nvme_check_unknown_cmd_status(cmd)); 2189 } 2190 } 2191 2192 static int 2193 nvme_check_specific_cmd_status(nvme_cmd_t *cmd) 2194 { 2195 nvme_cqe_t *cqe = &cmd->nc_cqe; 2196 2197 switch (cqe->cqe_sf.sf_sc) { 2198 case NVME_CQE_SC_SPC_INV_CQ: 2199 /* Completion Queue Invalid */ 2200 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE); 2201 NVME_BUMP_STAT(cmd->nc_nvme, inv_cq_err); 2202 return (EINVAL); 2203 2204 case NVME_CQE_SC_SPC_INV_QID: 2205 /* Invalid Queue Identifier */ 2206 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 2207 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE || 2208 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE || 2209 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 2210 NVME_BUMP_STAT(cmd->nc_nvme, inv_qid_err); 2211 return (EINVAL); 2212 2213 case NVME_CQE_SC_SPC_MAX_QSZ_EXC: 2214 /* Max Queue Size Exceeded */ 2215 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 2216 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 2217 NVME_BUMP_STAT(cmd->nc_nvme, max_qsz_exc); 2218 return (EINVAL); 2219 2220 case NVME_CQE_SC_SPC_ABRT_CMD_EXC: 2221 /* Abort Command Limit Exceeded */ 2222 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT); 2223 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 2224 "abort command limit exceeded in cmd %p", (void *)cmd); 2225 return (0); 2226 2227 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC: 2228 /* Async Event Request Limit Exceeded */ 2229 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT); 2230 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 2231 "async event request limit exceeded in cmd %p", 2232 (void *)cmd); 2233 return (0); 2234 2235 case NVME_CQE_SC_SPC_INV_INT_VECT: 2236 /* Invalid Interrupt Vector */ 2237 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 2238 NVME_BUMP_STAT(cmd->nc_nvme, inv_int_vect); 2239 return (EINVAL); 2240 2241 case NVME_CQE_SC_SPC_INV_LOG_PAGE: 2242 /* Invalid Log Page */ 2243 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE); 2244 NVME_BUMP_STAT(cmd->nc_nvme, inv_log_page); 2245 return (EINVAL); 2246 2247 case NVME_CQE_SC_SPC_INV_FORMAT: 2248 /* Invalid Format */ 2249 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT); 2250 NVME_BUMP_STAT(cmd->nc_nvme, inv_format); 2251 if (cmd->nc_xfer != NULL) 2252 bd_error(cmd->nc_xfer, 
BD_ERR_ILLRQ); 2253 return (EINVAL); 2254 2255 case NVME_CQE_SC_SPC_INV_Q_DEL: 2256 /* Invalid Queue Deletion */ 2257 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 2258 NVME_BUMP_STAT(cmd->nc_nvme, inv_q_del); 2259 return (EINVAL); 2260 2261 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR: 2262 /* Conflicting Attributes */ 2263 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT || 2264 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 2265 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 2266 NVME_BUMP_STAT(cmd->nc_nvme, cnfl_attr); 2267 if (cmd->nc_xfer != NULL) 2268 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2269 return (EINVAL); 2270 2271 case NVME_CQE_SC_SPC_NVM_INV_PROT: 2272 /* Invalid Protection Information */ 2273 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE || 2274 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 2275 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 2276 NVME_BUMP_STAT(cmd->nc_nvme, inv_prot); 2277 if (cmd->nc_xfer != NULL) 2278 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2279 return (EINVAL); 2280 2281 case NVME_CQE_SC_SPC_NVM_READONLY: 2282 /* Write to Read Only Range */ 2283 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 2284 NVME_BUMP_STAT(cmd->nc_nvme, readonly); 2285 if (cmd->nc_xfer != NULL) 2286 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2287 return (EROFS); 2288 2289 case NVME_CQE_SC_SPC_INV_FW_SLOT: 2290 /* Invalid Firmware Slot */ 2291 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwslot); 2292 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2293 return (EINVAL); 2294 2295 case NVME_CQE_SC_SPC_INV_FW_IMG: 2296 /* Invalid Firmware Image */ 2297 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwimg); 2298 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2299 return (EINVAL); 2300 2301 case NVME_CQE_SC_SPC_FW_RESET: 2302 /* Conventional Reset Required */ 2303 NVME_BUMP_STAT(cmd->nc_nvme, fwact_creset); 2304 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2305 return (0); 2306 2307 case NVME_CQE_SC_SPC_FW_NSSR: 2308 /* NVMe Subsystem Reset Required */ 2309 NVME_BUMP_STAT(cmd->nc_nvme, fwact_nssr); 2310 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2311 return (0); 2312 2313 case NVME_CQE_SC_SPC_FW_NEXT_RESET: 2314 /* Activation Requires Reset */ 2315 NVME_BUMP_STAT(cmd->nc_nvme, fwact_reset); 2316 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2317 return (0); 2318 2319 case NVME_CQE_SC_SPC_FW_MTFA: 2320 /* Activation Requires Maximum Time Violation */ 2321 NVME_BUMP_STAT(cmd->nc_nvme, fwact_mtfa); 2322 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2323 return (EAGAIN); 2324 2325 case NVME_CQE_SC_SPC_FW_PROHIBITED: 2326 /* Activation Prohibited */ 2327 NVME_BUMP_STAT(cmd->nc_nvme, fwact_prohibited); 2328 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2329 return (EINVAL); 2330 2331 case NVME_CQE_SC_SPC_FW_OVERLAP: 2332 /* Overlapping Firmware Ranges */ 2333 NVME_BUMP_STAT(cmd->nc_nvme, fw_overlap); 2334 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD || 2335 cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2336 return (EINVAL); 2337 2338 default: 2339 return (nvme_check_unknown_cmd_status(cmd)); 2340 } 2341 } 2342 2343 static inline int 2344 nvme_check_cmd_status(nvme_cmd_t *cmd) 2345 { 2346 nvme_cqe_t *cqe = &cmd->nc_cqe; 2347 2348 /* 2349 * Take a shortcut if the controller is dead, or if 2350 * command status indicates no error. 
2351 */ 2352 if (cmd->nc_nvme->n_dead) 2353 return (EIO); 2354 2355 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2356 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 2357 return (0); 2358 2359 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 2360 return (nvme_check_generic_cmd_status(cmd)); 2361 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 2362 return (nvme_check_specific_cmd_status(cmd)); 2363 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 2364 return (nvme_check_integrity_cmd_status(cmd)); 2365 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 2366 return (nvme_check_vendor_cmd_status(cmd)); 2367 2368 return (nvme_check_unknown_cmd_status(cmd)); 2369 } 2370 2371 /* 2372 * Check the command status as used by an ioctl path and do not convert it to an 2373 * errno. We still allow all the command status checking to occur, but otherwise 2374 * will pass back the controller error as is. 2375 */ 2376 static boolean_t 2377 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc) 2378 { 2379 nvme_cqe_t *cqe = &cmd->nc_cqe; 2380 nvme_t *nvme = cmd->nc_nvme; 2381 2382 if (nvme->n_dead) { 2383 return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0)); 2384 } 2385 2386 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2387 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 2388 return (B_TRUE); 2389 2390 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) { 2391 (void) nvme_check_generic_cmd_status(cmd); 2392 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) { 2393 (void) nvme_check_specific_cmd_status(cmd); 2394 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) { 2395 (void) nvme_check_integrity_cmd_status(cmd); 2396 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) { 2397 (void) nvme_check_vendor_cmd_status(cmd); 2398 } else { 2399 (void) nvme_check_unknown_cmd_status(cmd); 2400 } 2401 2402 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR, 2403 cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc)); 2404 } 2405 2406 static int 2407 nvme_abort_cmd(nvme_cmd_t *cmd, const uint32_t sec) 2408 { 2409 nvme_t *nvme = cmd->nc_nvme; 2410 nvme_cmd_t *abort_cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 2411 nvme_abort_cmd_t ac = { 0 }; 2412 int ret = 0; 2413 2414 sema_p(&nvme->n_abort_sema); 2415 2416 ac.b.ac_cid = cmd->nc_sqe.sqe_cid; 2417 ac.b.ac_sqid = cmd->nc_sqid; 2418 2419 abort_cmd->nc_sqid = 0; 2420 abort_cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 2421 abort_cmd->nc_callback = nvme_wakeup_cmd; 2422 abort_cmd->nc_sqe.sqe_cdw10 = ac.r; 2423 2424 /* 2425 * Send the ABORT to the hardware. The ABORT command will return _after_ 2426 * the aborted command has completed (aborted or otherwise) so we must 2427 * drop the aborted command's lock to allow it to complete. 2428 * We want to allow at least `nvme_abort_cmd_timeout` seconds for the 2429 * abort to be processed, but more if we are aborting a long-running 2430 * command to give that time to complete/abort too. 2431 */ 2432 mutex_exit(&cmd->nc_mutex); 2433 nvme_admin_cmd(abort_cmd, MAX(nvme_abort_cmd_timeout, sec)); 2434 mutex_enter(&cmd->nc_mutex); 2435 2436 sema_v(&nvme->n_abort_sema); 2437 2438 /* 2439 * If the abort command itself has timed out, it will have been 2440 * de-queued so that its callback will not be called after this point, 2441 * and its state will be NVME_CMD_LOST. 
2442 * 2443 * nvme_admin_cmd(abort_cmd) 2444 * -> nvme_wait_cmd(abort_cmd) 2445 * -> [abort_cmd itself times out] 2446 * | -> nvme_ctrl_mark_dead() 2447 * | -> nvme_lost_cmd(abort_cmd) 2448 * | -> abort_cmd de-queued from the admin qpair 2449 * | -> abort_cmd->nc_state = NVME_CMD_LOST 2450 * -> return 2451 * and here we are. 2452 */ 2453 if (abort_cmd->nc_state == NVME_CMD_LOST) { 2454 dev_err(nvme->n_dip, CE_WARN, 2455 "!ABORT of command %d/%d timed out", 2456 cmd->nc_sqe.sqe_cid, cmd->nc_sqid); 2457 NVME_BUMP_STAT(nvme, abort_timeout); 2458 ret = EIO; 2459 } else if ((ret = nvme_check_cmd_status(abort_cmd)) != 0) { 2460 dev_err(nvme->n_dip, CE_WARN, 2461 "!ABORT of command %d/%d " 2462 "failed with sct = %x, sc = %x", 2463 cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 2464 abort_cmd->nc_cqe.cqe_sf.sf_sct, 2465 abort_cmd->nc_cqe.cqe_sf.sf_sc); 2466 NVME_BUMP_STAT(nvme, abort_failed); 2467 } else { 2468 boolean_t success = ((abort_cmd->nc_cqe.cqe_dw0 & 1) == 0); 2469 2470 dev_err(nvme->n_dip, CE_WARN, 2471 "!ABORT of command %d/%d %ssuccessful", 2472 cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 2473 success ? "" : "un"); 2474 2475 if (success) { 2476 NVME_BUMP_STAT(nvme, abort_successful); 2477 } else { 2478 NVME_BUMP_STAT(nvme, abort_unsuccessful); 2479 } 2480 } 2481 2482 /* 2483 * The abort command abort_cmd has either completed or been de-queued 2484 * as lost in nvme_wait_cmd(). Either way it is safe to free it here. 2485 */ 2486 nvme_free_cmd(abort_cmd); 2487 2488 return (ret); 2489 } 2490 2491 /* 2492 * nvme_wait_cmd -- wait for command completion or timeout 2493 * 2494 * In case of a serious error or a timeout of the abort command the hardware 2495 * will be declared dead and FMA will be notified. 2496 */ 2497 static void 2498 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec) 2499 { 2500 nvme_t *nvme = cmd->nc_nvme; 2501 nvme_reg_csts_t csts; 2502 2503 ASSERT(mutex_owned(&cmd->nc_mutex)); 2504 2505 while (cmd->nc_state != NVME_CMD_COMPLETED) { 2506 clock_t timeout = ddi_get_lbolt() + 2507 drv_usectohz((long)sec * MICROSEC); 2508 2509 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) { 2510 /* 2511 * If this command is on the task queue then we don't 2512 * consider it to have timed out. We are waiting for 2513 * the callback to be invoked, the timing of which can 2514 * be affected by system load and should not count 2515 * against the device; continue to wait. 2516 * While this doesn't help deal with the possibility of 2517 * a command timing out between being placed on the CQ 2518 * and arriving on the taskq, we expect interrupts to 2519 * run fairly promptly making this a small window. 2520 */ 2521 if (cmd->nc_state != NVME_CMD_QUEUED) 2522 break; 2523 } 2524 } 2525 2526 if (cmd->nc_state == NVME_CMD_COMPLETED) { 2527 DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *, cmd); 2528 nvme_admin_stat_cmd(nvme, cmd); 2529 return; 2530 } 2531 2532 /* 2533 * The command timed out. 2534 */ 2535 2536 DTRACE_PROBE1(nvme_admin_cmd_timeout, nvme_cmd_t *, cmd); 2537 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2538 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, " 2539 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 2540 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 2541 NVME_BUMP_STAT(nvme, cmd_timeout); 2542 2543 /* 2544 * Check controller for fatal status, any errors associated with the 2545 * register or DMA handle, or for a double timeout (abort command timed 2546 * out). If necessary log a warning and call FMA.
2547 */ 2548 if (csts.b.csts_cfs || 2549 nvme_check_regs_hdl(nvme) || 2550 nvme_check_dma_hdl(cmd->nc_dma) || 2551 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 2552 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2553 nvme_lost_cmd(nvme, cmd); 2554 return; 2555 } 2556 2557 /* Issue an abort for the command that has timed out */ 2558 if (nvme_abort_cmd(cmd, sec) == 0) { 2559 /* 2560 * If the abort completed, whether or not it was 2561 * successful in aborting the command, that command 2562 * will also have completed with an appropriate 2563 * status. 2564 */ 2565 while (cmd->nc_state != NVME_CMD_COMPLETED) 2566 cv_wait(&cmd->nc_cv, &cmd->nc_mutex); 2567 return; 2568 } 2569 2570 /* 2571 * Otherwise, the abort has also timed out or failed, which 2572 * will have marked the controller dead. De-queue the original command 2573 * and add it to the lost commands list. 2574 */ 2575 VERIFY(cmd->nc_nvme->n_dead); 2576 nvme_lost_cmd(nvme, cmd); 2577 } 2578 2579 static void 2580 nvme_wakeup_cmd(void *arg) 2581 { 2582 nvme_cmd_t *cmd = arg; 2583 2584 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK); 2585 2586 mutex_enter(&cmd->nc_mutex); 2587 cmd->nc_state = NVME_CMD_COMPLETED; 2588 cv_signal(&cmd->nc_cv); 2589 mutex_exit(&cmd->nc_mutex); 2590 } 2591 2592 static void 2593 nvme_async_event_task(void *arg) 2594 { 2595 nvme_cmd_t *cmd = arg; 2596 nvme_t *nvme = cmd->nc_nvme; 2597 nvme_error_log_entry_t *error_log = NULL; 2598 nvme_health_log_t *health_log = NULL; 2599 nvme_nschange_list_t *nslist = NULL; 2600 size_t logsize = 0; 2601 nvme_async_event_t event; 2602 2603 /* 2604 * Check for errors associated with the async request itself. The only 2605 * command-specific error is "async event limit exceeded", which 2606 * indicates a programming error in the driver and causes a panic in 2607 * nvme_check_cmd_status(). 2608 * 2609 * Other possible errors are various scenarios where the async request 2610 * was aborted, or internal errors in the device. Internal errors are 2611 * reported to FMA, the command aborts need no special handling here. 2612 * 2613 * And finally, at least qemu nvme does not support async events, 2614 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we 2615 * will avoid posting async events. 2616 */ 2617 2618 if (nvme_check_cmd_status(cmd) != 0) { 2619 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 2620 "!async event request returned failure, sct = 0x%x, " 2621 "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 2622 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 2623 cmd->nc_cqe.cqe_sf.sf_m); 2624 2625 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2626 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 2627 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2628 } 2629 2630 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2631 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC && 2632 cmd->nc_cqe.cqe_sf.sf_dnr == 1) { 2633 nvme->n_async_event_supported = B_FALSE; 2634 } 2635 2636 nvme_free_cmd(cmd); 2637 return; 2638 } 2639 2640 event.r = cmd->nc_cqe.cqe_dw0; 2641 2642 /* Clear CQE and re-submit the async request. 
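 * After resubmission the command is owned by the controller again and
 * must not be referenced from this function, which is why cmd is set
 * to NULL below and only the local nvme pointer is used from here on.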
*/ 2643 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 2644 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL); 2645 cmd = NULL; /* cmd can no longer be used after resubmission */ 2646 2647 switch (event.b.ae_type) { 2648 case NVME_ASYNC_TYPE_ERROR: 2649 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 2650 if (!nvme_get_logpage_int(nvme, B_FALSE, 2651 (void **)&error_log, &logsize, 2652 NVME_LOGPAGE_ERROR)) { 2653 return; 2654 } 2655 } else { 2656 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 2657 "async event reply: type=0x%x logpage=0x%x", 2658 event.b.ae_type, event.b.ae_logpage); 2659 NVME_BUMP_STAT(nvme, wrong_logpage); 2660 return; 2661 } 2662 2663 switch (event.b.ae_info) { 2664 case NVME_ASYNC_ERROR_INV_SQ: 2665 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 2666 "invalid submission queue"); 2667 return; 2668 2669 case NVME_ASYNC_ERROR_INV_DBL: 2670 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 2671 "invalid doorbell write value"); 2672 return; 2673 2674 case NVME_ASYNC_ERROR_DIAGFAIL: 2675 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 2676 nvme_ctrl_mark_dead(nvme, B_FALSE); 2677 NVME_BUMP_STAT(nvme, diagfail_event); 2678 break; 2679 2680 case NVME_ASYNC_ERROR_PERSISTENT: 2681 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 2682 "device error"); 2683 nvme_ctrl_mark_dead(nvme, B_FALSE); 2684 NVME_BUMP_STAT(nvme, persistent_event); 2685 break; 2686 2687 case NVME_ASYNC_ERROR_TRANSIENT: 2688 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 2689 "device error"); 2690 /* TODO: send ereport */ 2691 NVME_BUMP_STAT(nvme, transient_event); 2692 break; 2693 2694 case NVME_ASYNC_ERROR_FW_LOAD: 2695 dev_err(nvme->n_dip, CE_WARN, 2696 "!firmware image load error"); 2697 NVME_BUMP_STAT(nvme, fw_load_event); 2698 break; 2699 } 2700 break; 2701 2702 case NVME_ASYNC_TYPE_HEALTH: 2703 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 2704 if (!nvme_get_logpage_int(nvme, B_FALSE, 2705 (void **)&health_log, &logsize, 2706 NVME_LOGPAGE_HEALTH)) { 2707 return; 2708 } 2709 } else { 2710 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 2711 "async event reply: type=0x%x logpage=0x%x", 2712 event.b.ae_type, event.b.ae_logpage); 2713 NVME_BUMP_STAT(nvme, wrong_logpage); 2714 return; 2715 } 2716 2717 switch (event.b.ae_info) { 2718 case NVME_ASYNC_HEALTH_RELIABILITY: 2719 dev_err(nvme->n_dip, CE_WARN, 2720 "!device reliability compromised"); 2721 /* TODO: send ereport */ 2722 NVME_BUMP_STAT(nvme, reliability_event); 2723 break; 2724 2725 case NVME_ASYNC_HEALTH_TEMPERATURE: 2726 dev_err(nvme->n_dip, CE_WARN, 2727 "!temperature above threshold"); 2728 /* TODO: send ereport */ 2729 NVME_BUMP_STAT(nvme, temperature_event); 2730 break; 2731 2732 case NVME_ASYNC_HEALTH_SPARE: 2733 dev_err(nvme->n_dip, CE_WARN, 2734 "!spare space below threshold"); 2735 /* TODO: send ereport */ 2736 NVME_BUMP_STAT(nvme, spare_event); 2737 break; 2738 } 2739 break; 2740 2741 case NVME_ASYNC_TYPE_NOTICE: 2742 switch (event.b.ae_info) { 2743 case NVME_ASYNC_NOTICE_NS_CHANGE: 2744 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) { 2745 dev_err(nvme->n_dip, CE_WARN, 2746 "!wrong logpage in async event reply: " 2747 "type=0x%x logpage=0x%x", 2748 event.b.ae_type, event.b.ae_logpage); 2749 NVME_BUMP_STAT(nvme, wrong_logpage); 2750 break; 2751 } 2752 2753 dev_err(nvme->n_dip, CE_NOTE, 2754 "namespace attribute change event, " 2755 "logpage = 0x%x", event.b.ae_logpage); 2756 NVME_BUMP_STAT(nvme, notice_event); 2757 2758 if (!nvme_get_logpage_int(nvme, B_FALSE, 2759 (void **)&nslist, &logsize, 2760
NVME_LOGPAGE_NSCHANGE)) { 2761 break; 2762 } 2763 2764 if (nslist->nscl_ns[0] == UINT32_MAX) { 2765 dev_err(nvme->n_dip, CE_CONT, 2766 "more than %u namespaces have changed.\n", 2767 NVME_NSCHANGE_LIST_SIZE); 2768 break; 2769 } 2770 2771 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 2772 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) { 2773 uint32_t nsid = nslist->nscl_ns[i]; 2774 2775 if (nsid == 0) /* end of list */ 2776 break; 2777 2778 dev_err(nvme->n_dip, CE_NOTE, 2779 "!namespace nvme%d/%u has changed.", 2780 ddi_get_instance(nvme->n_dip), nsid); 2781 2782 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) 2783 continue; 2784 2785 nvme_mgmt_bd_start(nvme); 2786 bd_state_change(nvme_nsid2ns(nvme, 2787 nsid)->ns_bd_hdl); 2788 nvme_mgmt_bd_end(nvme); 2789 } 2790 nvme_mgmt_unlock(nvme); 2791 2792 break; 2793 2794 case NVME_ASYNC_NOTICE_FW_ACTIVATE: 2795 dev_err(nvme->n_dip, CE_NOTE, 2796 "firmware activation starting, " 2797 "logpage = 0x%x", event.b.ae_logpage); 2798 NVME_BUMP_STAT(nvme, notice_event); 2799 break; 2800 2801 case NVME_ASYNC_NOTICE_TELEMETRY: 2802 dev_err(nvme->n_dip, CE_NOTE, 2803 "telemetry log changed, " 2804 "logpage = 0x%x", event.b.ae_logpage); 2805 NVME_BUMP_STAT(nvme, notice_event); 2806 break; 2807 2808 case NVME_ASYNC_NOTICE_NS_ASYMM: 2809 dev_err(nvme->n_dip, CE_NOTE, 2810 "asymmetric namespace access change, " 2811 "logpage = 0x%x", event.b.ae_logpage); 2812 NVME_BUMP_STAT(nvme, notice_event); 2813 break; 2814 2815 case NVME_ASYNC_NOTICE_LATENCYLOG: 2816 dev_err(nvme->n_dip, CE_NOTE, 2817 "predictable latency event aggregate log change, " 2818 "logpage = 0x%x", event.b.ae_logpage); 2819 NVME_BUMP_STAT(nvme, notice_event); 2820 break; 2821 2822 case NVME_ASYNC_NOTICE_LBASTATUS: 2823 dev_err(nvme->n_dip, CE_NOTE, 2824 "LBA status information alert, " 2825 "logpage = 0x%x", event.b.ae_logpage); 2826 NVME_BUMP_STAT(nvme, notice_event); 2827 break; 2828 2829 case NVME_ASYNC_NOTICE_ENDURANCELOG: 2830 dev_err(nvme->n_dip, CE_NOTE, 2831 "endurance group event aggregate log page change, " 2832 "logpage = 0x%x", event.b.ae_logpage); 2833 NVME_BUMP_STAT(nvme, notice_event); 2834 break; 2835 2836 default: 2837 dev_err(nvme->n_dip, CE_WARN, 2838 "!unknown notice async event received, " 2839 "info = 0x%x, logpage = 0x%x", event.b.ae_info, 2840 event.b.ae_logpage); 2841 NVME_BUMP_STAT(nvme, unknown_event); 2842 break; 2843 } 2844 break; 2845 2846 case NVME_ASYNC_TYPE_VENDOR: 2847 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 2848 "received, info = 0x%x, logpage = 0x%x", event.b.ae_info, 2849 event.b.ae_logpage); 2850 NVME_BUMP_STAT(nvme, vendor_event); 2851 break; 2852 2853 default: 2854 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 2855 "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type, 2856 event.b.ae_info, event.b.ae_logpage); 2857 NVME_BUMP_STAT(nvme, unknown_event); 2858 break; 2859 } 2860 2861 if (error_log != NULL) 2862 kmem_free(error_log, logsize); 2863 2864 if (health_log != NULL) 2865 kmem_free(health_log, logsize); 2866 2867 if (nslist != NULL) 2868 kmem_free(nslist, logsize); 2869 } 2870 2871 static void 2872 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec) 2873 { 2874 uint32_t qtimeout; 2875 2876 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK); 2877 2878 mutex_enter(&cmd->nc_mutex); 2879 cmd->nc_timeout = sec; 2880 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd, &qtimeout); 2881 /* 2882 * We will wait for a total of this command's specified timeout plus 2883 * the sum of the timeouts of any commands queued ahead of this one. 
If 2884 * we aren't first in the queue, this will inflate the timeout somewhat 2885 * but these times are not critical and it means that if we get stuck 2886 * behind a long running command such as a namespace format then we 2887 * won't time out and trigger an abort. 2888 */ 2889 nvme_wait_cmd(cmd, sec + qtimeout); 2890 mutex_exit(&cmd->nc_mutex); 2891 } 2892 2893 static void 2894 nvme_async_event(nvme_t *nvme) 2895 { 2896 nvme_cmd_t *cmd; 2897 2898 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 2899 cmd->nc_sqid = 0; 2900 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 2901 cmd->nc_callback = nvme_async_event_task; 2902 cmd->nc_flags |= NVME_CMD_F_DONTPANIC; 2903 2904 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL); 2905 } 2906 2907 /* 2908 * There are commands, such as format or vendor-unique commands, that will 2909 * manipulate or destroy the data in a namespace. Before issuing one of these 2910 * we make sure that none of the namespaces that will be impacted are attached. 2911 */ 2912 static boolean_t 2913 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid) 2914 { 2915 ASSERT(nvme_mgmt_lock_held(nvme)); 2916 ASSERT3U(nsid, !=, 0); 2917 2918 if (nsid != NVME_NSID_BCAST) { 2919 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid); 2920 return (!ns->ns_attached); 2921 } 2922 2923 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 2924 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 2925 2926 if (ns->ns_attached) { 2927 return (B_FALSE); 2928 } 2929 } 2930 2931 return (B_TRUE); 2932 } 2933 2934 static boolean_t 2935 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc) 2936 { 2937 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 2938 nvme_format_nvm_t format_nvm = { 0 }; 2939 boolean_t ret; 2940 2941 format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0); 2942 format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0); 2943 2944 cmd->nc_sqid = 0; 2945 cmd->nc_callback = nvme_wakeup_cmd; 2946 cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid; 2947 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 2948 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 2949 2950 /* 2951 * We don't want to panic on any format commands. There are two reasons 2952 * for this: 2953 * 2954 * 1) All format commands are initiated by users. We don't want to panic 2955 * on user commands. 2956 * 2957 * 2) Several devices like the Samsung SM951 don't allow formatting of 2958 * all namespaces in one command and we'd prefer to handle that 2959 * gracefully. 2960 */ 2961 cmd->nc_flags |= NVME_CMD_F_DONTPANIC; 2962 2963 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 2964 2965 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common)) { 2966 dev_err(nvme->n_dip, CE_WARN, 2967 "!FORMAT failed with sct = %x, sc = %x", 2968 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2969 ret = B_FALSE; 2970 goto fail; 2971 } 2972 2973 ret = B_TRUE; 2974 fail: 2975 nvme_free_cmd(cmd); 2976 return (ret); 2977 } 2978 2979 /* 2980 * Retrieve a specific log page. The contents of the log page request should 2981 * have already been validated by the system.
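 * The transfer length is passed to the controller as a zero-based dword
 * count split across the NUMDL/NUMDU fields; for example, a 4 KiB log
 * page is encoded as
 *
 *	ndw = 4096 / 4 - 1 = 1023
 *
 * with the low 16 bits placed in dword 10 and the high 16 bits in
 * dword 11.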
2982 */ 2983 static boolean_t 2984 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log, 2985 void **buf) 2986 { 2987 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 2988 nvme_getlogpage_dw10_t dw10; 2989 uint32_t offlo, offhi; 2990 nvme_getlogpage_dw11_t dw11; 2991 nvme_getlogpage_dw14_t dw14; 2992 uint32_t ndw; 2993 boolean_t ret = B_FALSE; 2994 2995 bzero(&dw10, sizeof (dw10)); 2996 bzero(&dw11, sizeof (dw11)); 2997 bzero(&dw14, sizeof (dw14)); 2998 2999 cmd->nc_sqid = 0; 3000 cmd->nc_callback = nvme_wakeup_cmd; 3001 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 3002 cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid; 3003 3004 if (user) 3005 cmd->nc_flags |= NVME_CMD_F_DONTPANIC; 3006 3007 /* 3008 * The size field is the number of double words, but is a zeros based 3009 * value. We need to store our actual value minus one. 3010 */ 3011 ndw = (uint32_t)(log->nigl_len / 4); 3012 ASSERT3U(ndw, >, 0); 3013 ndw--; 3014 3015 dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0); 3016 dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0); 3017 dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0); 3018 dw10.b.lp_lnumdl = bitx32(ndw, 15, 0); 3019 3020 dw11.b.lp_numdu = bitx32(ndw, 31, 16); 3021 dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0); 3022 3023 offlo = bitx64(log->nigl_offset, 31, 0); 3024 offhi = bitx64(log->nigl_offset, 63, 32); 3025 3026 dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0); 3027 3028 cmd->nc_sqe.sqe_cdw10 = dw10.r; 3029 cmd->nc_sqe.sqe_cdw11 = dw11.r; 3030 cmd->nc_sqe.sqe_cdw12 = offlo; 3031 cmd->nc_sqe.sqe_cdw13 = offhi; 3032 cmd->nc_sqe.sqe_cdw14 = dw14.r; 3033 3034 if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ, 3035 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 3036 dev_err(nvme->n_dip, CE_WARN, 3037 "!nvme_zalloc_dma failed for GET LOG PAGE"); 3038 ret = nvme_ioctl_error(&log->nigl_common, 3039 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 3040 goto fail; 3041 } 3042 3043 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) { 3044 ret = nvme_ioctl_error(&log->nigl_common, 3045 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 3046 goto fail; 3047 } 3048 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3049 3050 if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) { 3051 if (!user) { 3052 dev_err(nvme->n_dip, CE_WARN, 3053 "!GET LOG PAGE failed with sct = %x, sc = %x", 3054 cmd->nc_cqe.cqe_sf.sf_sct, 3055 cmd->nc_cqe.cqe_sf.sf_sc); 3056 } 3057 ret = B_FALSE; 3058 goto fail; 3059 } 3060 3061 *buf = kmem_alloc(log->nigl_len, KM_SLEEP); 3062 bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len); 3063 3064 ret = B_TRUE; 3065 fail: 3066 nvme_free_cmd(cmd); 3067 3068 return (ret); 3069 } 3070 3071 /* 3072 * This is an internal wrapper for when the kernel wants to get a log page. 3073 * Currently this assumes that the only thing that is required is the log page 3074 * ID. If more information is required, we'll be better served to just use the 3075 * general ioctl interface. 
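 * A typical internal use, following the pattern in
 * nvme_async_event_task() above, looks like this (sketch only):
 *
 *	nvme_health_log_t *hlog;
 *	size_t len;
 *
 *	if (nvme_get_logpage_int(nvme, B_FALSE, (void **)&hlog, &len,
 *	    NVME_LOGPAGE_HEALTH)) {
 *		... inspect hlog ...
 *		kmem_free(hlog, len);
 *	}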
3076 */ 3077 static boolean_t 3078 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 3079 uint8_t lid) 3080 { 3081 const nvme_log_page_info_t *info = NULL; 3082 nvme_ioctl_get_logpage_t log; 3083 nvme_valid_ctrl_data_t data; 3084 boolean_t bret; 3085 bool var; 3086 3087 for (size_t i = 0; i < nvme_std_log_npages; i++) { 3088 if (nvme_std_log_pages[i].nlpi_lid == lid && 3089 nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) { 3090 info = &nvme_std_log_pages[i]; 3091 break; 3092 } 3093 } 3094 3095 if (info == NULL) { 3096 return (B_FALSE); 3097 } 3098 3099 data.vcd_vers = &nvme->n_version; 3100 data.vcd_id = nvme->n_idctl; 3101 bzero(&log, sizeof (log)); 3102 log.nigl_common.nioc_nsid = NVME_NSID_BCAST; 3103 log.nigl_csi = info->nlpi_csi; 3104 log.nigl_lid = info->nlpi_lid; 3105 log.nigl_len = nvme_log_page_info_size(info, &data, &var); 3106 3107 /* 3108 * We only support getting standard fixed-length log pages through the 3109 * kernel interface at this time. If a log page either has an unknown 3110 * size or has a variable length, then we cannot get it. 3111 */ 3112 if (log.nigl_len == 0 || var) { 3113 return (B_FALSE); 3114 } 3115 3116 bret = nvme_get_logpage(nvme, user, &log, buf); 3117 if (!bret) { 3118 return (B_FALSE); 3119 } 3120 3121 *bufsize = log.nigl_len; 3122 return (B_TRUE); 3123 } 3124 3125 static boolean_t 3126 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc, 3127 void **buf) 3128 { 3129 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 3130 boolean_t ret = B_FALSE; 3131 nvme_identify_dw10_t dw10; 3132 3133 ASSERT3P(buf, !=, NULL); 3134 3135 bzero(&dw10, sizeof (dw10)); 3136 3137 cmd->nc_sqid = 0; 3138 cmd->nc_callback = nvme_wakeup_cmd; 3139 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 3140 cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid; 3141 3142 dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0); 3143 dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0); 3144 3145 cmd->nc_sqe.sqe_cdw10 = dw10.r; 3146 3147 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 3148 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 3149 dev_err(nvme->n_dip, CE_WARN, 3150 "!nvme_zalloc_dma failed for IDENTIFY"); 3151 ret = nvme_ioctl_error(&ioc->nid_common, 3152 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 3153 goto fail; 3154 } 3155 3156 if (cmd->nc_dma->nd_ncookie > 2) { 3157 dev_err(nvme->n_dip, CE_WARN, 3158 "!too many DMA cookies for IDENTIFY"); 3159 NVME_BUMP_STAT(nvme, too_many_cookies); 3160 ret = nvme_ioctl_error(&ioc->nid_common, 3161 NVME_IOCTL_E_BAD_PRP, 0, 0); 3162 goto fail; 3163 } 3164 3165 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 3166 if (cmd->nc_dma->nd_ncookie > 1) { 3167 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 3168 &cmd->nc_dma->nd_cookie); 3169 cmd->nc_sqe.sqe_dptr.d_prp[1] = 3170 cmd->nc_dma->nd_cookie.dmac_laddress; 3171 } 3172 3173 if (user) 3174 cmd->nc_flags |= NVME_CMD_F_DONTPANIC; 3175 3176 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3177 3178 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) { 3179 dev_err(nvme->n_dip, CE_WARN, 3180 "!IDENTIFY failed with sct = %x, sc = %x", 3181 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 3182 ret = B_FALSE; 3183 goto fail; 3184 } 3185 3186 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 3187 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE); 3188 ret = B_TRUE; 3189 3190 fail: 3191 nvme_free_cmd(cmd); 3192 3193 return (ret); 3194 } 3195 3196 static boolean_t 3197 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void 
**buf) 3198 { 3199 nvme_ioctl_identify_t id; 3200 3201 bzero(&id, sizeof (nvme_ioctl_identify_t)); 3202 id.nid_common.nioc_nsid = nsid; 3203 id.nid_cns = cns; 3204 3205 return (nvme_identify(nvme, B_FALSE, &id, buf)); 3206 } 3207 3208 static int 3209 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 3210 uint32_t val, uint32_t *res) 3211 { 3212 _NOTE(ARGUNUSED(nsid)); 3213 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 3214 int ret = EINVAL; 3215 3216 ASSERT(res != NULL); 3217 3218 cmd->nc_sqid = 0; 3219 cmd->nc_callback = nvme_wakeup_cmd; 3220 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 3221 cmd->nc_sqe.sqe_cdw10 = feature; 3222 cmd->nc_sqe.sqe_cdw11 = val; 3223 3224 if (user) 3225 cmd->nc_flags |= NVME_CMD_F_DONTPANIC; 3226 3227 switch (feature) { 3228 case NVME_FEAT_WRITE_CACHE: 3229 if (!nvme->n_write_cache_present) 3230 goto fail; 3231 break; 3232 3233 case NVME_FEAT_NQUEUES: 3234 break; 3235 3236 default: 3237 goto fail; 3238 } 3239 3240 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3241 3242 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 3243 dev_err(nvme->n_dip, CE_WARN, 3244 "!SET FEATURES %d failed with sct = %x, sc = %x", 3245 feature, cmd->nc_cqe.cqe_sf.sf_sct, 3246 cmd->nc_cqe.cqe_sf.sf_sc); 3247 goto fail; 3248 } 3249 3250 *res = cmd->nc_cqe.cqe_dw0; 3251 3252 fail: 3253 nvme_free_cmd(cmd); 3254 return (ret); 3255 } 3256 3257 static int 3258 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 3259 { 3260 nvme_write_cache_t nwc = { 0 }; 3261 3262 if (enable) 3263 nwc.b.wc_wce = 1; 3264 3265 /* 3266 * We've seen some cases where this fails due to us being told we've 3267 * specified an invalid namespace when operating against the Xen xcp-ng 3268 * qemu NVMe virtual device. As such, we generally ensure that trying to 3269 * enable this doesn't lead us to panic. It's not completely clear why 3270 * specifying namespace zero here fails, but not when we're setting the 3271 * number of queues below. 3272 */ 3273 return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE, 3274 nwc.r, &nwc.r)); 3275 } 3276 3277 static int 3278 nvme_set_nqueues(nvme_t *nvme) 3279 { 3280 nvme_nqueues_t nq = { 0 }; 3281 int ret; 3282 3283 /* 3284 * The default is to allocate one completion queue per vector. 3285 */ 3286 if (nvme->n_completion_queues == -1) 3287 nvme->n_completion_queues = nvme->n_intr_cnt; 3288 3289 /* 3290 * There is no point in having more completion queues than 3291 * interrupt vectors. 3292 */ 3293 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 3294 nvme->n_intr_cnt); 3295 3296 /* 3297 * The default is to use one submission queue per completion queue. 3298 */ 3299 if (nvme->n_submission_queues == -1) 3300 nvme->n_submission_queues = nvme->n_completion_queues; 3301 3302 /* 3303 * There is no point in having more completion queues than 3304 * submission queues. 3305 */ 3306 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 3307 nvme->n_submission_queues); 3308 3309 ASSERT(nvme->n_submission_queues > 0); 3310 ASSERT(nvme->n_completion_queues > 0); 3311 3312 nq.b.nq_nsq = nvme->n_submission_queues - 1; 3313 nq.b.nq_ncq = nvme->n_completion_queues - 1; 3314 3315 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r, 3316 &nq.r); 3317 3318 if (ret == 0) { 3319 /* 3320 * Never use more than the requested number of queues. 
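 * The reply in dword 0 holds the zero-based counts the controller
 * actually allocated; for example, if we requested 8 completion queues
 * and the controller returns nq_ncq == 3, we clamp n_completion_queues
 * to 4.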
3321 */ 3322 nvme->n_submission_queues = MIN(nvme->n_submission_queues, 3323 nq.b.nq_nsq + 1); 3324 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 3325 nq.b.nq_ncq + 1); 3326 } 3327 3328 return (ret); 3329 } 3330 3331 static int 3332 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq) 3333 { 3334 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 3335 nvme_create_queue_dw10_t dw10 = { 0 }; 3336 nvme_create_cq_dw11_t c_dw11 = { 0 }; 3337 int ret; 3338 3339 dw10.b.q_qid = cq->ncq_id; 3340 dw10.b.q_qsize = cq->ncq_nentry - 1; 3341 3342 c_dw11.b.cq_pc = 1; 3343 c_dw11.b.cq_ien = 1; 3344 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt; 3345 3346 cmd->nc_sqid = 0; 3347 cmd->nc_callback = nvme_wakeup_cmd; 3348 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 3349 cmd->nc_sqe.sqe_cdw10 = dw10.r; 3350 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 3351 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress; 3352 3353 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3354 3355 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 3356 dev_err(nvme->n_dip, CE_WARN, 3357 "!CREATE CQUEUE failed with sct = %x, sc = %x", 3358 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 3359 } 3360 3361 nvme_free_cmd(cmd); 3362 3363 return (ret); 3364 } 3365 3366 static int 3367 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 3368 { 3369 nvme_cq_t *cq = qp->nq_cq; 3370 nvme_cmd_t *cmd; 3371 nvme_create_queue_dw10_t dw10 = { 0 }; 3372 nvme_create_sq_dw11_t s_dw11 = { 0 }; 3373 int ret; 3374 3375 /* 3376 * It is possible to have more qpairs than completion queues, 3377 * and when the idx > ncq_id, that completion queue is shared 3378 * and has already been created. 3379 */ 3380 if (idx <= cq->ncq_id && 3381 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 3382 return (DDI_FAILURE); 3383 3384 dw10.b.q_qid = idx; 3385 dw10.b.q_qsize = qp->nq_nentry - 1; 3386 3387 s_dw11.b.sq_pc = 1; 3388 s_dw11.b.sq_cqid = cq->ncq_id; 3389 3390 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 3391 cmd->nc_sqid = 0; 3392 cmd->nc_callback = nvme_wakeup_cmd; 3393 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 3394 cmd->nc_sqe.sqe_cdw10 = dw10.r; 3395 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 3396 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 3397 3398 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3399 3400 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 3401 dev_err(nvme->n_dip, CE_WARN, 3402 "!CREATE SQUEUE failed with sct = %x, sc = %x", 3403 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 3404 } 3405 3406 nvme_free_cmd(cmd); 3407 3408 return (ret); 3409 } 3410 3411 static boolean_t 3412 nvme_reset(nvme_t *nvme, boolean_t quiesce) 3413 { 3414 nvme_reg_csts_t csts; 3415 int i; 3416 3417 nvme_put32(nvme, NVME_REG_CC, 0); 3418 3419 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3420 if (csts.b.csts_rdy == 1) { 3421 nvme_put32(nvme, NVME_REG_CC, 0); 3422 3423 /* 3424 * The timeout value is from the Controller Capabilities 3425 * register (CAP.TO, section 3.1.1). This is the worst case 3426 * time to wait for CSTS.RDY to transition from 1 to 0 after 3427 * CC.EN transitions from 1 to 0. 3428 * 3429 * The timeout units are in 500 ms units, and we are delaying 3430 * in 50ms chunks, hence counting to n_timeout * 10. 
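 *
 * Worked example: CAP.TO = 20 allows the controller up to
 * 20 * 500ms = 10s to clear CSTS.RDY, so we poll at most
 * 20 * 10 = 200 times at 50ms apiece.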
3431 */ 3432 for (i = 0; i < nvme->n_timeout * 10; i++) { 3433 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3434 if (csts.b.csts_rdy == 0) 3435 break; 3436 3437 /* 3438 * Quiescing drivers should not use locks or timeouts, 3439 * so if this is the quiesce path, use a quiesce-safe 3440 * delay. 3441 */ 3442 if (quiesce) { 3443 drv_usecwait(50000); 3444 } else { 3445 delay(drv_usectohz(50000)); 3446 } 3447 } 3448 } 3449 3450 nvme_put32(nvme, NVME_REG_AQA, 0); 3451 nvme_put32(nvme, NVME_REG_ASQ, 0); 3452 nvme_put32(nvme, NVME_REG_ACQ, 0); 3453 3454 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3455 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE); 3456 } 3457 3458 static void 3459 nvme_shutdown(nvme_t *nvme, boolean_t quiesce) 3460 { 3461 nvme_reg_cc_t cc; 3462 nvme_reg_csts_t csts; 3463 int i; 3464 3465 cc.r = nvme_get32(nvme, NVME_REG_CC); 3466 cc.b.cc_shn = NVME_CC_SHN_NORMAL; 3467 nvme_put32(nvme, NVME_REG_CC, cc.r); 3468 3469 for (i = 0; i < 10; i++) { 3470 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3471 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 3472 break; 3473 3474 if (quiesce) { 3475 drv_usecwait(100000); 3476 } else { 3477 delay(drv_usectohz(100000)); 3478 } 3479 } 3480 } 3481 3482 /* 3483 * Return length of string without trailing spaces. 3484 */ 3485 static int 3486 nvme_strlen(const char *str, int len) 3487 { 3488 if (len <= 0) 3489 return (0); 3490 3491 while (str[--len] == ' ') 3492 ; 3493 3494 return (++len); 3495 } 3496 3497 static void 3498 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val) 3499 { 3500 ulong_t bsize = 0; 3501 char *msg = ""; 3502 3503 if (ddi_strtoul(val, NULL, 0, &bsize) != 0) 3504 goto err; 3505 3506 if (!ISP2(bsize)) { 3507 msg = ": not a power of 2"; 3508 goto err; 3509 } 3510 3511 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) { 3512 msg = ": too low"; 3513 goto err; 3514 } 3515 3516 nvme->n_min_block_size = bsize; 3517 return; 3518 3519 err: 3520 dev_err(nvme->n_dip, CE_WARN, 3521 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' " 3522 "for model '%s'%s", val, model, msg); 3523 3524 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 3525 } 3526 3527 static void 3528 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val, 3529 boolean_t *b) 3530 { 3531 if (strcmp(val, "on") == 0 || 3532 strcmp(val, "true") == 0) 3533 *b = B_TRUE; 3534 else if (strcmp(val, "off") == 0 || 3535 strcmp(val, "false") == 0) 3536 *b = B_FALSE; 3537 else 3538 dev_err(nvme->n_dip, CE_WARN, 3539 "!nvme-config-list: invalid value for %s '%s'" 3540 " for model '%s', ignoring", name, val, model); 3541 } 3542 3543 static void 3544 nvme_config_list(nvme_t *nvme) 3545 { 3546 char **config_list; 3547 uint_t nelem; 3548 int rv, i; 3549 3550 /* 3551 * We're following the pattern of 'sd-config-list' here, but extend it. 3552 * Instead of two we have three separate strings for "model", "fwrev", 3553 * and "name-value-list". 
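 *
 * A hypothetical entry in nvme.conf would look as follows (the model and
 * firmware strings here are made up purely for illustration):
 *
 *	nvme-config-list =
 *	    "EXAMPLE MODEL A", "1.0,1.1", "min-phys-block-size:4096",
 *	    "EXAMPLE MODEL B", "", "volatile-write-cache:off,ignore-unknown-vendor-status:on";
 *
 * An empty "fwrev" string matches any firmware revision of that model.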
3554 */ 3555 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip, 3556 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem); 3557 3558 if (rv != DDI_PROP_SUCCESS) { 3559 if (rv == DDI_PROP_CANNOT_DECODE) { 3560 dev_err(nvme->n_dip, CE_WARN, 3561 "!nvme-config-list: cannot be decoded"); 3562 } 3563 3564 return; 3565 } 3566 3567 if ((nelem % 3) != 0) { 3568 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be " 3569 "triplets of <model>/<fwrev>/<name-value-list> strings "); 3570 goto out; 3571 } 3572 3573 for (i = 0; i < nelem; i += 3) { 3574 char *model = config_list[i]; 3575 char *fwrev = config_list[i + 1]; 3576 char *nvp, *save_nv; 3577 int id_model_len, id_fwrev_len; 3578 3579 id_model_len = nvme_strlen(nvme->n_idctl->id_model, 3580 sizeof (nvme->n_idctl->id_model)); 3581 3582 if (strlen(model) != id_model_len) 3583 continue; 3584 3585 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0) 3586 continue; 3587 3588 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev, 3589 sizeof (nvme->n_idctl->id_fwrev)); 3590 3591 if (strlen(fwrev) != 0) { 3592 boolean_t match = B_FALSE; 3593 char *fwr, *last_fw; 3594 3595 for (fwr = strtok_r(fwrev, ",", &last_fw); 3596 fwr != NULL; 3597 fwr = strtok_r(NULL, ",", &last_fw)) { 3598 if (strlen(fwr) != id_fwrev_len) 3599 continue; 3600 3601 if (strncmp(fwr, nvme->n_idctl->id_fwrev, 3602 id_fwrev_len) == 0) 3603 match = B_TRUE; 3604 } 3605 3606 if (!match) 3607 continue; 3608 } 3609 3610 /* 3611 * We should now have a comma-separated list of name:value 3612 * pairs. 3613 */ 3614 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv); 3615 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) { 3616 char *name = nvp; 3617 char *val = strchr(nvp, ':'); 3618 3619 if (val == NULL || name == val) { 3620 dev_err(nvme->n_dip, CE_WARN, 3621 "!nvme-config-list: <name-value-list> " 3622 "for model '%s' is malformed", model); 3623 goto out; 3624 } 3625 3626 /* 3627 * Null-terminate 'name', move 'val' past ':' sep. 3628 */ 3629 *val++ = '\0'; 3630 3631 /* 3632 * Process the name:val pairs that we know about. 3633 */ 3634 if (strcmp(name, "ignore-unknown-vendor-status") == 0) { 3635 nvme_config_boolean(nvme, model, name, val, 3636 &nvme->n_ignore_unknown_vendor_status); 3637 } else if (strcmp(name, "min-phys-block-size") == 0) { 3638 nvme_config_min_block_size(nvme, model, val); 3639 } else if (strcmp(name, "volatile-write-cache") == 0) { 3640 nvme_config_boolean(nvme, model, name, val, 3641 &nvme->n_write_cache_enabled); 3642 } else { 3643 /* 3644 * Unknown 'name'. 3645 */ 3646 dev_err(nvme->n_dip, CE_WARN, 3647 "!nvme-config-list: unknown config '%s' " 3648 "for model '%s', ignoring", name, model); 3649 } 3650 } 3651 } 3652 3653 out: 3654 ddi_prop_free(config_list); 3655 } 3656 3657 static void 3658 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 3659 { 3660 /* 3661 * Section 7.7 of the spec describes how to get a unique ID for 3662 * the controller: the vendor ID, the model name and the serial 3663 * number shall be unique when combined. 3664 * 3665 * If a namespace has no EUI64 we use the above and add the hex 3666 * namespace ID to get a unique ID for the namespace. 
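 *
 * As an illustrative example, a hypothetical controller with VID 0x1344,
 * model "ACME X100" and serial "S1234" would yield the devid
 * "1344-ACME X100-S1234-1" for namespace 1, per the format string below.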
3667 */ 3668 char model[sizeof (nvme->n_idctl->id_model) + 1]; 3669 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 3670 3671 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 3672 bcopy(nvme->n_idctl->id_serial, serial, 3673 sizeof (nvme->n_idctl->id_serial)); 3674 3675 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 3676 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 3677 3678 nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X", 3679 nvme->n_idctl->id_vid, model, serial, nsid); 3680 } 3681 3682 static nvme_identify_nsid_list_t * 3683 nvme_update_nsid_list(nvme_t *nvme, int cns) 3684 { 3685 nvme_identify_nsid_list_t *nslist; 3686 3687 /* 3688 * We currently don't handle cases where there are more than 3689 * 1024 active namespaces, requiring several IDENTIFY commands. 3690 */ 3691 if (nvme_identify_int(nvme, 0, cns, (void **)&nslist)) 3692 return (nslist); 3693 3694 return (NULL); 3695 } 3696 3697 nvme_namespace_t * 3698 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid) 3699 { 3700 ASSERT3U(nsid, !=, 0); 3701 ASSERT3U(nsid, <=, nvme->n_namespace_count); 3702 return (&nvme->n_ns[nsid - 1]); 3703 } 3704 3705 static boolean_t 3706 nvme_allocated_ns(nvme_namespace_t *ns) 3707 { 3708 nvme_t *nvme = ns->ns_nvme; 3709 uint32_t i; 3710 3711 ASSERT(nvme_mgmt_lock_held(nvme)); 3712 3713 /* 3714 * If supported, update the list of allocated namespace IDs. 3715 */ 3716 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) && 3717 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) { 3718 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme, 3719 NVME_IDENTIFY_NSID_ALLOC_LIST); 3720 boolean_t found = B_FALSE; 3721 3722 /* 3723 * When namespace management is supported, this really shouldn't 3724 * be NULL. Treat all namespaces as allocated if it is. 3725 */ 3726 if (nslist == NULL) 3727 return (B_TRUE); 3728 3729 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) { 3730 if (ns->ns_id == 0) 3731 break; 3732 3733 if (ns->ns_id == nslist->nl_nsid[i]) 3734 found = B_TRUE; 3735 } 3736 3737 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE); 3738 return (found); 3739 } else { 3740 /* 3741 * If namespace management isn't supported, report all 3742 * namespaces as allocated. 3743 */ 3744 return (B_TRUE); 3745 } 3746 } 3747 3748 static boolean_t 3749 nvme_active_ns(nvme_namespace_t *ns) 3750 { 3751 nvme_t *nvme = ns->ns_nvme; 3752 uint64_t *ptr; 3753 uint32_t i; 3754 3755 ASSERT(nvme_mgmt_lock_held(nvme)); 3756 3757 /* 3758 * If supported, update the list of active namespace IDs. 3759 */ 3760 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) { 3761 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme, 3762 NVME_IDENTIFY_NSID_LIST); 3763 boolean_t found = B_FALSE; 3764 3765 /* 3766 * When namespace management is supported, this really shouldn't 3767 * be NULL. Treat all namespaces as allocated if it is. 3768 */ 3769 if (nslist == NULL) 3770 return (B_TRUE); 3771 3772 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) { 3773 if (ns->ns_id == 0) 3774 break; 3775 3776 if (ns->ns_id == nslist->nl_nsid[i]) 3777 found = B_TRUE; 3778 } 3779 3780 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE); 3781 return (found); 3782 } 3783 3784 /* 3785 * Workaround for revision 1.0: 3786 * Check whether the IDENTIFY NAMESPACE data is zero-filled. 
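 * NVMe 1.0 predates the IDENTIFY list of active NSIDs, but controllers
 * are expected to return all-zero IDENTIFY NAMESPACE data for a
 * namespace that is not active, so any non-zero word marks it active.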
3787 */ 3788 for (ptr = (uint64_t *)ns->ns_idns; 3789 ptr != (uint64_t *)(ns->ns_idns + 1); 3790 ptr++) { 3791 if (*ptr != 0) { 3792 return (B_TRUE); 3793 } 3794 } 3795 3796 return (B_FALSE); 3797 } 3798 3799 static int 3800 nvme_init_ns(nvme_t *nvme, uint32_t nsid) 3801 { 3802 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid); 3803 nvme_identify_nsid_t *idns; 3804 boolean_t was_ignored; 3805 int last_rp; 3806 3807 ns->ns_nvme = nvme; 3808 3809 ASSERT(nvme_mgmt_lock_held(nvme)); 3810 3811 /* 3812 * We might be rescanning a namespace here, and if that rescan fails 3813 * after boot it would leave us in a bad spot. We need to do something 3814 * about this longer term, but it's not clear how exactly we would 3815 * recover right now. 3816 */ 3817 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID, 3818 (void **)&idns)) { 3819 dev_err(nvme->n_dip, CE_WARN, 3820 "!failed to identify namespace %d", nsid); 3821 return (DDI_FAILURE); 3822 } 3823 3824 if (ns->ns_idns != NULL) 3825 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t)); 3826 3827 ns->ns_idns = idns; 3828 ns->ns_id = nsid; 3829 3830 3831 3832 ns->ns_allocated = nvme_allocated_ns(ns); 3833 ns->ns_active = nvme_active_ns(ns); 3834 3835 ns->ns_block_count = idns->id_nsize; 3836 ns->ns_block_size = 3837 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 3838 ns->ns_best_block_size = ns->ns_block_size; 3839 3840 /* 3841 * Get the EUI64 if present. 3842 */ 3843 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 3844 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 3845 3846 /* 3847 * Get the NGUID if present. 3848 */ 3849 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) 3850 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid)); 3851 3852 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 3853 if (*(uint64_t *)ns->ns_eui64 == 0) 3854 nvme_prepare_devid(nvme, ns->ns_id); 3855 3856 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id); 3857 3858 /* 3859 * Find the LBA format with no metadata and the best relative 3860 * performance. A value of 3 means "degraded", 0 is best. 3861 */ 3862 last_rp = 3; 3863 for (int j = 0; j <= idns->id_nlbaf; j++) { 3864 if (idns->id_lbaf[j].lbaf_lbads == 0) 3865 break; 3866 if (idns->id_lbaf[j].lbaf_ms != 0) 3867 continue; 3868 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 3869 continue; 3870 last_rp = idns->id_lbaf[j].lbaf_rp; 3871 ns->ns_best_block_size = 3872 1 << idns->id_lbaf[j].lbaf_lbads; 3873 } 3874 3875 if (ns->ns_best_block_size < nvme->n_min_block_size) 3876 ns->ns_best_block_size = nvme->n_min_block_size; 3877 3878 was_ignored = ns->ns_ignore; 3879 3880 /* 3881 * We currently don't support namespaces that are inactive, or use 3882 * either: 3883 * - protection information 3884 * - illegal block size (< 512) 3885 */ 3886 if (!ns->ns_active) { 3887 ns->ns_ignore = B_TRUE; 3888 } else if (idns->id_dps.dp_pinfo) { 3889 dev_err(nvme->n_dip, CE_WARN, 3890 "!ignoring namespace %d, unsupported feature: " 3891 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 3892 ns->ns_ignore = B_TRUE; 3893 } else if (ns->ns_block_size < 512) { 3894 dev_err(nvme->n_dip, CE_WARN, 3895 "!ignoring namespace %d, unsupported block size %"PRIu64, 3896 nsid, (uint64_t)ns->ns_block_size); 3897 ns->ns_ignore = B_TRUE; 3898 } else { 3899 ns->ns_ignore = B_FALSE; 3900 } 3901 3902 /* 3903 * Keep a count of namespaces which are attachable. 3904 * See comments in nvme_bd_driveinfo() to understand its effect. 3905 */ 3906 if (was_ignored) { 3907 /* 3908 * Previously ignored, but now not. Count it.
3909 */ 3910 if (!ns->ns_ignore) 3911 nvme->n_namespaces_attachable++; 3912 } else { 3913 /* 3914 * Wasn't ignored previously, but now needs to be. 3915 * Discount it. 3916 */ 3917 if (ns->ns_ignore) 3918 nvme->n_namespaces_attachable--; 3919 } 3920 3921 return (DDI_SUCCESS); 3922 } 3923 3924 static boolean_t 3925 nvme_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com) 3926 { 3927 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid); 3928 int ret; 3929 3930 ASSERT(nvme_mgmt_lock_held(nvme)); 3931 3932 if (ns->ns_ignore) { 3933 return (nvme_ioctl_error(com, NVME_IOCTL_E_UNSUP_ATTACH_NS, 3934 0, 0)); 3935 } 3936 3937 if (ns->ns_bd_hdl == NULL) { 3938 bd_ops_t ops = nvme_bd_ops; 3939 3940 if (!nvme->n_idctl->id_oncs.on_dset_mgmt) 3941 ops.o_free_space = NULL; 3942 3943 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr, 3944 KM_SLEEP); 3945 3946 if (ns->ns_bd_hdl == NULL) { 3947 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev " 3948 "handle for namespace id %u", com->nioc_nsid); 3949 return (nvme_ioctl_error(com, 3950 NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0)); 3951 } 3952 } 3953 3954 nvme_mgmt_bd_start(nvme); 3955 ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl); 3956 nvme_mgmt_bd_end(nvme); 3957 if (ret != DDI_SUCCESS) { 3958 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH, 3959 0, 0)); 3960 } 3961 3962 ns->ns_attached = B_TRUE; 3963 3964 return (B_TRUE); 3965 } 3966 3967 static boolean_t 3968 nvme_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com) 3969 { 3970 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid); 3971 int ret; 3972 3973 ASSERT(nvme_mgmt_lock_held(nvme)); 3974 3975 if (ns->ns_ignore || !ns->ns_attached) 3976 return (B_TRUE); 3977 3978 nvme_mgmt_bd_start(nvme); 3979 ASSERT3P(ns->ns_bd_hdl, !=, NULL); 3980 ret = bd_detach_handle(ns->ns_bd_hdl); 3981 nvme_mgmt_bd_end(nvme); 3982 3983 if (ret != DDI_SUCCESS) { 3984 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0, 3985 0)); 3986 } 3987 3988 ns->ns_attached = B_FALSE; 3989 return (B_TRUE); 3990 3991 } 3992 3993 /* 3994 * Rescan the namespace information associated with the namespaces indicated by 3995 * ioc. They should not be attached to blkdev right now. 
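 * A nsid of NVME_NSID_BCAST rescans every namespace on the controller;
 * any other value rescans only the namespace it names.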
3996 */ 3997 static void 3998 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid) 3999 { 4000 ASSERT(nvme_mgmt_lock_held(nvme)); 4001 ASSERT3U(nsid, !=, 0); 4002 4003 if (nsid != NVME_NSID_BCAST) { 4004 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid); 4005 4006 ASSERT3U(ns->ns_attached, ==, B_FALSE); 4007 (void) nvme_init_ns(nvme, nsid); 4008 return; 4009 } 4010 4011 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 4012 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 4013 4014 ASSERT3U(ns->ns_attached, ==, B_FALSE); 4015 (void) nvme_init_ns(nvme, i); 4016 } 4017 } 4018 4019 typedef struct nvme_quirk_table { 4020 uint16_t nq_vendor_id; 4021 uint16_t nq_device_id; 4022 nvme_quirk_t nq_quirks; 4023 } nvme_quirk_table_t; 4024 4025 static const nvme_quirk_table_t nvme_quirks[] = { 4026 { 0x1987, 0x5018, NVME_QUIRK_START_CID }, /* Phison E18 */ 4027 }; 4028 4029 static void 4030 nvme_detect_quirks(nvme_t *nvme) 4031 { 4032 for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) { 4033 const nvme_quirk_table_t *nqt = &nvme_quirks[i]; 4034 4035 if (nqt->nq_vendor_id == nvme->n_vendor_id && 4036 nqt->nq_device_id == nvme->n_device_id) { 4037 nvme->n_quirks = nqt->nq_quirks; 4038 return; 4039 } 4040 } 4041 } 4042 4043 static int 4044 nvme_init(nvme_t *nvme) 4045 { 4046 nvme_reg_cc_t cc = { 0 }; 4047 nvme_reg_aqa_t aqa = { 0 }; 4048 nvme_reg_asq_t asq = { 0 }; 4049 nvme_reg_acq_t acq = { 0 }; 4050 nvme_reg_cap_t cap; 4051 nvme_reg_vs_t vs; 4052 nvme_reg_csts_t csts; 4053 int i = 0; 4054 uint16_t nqueues; 4055 uint_t tq_threads; 4056 char model[sizeof (nvme->n_idctl->id_model) + 1]; 4057 char *vendor, *product; 4058 uint32_t nsid; 4059 4060 /* Check controller version */ 4061 vs.r = nvme_get32(nvme, NVME_REG_VS); 4062 nvme->n_version.v_major = vs.b.vs_mjr; 4063 nvme->n_version.v_minor = vs.b.vs_mnr; 4064 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d\n", 4065 nvme->n_version.v_major, nvme->n_version.v_minor); 4066 4067 if (nvme->n_version.v_major > nvme_version_major) { 4068 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 4069 nvme_version_major); 4070 if (nvme->n_strict_version) 4071 goto fail; 4072 } 4073 4074 /* retrieve controller configuration */ 4075 cap.r = nvme_get64(nvme, NVME_REG_CAP); 4076 4077 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 4078 dev_err(nvme->n_dip, CE_WARN, 4079 "!NVM command set not supported by hardware"); 4080 goto fail; 4081 } 4082 4083 nvme->n_nssr_supported = cap.b.cap_nssrs; 4084 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 4085 nvme->n_timeout = cap.b.cap_to; 4086 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 4087 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 4088 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 4089 4090 /* 4091 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 4092 * the base page size of 4k (1<<12), so add 12 here to get the real 4093 * page size value. 4094 */ 4095 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 4096 cap.b.cap_mpsmax + 12); 4097 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 4098 4099 /* 4100 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 4101 */ 4102 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 4103 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 4104 4105 /* 4106 * Set up PRP DMA to transfer 1 page-aligned page at a time. 4107 * Maxxfer may be increased after we identified the controller limits. 
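 * (It is raised further down in this function, once IDENTIFY CONTROLLER
 * gives us MDTS, capped by what a single PRP list page can map.)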
4108 */ 4109 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 4110 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 4111 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 4112 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 4113 4114 /* 4115 * Reset controller if it's still in ready state. 4116 */ 4117 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 4118 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 4119 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 4120 nvme->n_dead = B_TRUE; 4121 goto fail; 4122 } 4123 4124 /* 4125 * Create the cq array with one completion queue to be assigned 4126 * to the admin queue pair and a limited number of taskqs (4). 4127 */ 4128 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) != 4129 DDI_SUCCESS) { 4130 dev_err(nvme->n_dip, CE_WARN, 4131 "!failed to pre-allocate admin completion queue"); 4132 goto fail; 4133 } 4134 /* 4135 * Create the admin queue pair. 4136 */ 4137 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 4138 != DDI_SUCCESS) { 4139 dev_err(nvme->n_dip, CE_WARN, 4140 "!unable to allocate admin qpair"); 4141 goto fail; 4142 } 4143 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 4144 nvme->n_ioq[0] = nvme->n_adminq; 4145 4146 if (nvme->n_quirks & NVME_QUIRK_START_CID) 4147 nvme->n_adminq->nq_next_cmd++; 4148 4149 nvme->n_progress |= NVME_ADMIN_QUEUE; 4150 4151 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4152 "admin-queue-len", nvme->n_admin_queue_len); 4153 4154 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 4155 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 4156 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 4157 4158 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 4159 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 4160 4161 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 4162 nvme_put64(nvme, NVME_REG_ASQ, asq); 4163 nvme_put64(nvme, NVME_REG_ACQ, acq); 4164 4165 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 4166 cc.b.cc_css = 0; /* use NVM command set */ 4167 cc.b.cc_mps = nvme->n_pageshift - 12; 4168 cc.b.cc_shn = 0; /* no shutdown in progress */ 4169 cc.b.cc_en = 1; /* enable controller */ 4170 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 4171 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 4172 4173 nvme_put32(nvme, NVME_REG_CC, cc.r); 4174 4175 /* 4176 * Wait for the controller to become ready. 4177 */ 4178 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 4179 if (csts.b.csts_rdy == 0) { 4180 for (i = 0; i != nvme->n_timeout * 10; i++) { 4181 delay(drv_usectohz(50000)); 4182 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 4183 4184 if (csts.b.csts_cfs == 1) { 4185 dev_err(nvme->n_dip, CE_WARN, 4186 "!controller fatal status at init"); 4187 ddi_fm_service_impact(nvme->n_dip, 4188 DDI_SERVICE_LOST); 4189 nvme->n_dead = B_TRUE; 4190 goto fail; 4191 } 4192 4193 if (csts.b.csts_rdy == 1) 4194 break; 4195 } 4196 } 4197 4198 if (csts.b.csts_rdy == 0) { 4199 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 4200 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 4201 nvme->n_dead = B_TRUE; 4202 goto fail; 4203 } 4204 4205 /* 4206 * Assume an abort command limit of 1. We'll destroy and re-init 4207 * that later when we know the true abort command limit. 4208 */ 4209 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 4210 4211 /* 4212 * Set up initial interrupt for admin queue. 
4213 */ 4214 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 4215 != DDI_SUCCESS) && 4216 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 4217 != DDI_SUCCESS) && 4218 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 4219 != DDI_SUCCESS)) { 4220 dev_err(nvme->n_dip, CE_WARN, 4221 "!failed to set up initial interrupt"); 4222 goto fail; 4223 } 4224 4225 /* 4226 * Post an asynchronous event command to catch errors. 4227 * We assume the asynchronous events are supported as required by 4228 * specification (Figure 40 in section 5 of NVMe 1.2). 4229 * However, since at least qemu does not follow the specification, 4230 * we need a mechanism to protect ourselves. 4231 */ 4232 nvme->n_async_event_supported = B_TRUE; 4233 nvme_async_event(nvme); 4234 4235 /* 4236 * Identify Controller 4237 */ 4238 if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL, 4239 (void **)&nvme->n_idctl)) { 4240 dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller"); 4241 goto fail; 4242 } 4243 4244 /* 4245 * Get the common namespace information if available. If not, we use the 4246 * information for nsid 1. 4247 */ 4248 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) && 4249 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) { 4250 nsid = NVME_NSID_BCAST; 4251 } else { 4252 nsid = 1; 4253 } 4254 4255 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID, 4256 (void **)&nvme->n_idcomns)) { 4257 dev_err(nvme->n_dip, CE_WARN, "!failed to identify common " 4258 "namespace information"); 4259 goto fail; 4260 } 4261 /* 4262 * Process nvme-config-list (if present) in nvme.conf. 4263 */ 4264 nvme_config_list(nvme); 4265 4266 /* 4267 * Get Vendor & Product ID 4268 */ 4269 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 4270 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 4271 sata_split_model(model, &vendor, &product); 4272 4273 if (vendor == NULL) 4274 nvme->n_vendor = strdup("NVMe"); 4275 else 4276 nvme->n_vendor = strdup(vendor); 4277 4278 nvme->n_product = strdup(product); 4279 4280 /* 4281 * Get controller limits. 4282 */ 4283 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 4284 MIN(nvme->n_admin_queue_len / 10, 4285 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 4286 4287 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4288 "async-event-limit", nvme->n_async_event_limit); 4289 4290 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 4291 4292 /* 4293 * Reinitialize the semaphore with the true abort command limit 4294 * supported by the hardware. It's not necessary to disable interrupts 4295 * as only command aborts use the semaphore, and no commands are 4296 * executed or aborted while we're here. 4297 */ 4298 sema_destroy(&nvme->n_abort_sema); 4299 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 4300 SEMA_DRIVER, NULL); 4301 4302 nvme->n_progress |= NVME_CTRL_LIMITS; 4303 4304 if (nvme->n_idctl->id_mdts == 0) 4305 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 4306 else 4307 nvme->n_max_data_transfer_size = 4308 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 4309 4310 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 4311 4312 /* 4313 * Limit n_max_data_transfer_size to what we can handle in one PRP. 4314 * Chained PRPs are currently unsupported. 4315 * 4316 * This is a no-op on hardware which doesn't support a transfer size 4317 * big enough to require chained PRPs. 
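 *
 * Worked example, assuming the common 4k page size: one PRP list page
 * holds 4096 / 8 = 512 entries, so the ceiling computed below is
 * 512 * 4096 bytes, i.e. 2MB per I/O.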
4318 */ 4319 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 4320 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 4321 4322 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 4323 4324 /* 4325 * Make sure the minimum/maximum queue entry sizes are not 4326 * larger/smaller than the default. 4327 */ 4328 4329 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 4330 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 4331 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 4332 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 4333 goto fail; 4334 4335 /* 4336 * Check for the presence of a Volatile Write Cache. If present, 4337 * enable or disable based on the value of the property 4338 * volatile-write-cache-enable (default is enabled). 4339 */ 4340 nvme->n_write_cache_present = 4341 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 4342 4343 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4344 "volatile-write-cache-present", 4345 nvme->n_write_cache_present ? 1 : 0); 4346 4347 if (!nvme->n_write_cache_present) { 4348 nvme->n_write_cache_enabled = B_FALSE; 4349 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 4350 != 0) { 4351 dev_err(nvme->n_dip, CE_WARN, 4352 "!failed to %sable volatile write cache", 4353 nvme->n_write_cache_enabled ? "en" : "dis"); 4354 /* 4355 * Assume the cache is (still) enabled. 4356 */ 4357 nvme->n_write_cache_enabled = B_TRUE; 4358 } 4359 4360 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4361 "volatile-write-cache-enable", 4362 nvme->n_write_cache_enabled ? 1 : 0); 4363 4364 /* 4365 * Get number of supported namespaces and allocate namespace array. 4366 */ 4367 nvme->n_namespace_count = nvme->n_idctl->id_nn; 4368 4369 if (nvme->n_namespace_count == 0) { 4370 dev_err(nvme->n_dip, CE_WARN, 4371 "!controllers without namespaces are not supported"); 4372 goto fail; 4373 } 4374 4375 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 4376 dev_err(nvme->n_dip, CE_WARN, 4377 "!too many namespaces: %d, limiting to %d\n", 4378 nvme->n_namespace_count, NVME_MINOR_MAX); 4379 nvme->n_namespace_count = NVME_MINOR_MAX; 4380 } 4381 4382 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 4383 nvme->n_namespace_count, KM_SLEEP); 4384 4385 /* 4386 * Try to set up MSI/MSI-X interrupts. 4387 */ 4388 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 4389 != 0) { 4390 nvme_release_interrupts(nvme); 4391 4392 nqueues = MIN(UINT16_MAX, ncpus); 4393 4394 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 4395 nqueues) != DDI_SUCCESS) && 4396 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 4397 nqueues) != DDI_SUCCESS)) { 4398 dev_err(nvme->n_dip, CE_WARN, 4399 "!failed to set up MSI/MSI-X interrupts"); 4400 goto fail; 4401 } 4402 } 4403 4404 /* 4405 * Create I/O queue pairs. 4406 */ 4407 4408 if (nvme_set_nqueues(nvme) != 0) { 4409 dev_err(nvme->n_dip, CE_WARN, 4410 "!failed to set number of I/O queues to %d", 4411 nvme->n_intr_cnt); 4412 goto fail; 4413 } 4414 4415 /* 4416 * Reallocate I/O queue array 4417 */ 4418 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 4419 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 4420 (nvme->n_submission_queues + 1), KM_SLEEP); 4421 nvme->n_ioq[0] = nvme->n_adminq; 4422 4423 /* 4424 * There should always be at least as many submission queues 4425 * as completion queues. 
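 * nvme_set_nqueues() clamped n_completion_queues to n_submission_queues
 * above, so with a well-behaved controller this invariant holds.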
4426 */ 4427 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues); 4428 4429 nvme->n_ioq_count = nvme->n_submission_queues; 4430 4431 nvme->n_io_squeue_len = 4432 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries); 4433 4434 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len", 4435 nvme->n_io_squeue_len); 4436 4437 /* 4438 * Pre-allocate completion queues. 4439 * When there are the same number of submission and completion 4440 * queues there is no value in having a larger completion 4441 * queue length. 4442 */ 4443 if (nvme->n_submission_queues == nvme->n_completion_queues) 4444 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 4445 nvme->n_io_squeue_len); 4446 4447 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 4448 nvme->n_max_queue_entries); 4449 4450 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len", 4451 nvme->n_io_cqueue_len); 4452 4453 /* 4454 * Assign an equal number of taskq threads to each completion 4455 * queue, capping the total number of threads to the number 4456 * of CPUs. 4457 */ 4458 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues; 4459 4460 /* 4461 * In case the calculation above is zero, we need at least one 4462 * thread per completion queue. 4463 */ 4464 tq_threads = MAX(1, tq_threads); 4465 4466 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1, 4467 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) { 4468 dev_err(nvme->n_dip, CE_WARN, 4469 "!failed to pre-allocate completion queues"); 4470 goto fail; 4471 } 4472 4473 /* 4474 * If we use fewer completion queues than interrupt vectors, return 4475 * some of the interrupt vectors back to the system. 4476 */ 4477 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) { 4478 nvme_release_interrupts(nvme); 4479 4480 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, 4481 nvme->n_completion_queues + 1) != DDI_SUCCESS) { 4482 dev_err(nvme->n_dip, CE_WARN, 4483 "!failed to reduce number of interrupts"); 4484 goto fail; 4485 } 4486 } 4487 4488 /* 4489 * Alloc & register I/O queue pairs 4490 */ 4491 4492 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 4493 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len, 4494 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 4495 dev_err(nvme->n_dip, CE_WARN, 4496 "!unable to allocate I/O qpair %d", i); 4497 goto fail; 4498 } 4499 4500 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) { 4501 dev_err(nvme->n_dip, CE_WARN, 4502 "!unable to create I/O qpair %d", i); 4503 goto fail; 4504 } 4505 } 4506 4507 /* 4508 * Post more asynchronous event commands to reduce event reporting 4509 * latency as suggested by the spec. 4510 */ 4511 if (nvme->n_async_event_supported) { 4512 for (i = 1; i != nvme->n_async_event_limit; i++) 4513 nvme_async_event(nvme); 4514 } 4515 4516 return (DDI_SUCCESS); 4517 4518 fail: 4519 (void) nvme_reset(nvme, B_FALSE); 4520 return (DDI_FAILURE); 4521 } 4522 4523 static uint_t 4524 nvme_intr(caddr_t arg1, caddr_t arg2) 4525 { 4526 nvme_t *nvme = (nvme_t *)arg1; 4527 int inum = (int)(uintptr_t)arg2; 4528 int ccnt = 0; 4529 int qnum; 4530 4531 if (inum >= nvme->n_intr_cnt) 4532 return (DDI_INTR_UNCLAIMED); 4533 4534 if (nvme->n_dead) { 4535 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ? 4536 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED); 4537 } 4538 4539 /* 4540 * The interrupt vector a queue uses is calculated as queue_idx % 4541 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 4542 * in steps of n_intr_cnt to process all queues using this vector.
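 *
 * For example, with n_intr_cnt == 4 and eight I/O completion queues the
 * handler for vector 1 services n_cq[1] and n_cq[5], while vector 0
 * covers the admin completion queue n_cq[0] plus n_cq[4] and n_cq[8].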
4543 */ 4544 for (qnum = inum; 4545 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL; 4546 qnum += nvme->n_intr_cnt) { 4547 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]); 4548 } 4549 4550 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 4551 } 4552 4553 static void 4554 nvme_release_interrupts(nvme_t *nvme) 4555 { 4556 int i; 4557 4558 for (i = 0; i < nvme->n_intr_cnt; i++) { 4559 if (nvme->n_inth[i] == NULL) 4560 break; 4561 4562 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 4563 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 4564 else 4565 (void) ddi_intr_disable(nvme->n_inth[i]); 4566 4567 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 4568 (void) ddi_intr_free(nvme->n_inth[i]); 4569 } 4570 4571 kmem_free(nvme->n_inth, nvme->n_inth_sz); 4572 nvme->n_inth = NULL; 4573 nvme->n_inth_sz = 0; 4574 4575 nvme->n_progress &= ~NVME_INTERRUPTS; 4576 } 4577 4578 static int 4579 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 4580 { 4581 int nintrs, navail, count; 4582 int ret; 4583 int i; 4584 4585 if (nvme->n_intr_types == 0) { 4586 ret = ddi_intr_get_supported_types(nvme->n_dip, 4587 &nvme->n_intr_types); 4588 if (ret != DDI_SUCCESS) { 4589 dev_err(nvme->n_dip, CE_WARN, 4590 "!%s: ddi_intr_get_supported types failed", 4591 __func__); 4592 return (ret); 4593 } 4594 #ifdef __x86 4595 if (get_hwenv() == HW_VMWARE) 4596 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 4597 #endif 4598 } 4599 4600 if ((nvme->n_intr_types & intr_type) == 0) 4601 return (DDI_FAILURE); 4602 4603 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 4604 if (ret != DDI_SUCCESS) { 4605 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 4606 __func__); 4607 return (ret); 4608 } 4609 4610 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 4611 if (ret != DDI_SUCCESS) { 4612 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 4613 __func__); 4614 return (ret); 4615 } 4616 4617 /* We want at most one interrupt per queue pair. 
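 * ddi_intr_alloc() below may still return fewer than navail; n_intr_cnt
 * then simply ends up smaller and more queues share each vector.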
*/ 4618 if (navail > nqpairs) 4619 navail = nqpairs; 4620 4621 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 4622 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 4623 4624 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 4625 &count, 0); 4626 if (ret != DDI_SUCCESS) { 4627 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 4628 __func__); 4629 goto fail; 4630 } 4631 4632 nvme->n_intr_cnt = count; 4633 4634 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 4635 if (ret != DDI_SUCCESS) { 4636 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 4637 __func__); 4638 goto fail; 4639 } 4640 4641 for (i = 0; i < count; i++) { 4642 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 4643 (void *)nvme, (void *)(uintptr_t)i); 4644 if (ret != DDI_SUCCESS) { 4645 dev_err(nvme->n_dip, CE_WARN, 4646 "!%s: ddi_intr_add_handler failed", __func__); 4647 goto fail; 4648 } 4649 } 4650 4651 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 4652 4653 for (i = 0; i < count; i++) { 4654 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 4655 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 4656 else 4657 ret = ddi_intr_enable(nvme->n_inth[i]); 4658 4659 if (ret != DDI_SUCCESS) { 4660 dev_err(nvme->n_dip, CE_WARN, 4661 "!%s: enabling interrupt %d failed", __func__, i); 4662 goto fail; 4663 } 4664 } 4665 4666 nvme->n_intr_type = intr_type; 4667 4668 nvme->n_progress |= NVME_INTERRUPTS; 4669 4670 return (DDI_SUCCESS); 4671 4672 fail: 4673 nvme_release_interrupts(nvme); 4674 4675 return (ret); 4676 } 4677 4678 static int 4679 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 4680 { 4681 _NOTE(ARGUNUSED(arg)); 4682 4683 pci_ereport_post(dip, fm_error, NULL); 4684 return (fm_error->fme_status); 4685 } 4686 4687 static void 4688 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a, 4689 void *b) 4690 { 4691 nvme_t *nvme = a; 4692 4693 nvme_ctrl_mark_dead(nvme, B_TRUE); 4694 4695 /* 4696 * Fail all outstanding commands, including those in the admin queue 4697 * (queue 0). 4698 */ 4699 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) { 4700 nvme_qpair_t *qp = nvme->n_ioq[i]; 4701 4702 mutex_enter(&qp->nq_mutex); 4703 for (size_t j = 0; j < qp->nq_nentry; j++) { 4704 nvme_cmd_t *cmd = qp->nq_cmd[j]; 4705 nvme_cmd_t *u_cmd; 4706 4707 if (cmd == NULL) { 4708 continue; 4709 } 4710 4711 /* 4712 * Since we have the queue lock held the entire time we 4713 * iterate over it, it's not possible for the queue to 4714 * change underneath us. Thus, we don't need to check 4715 * that the return value of nvme_unqueue_cmd matches the 4716 * requested cmd to unqueue. 
4717 */ 4718 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 4719 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, 4720 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 4721 4722 ASSERT3P(u_cmd, ==, cmd); 4723 } 4724 mutex_exit(&qp->nq_mutex); 4725 } 4726 } 4727 4728 /* 4729 * Open minor management 4730 */ 4731 static int 4732 nvme_minor_comparator(const void *l, const void *r) 4733 { 4734 const nvme_minor_t *lm = l; 4735 const nvme_minor_t *rm = r; 4736 4737 if (lm->nm_minor > rm->nm_minor) { 4738 return (1); 4739 } else if (lm->nm_minor < rm->nm_minor) { 4740 return (-1); 4741 } else { 4742 return (0); 4743 } 4744 } 4745 4746 static void 4747 nvme_minor_free(nvme_minor_t *minor) 4748 { 4749 if (minor->nm_minor > 0) { 4750 ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN); 4751 id_free(nvme_open_minors, minor->nm_minor); 4752 minor->nm_minor = 0; 4753 } 4754 VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node)); 4755 VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node)); 4756 cv_destroy(&minor->nm_cv); 4757 kmem_free(minor, sizeof (nvme_minor_t)); 4758 } 4759 4760 static nvme_minor_t * 4761 nvme_minor_find_by_dev(dev_t dev) 4762 { 4763 id_t id = (id_t)getminor(dev); 4764 nvme_minor_t search = { .nm_minor = id }; 4765 nvme_minor_t *ret; 4766 4767 mutex_enter(&nvme_open_minors_mutex); 4768 ret = avl_find(&nvme_open_minors_avl, &search, NULL); 4769 mutex_exit(&nvme_open_minors_mutex); 4770 4771 return (ret); 4772 } 4773 4774 static int 4775 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 4776 { 4777 nvme_t *nvme; 4778 int instance; 4779 int nregs; 4780 off_t regsize; 4781 char name[32]; 4782 boolean_t attached_ns; 4783 4784 if (cmd != DDI_ATTACH) 4785 return (DDI_FAILURE); 4786 4787 instance = ddi_get_instance(dip); 4788 4789 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 4790 return (DDI_FAILURE); 4791 4792 nvme = ddi_get_soft_state(nvme_state, instance); 4793 ddi_set_driver_private(dip, nvme); 4794 nvme->n_dip = dip; 4795 4796 /* 4797 * Map PCI config space 4798 */ 4799 if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) { 4800 dev_err(dip, CE_WARN, "!failed to map PCI config space"); 4801 goto fail; 4802 } 4803 nvme->n_progress |= NVME_PCI_CONFIG; 4804 4805 /* 4806 * Get the various PCI IDs from config space 4807 */ 4808 nvme->n_vendor_id = 4809 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID); 4810 nvme->n_device_id = 4811 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID); 4812 nvme->n_revision_id = 4813 pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID); 4814 nvme->n_subsystem_device_id = 4815 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID); 4816 nvme->n_subsystem_vendor_id = 4817 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID); 4818 4819 nvme_detect_quirks(nvme); 4820 4821 /* 4822 * Set up event handlers for hot removal. While npe(4D) supports the hot 4823 * removal event being injected for devices, the same is not true of all 4824 * of our possible parents (i.e. pci(4D) as of this writing). The most 4825 * common case this shows up is in some virtualization environments. We 4826 * should treat this as non-fatal so that way devices work but leave 4827 * this set up in such a way that if a nexus does grow support for this 4828 * we're good to go. 
4829 */ 4830 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT, 4831 &nvme->n_rm_cookie) == DDI_SUCCESS) { 4832 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie, 4833 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) != 4834 DDI_SUCCESS) { 4835 goto fail; 4836 } 4837 } else { 4838 nvme->n_ev_rm_cb_id = NULL; 4839 } 4840 4841 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL); 4842 nvme->n_progress |= NVME_MUTEX_INIT; 4843 4844 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4845 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 4846 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 4847 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 4848 B_TRUE : B_FALSE; 4849 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4850 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 4851 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4852 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 4853 /* 4854 * Double up the default for completion queues in case of 4855 * queue sharing. 4856 */ 4857 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4858 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 4859 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4860 DDI_PROP_DONTPASS, "async-event-limit", 4861 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 4862 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4863 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 4864 B_TRUE : B_FALSE; 4865 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4866 DDI_PROP_DONTPASS, "min-phys-block-size", 4867 NVME_DEFAULT_MIN_BLOCK_SIZE); 4868 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4869 DDI_PROP_DONTPASS, "max-submission-queues", -1); 4870 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4871 DDI_PROP_DONTPASS, "max-completion-queues", -1); 4872 4873 if (!ISP2(nvme->n_min_block_size) || 4874 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 4875 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 4876 "using default %d", ISP2(nvme->n_min_block_size) ? 4877 "too low" : "not a power of 2", 4878 NVME_DEFAULT_MIN_BLOCK_SIZE); 4879 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 4880 } 4881 4882 if (nvme->n_submission_queues != -1 && 4883 (nvme->n_submission_queues < 1 || 4884 nvme->n_submission_queues > UINT16_MAX)) { 4885 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not " 4886 "valid. Must be [1..%d]", nvme->n_submission_queues, 4887 UINT16_MAX); 4888 nvme->n_submission_queues = -1; 4889 } 4890 4891 if (nvme->n_completion_queues != -1 && 4892 (nvme->n_completion_queues < 1 || 4893 nvme->n_completion_queues > UINT16_MAX)) { 4894 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not " 4895 "valid. 
Must be [1..%d]", nvme->n_completion_queues, 4896 UINT16_MAX); 4897 nvme->n_completion_queues = -1; 4898 } 4899 4900 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 4901 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 4902 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 4903 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 4904 4905 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 4906 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 4907 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 4908 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 4909 4910 if (nvme->n_async_event_limit < 1) 4911 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 4912 4913 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 4914 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 4915 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 4916 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 4917 4918 /* 4919 * Set up FMA support. 4920 */ 4921 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 4922 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 4923 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 4924 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 4925 4926 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 4927 4928 if (nvme->n_fm_cap) { 4929 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 4930 nvme->n_reg_acc_attr.devacc_attr_access = 4931 DDI_FLAGERR_ACC; 4932 4933 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 4934 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 4935 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 4936 } 4937 4938 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 4939 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4940 pci_ereport_setup(dip); 4941 4942 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4943 ddi_fm_handler_register(dip, nvme_fm_errcb, 4944 (void *)nvme); 4945 } 4946 4947 nvme->n_progress |= NVME_FMA_INIT; 4948 4949 /* 4950 * The spec defines several register sets. Only the controller 4951 * registers (set 1) are currently used. 
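 * For a PCI function, regset 0 names config space (already mapped via
 * pci_config_setup() above); the controller registers live in regset 1.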
4952 */ 4953 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 4954 nregs < 2 || 4955 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 4956 goto fail; 4957 4958 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 4959 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 4960 dev_err(dip, CE_WARN, "!failed to map regset 1"); 4961 goto fail; 4962 } 4963 4964 nvme->n_progress |= NVME_REGS_MAPPED; 4965 4966 /* 4967 * Set up kstats 4968 */ 4969 if (!nvme_stat_init(nvme)) { 4970 dev_err(dip, CE_WARN, "!failed to create device kstats"); 4971 goto fail; 4972 } 4973 nvme->n_progress |= NVME_STAT_INIT; 4974 4975 /* 4976 * Create PRP DMA cache 4977 */ 4978 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 4979 ddi_driver_name(dip), ddi_get_instance(dip)); 4980 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 4981 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 4982 NULL, (void *)nvme, NULL, 0); 4983 4984 if (nvme_init(nvme) != DDI_SUCCESS) 4985 goto fail; 4986 4987 /* 4988 * Initialize the driver with the UFM subsystem 4989 */ 4990 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 4991 &nvme->n_ufmh, nvme) != 0) { 4992 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 4993 goto fail; 4994 } 4995 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 4996 ddi_ufm_update(nvme->n_ufmh); 4997 nvme->n_progress |= NVME_UFM_INIT; 4998 4999 nvme_mgmt_lock_init(&nvme->n_mgmt); 5000 nvme_lock_init(&nvme->n_lock); 5001 nvme->n_progress |= NVME_MGMT_INIT; 5002 nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD; 5003 5004 /* 5005 * Identify namespaces. 5006 */ 5007 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 5008 5009 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 5010 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 5011 5012 nvme_lock_init(&ns->ns_lock); 5013 ns->ns_progress |= NVME_NS_LOCK; 5014 5015 /* 5016 * Namespaces start out ignored. When nvme_init_ns() checks 5017 * their properties and finds they can be used, it will set 5018 * ns_ignore to B_FALSE. It will also use this state change 5019 * to keep an accurate count of attachable namespaces. 5020 */ 5021 ns->ns_ignore = B_TRUE; 5022 if (nvme_init_ns(nvme, i) != 0) { 5023 nvme_mgmt_unlock(nvme); 5024 goto fail; 5025 } 5026 5027 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR, 5028 NVME_MINOR(ddi_get_instance(nvme->n_dip), i), 5029 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 5030 nvme_mgmt_unlock(nvme); 5031 dev_err(dip, CE_WARN, 5032 "!failed to create minor node for namespace %d", i); 5033 goto fail; 5034 } 5035 } 5036 5037 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 5038 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) != 5039 DDI_SUCCESS) { 5040 nvme_mgmt_unlock(nvme); 5041 dev_err(dip, CE_WARN, "nvme_attach: " 5042 "cannot create devctl minor node"); 5043 goto fail; 5044 } 5045 5046 attached_ns = B_FALSE; 5047 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 5048 nvme_ioctl_common_t com = { .nioc_nsid = i }; 5049 5050 if (nvme_attach_ns(nvme, &com)) { 5051 attached_ns = B_TRUE; 5052 } else if (com.nioc_drv_err != NVME_IOCTL_E_UNSUP_ATTACH_NS) { 5053 dev_err(nvme->n_dip, CE_WARN, "!failed to attach " 5054 "namespace %d due to blkdev error", i); 5055 /* 5056 * Once we have successfully attached a namespace we 5057 * can no longer fail the driver attach as there is now 5058 * a blkdev child node linked to this device, and 5059 * our node is not yet in the attached state. 
5060 */ 5061 if (!attached_ns) { 5062 nvme_mgmt_unlock(nvme); 5063 goto fail; 5064 } 5065 } 5066 } 5067 5068 nvme_mgmt_unlock(nvme); 5069 5070 return (DDI_SUCCESS); 5071 5072 fail: 5073 /* attach successful anyway so that FMA can retire the device */ 5074 if (nvme->n_dead) 5075 return (DDI_SUCCESS); 5076 5077 (void) nvme_detach(dip, DDI_DETACH); 5078 5079 return (DDI_FAILURE); 5080 } 5081 5082 static int 5083 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 5084 { 5085 int instance; 5086 nvme_t *nvme; 5087 5088 if (cmd != DDI_DETACH) 5089 return (DDI_FAILURE); 5090 5091 instance = ddi_get_instance(dip); 5092 5093 nvme = ddi_get_soft_state(nvme_state, instance); 5094 5095 if (nvme == NULL) 5096 return (DDI_FAILURE); 5097 5098 /* 5099 * Remove all minor nodes from the device regardless of the source in 5100 * one swoop. 5101 */ 5102 ddi_remove_minor_node(dip, NULL); 5103 5104 /* 5105 * We need to remove the event handler as one of the first things that 5106 * we do. If we proceed with other teardown without removing the event 5107 * handler, we could end up in a very unfortunate race with ourselves. 5108 * The DDI does not serialize these with detach (just like timeout(9F) 5109 * and others). 5110 */ 5111 if (nvme->n_ev_rm_cb_id != NULL) { 5112 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id); 5113 } 5114 nvme->n_ev_rm_cb_id = NULL; 5115 5116 /* 5117 * If the controller was marked dead, there is a slight chance that we 5118 * are asynchronously processing the removal taskq. Because we have 5119 * removed the callback handler above and all minor nodes and commands 5120 * are closed, there is no other way to get in here. As such, we wait on 5121 * the nvme_dead_taskq to complete so we can avoid tracking if it's 5122 * running or not. 5123 */ 5124 taskq_wait(nvme_dead_taskq); 5125 5126 if (nvme->n_ns) { 5127 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 5128 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 5129 5130 if (ns->ns_bd_hdl) { 5131 (void) bd_detach_handle(ns->ns_bd_hdl); 5132 bd_free_handle(ns->ns_bd_hdl); 5133 } 5134 5135 if (ns->ns_idns) 5136 kmem_free(ns->ns_idns, 5137 sizeof (nvme_identify_nsid_t)); 5138 if (ns->ns_devid) 5139 strfree(ns->ns_devid); 5140 5141 if ((ns->ns_progress & NVME_NS_LOCK) != 0) 5142 nvme_lock_fini(&ns->ns_lock); 5143 } 5144 5145 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 5146 nvme->n_namespace_count); 5147 } 5148 5149 if (nvme->n_progress & NVME_MGMT_INIT) { 5150 nvme_lock_fini(&nvme->n_lock); 5151 nvme_mgmt_lock_fini(&nvme->n_mgmt); 5152 } 5153 5154 if (nvme->n_progress & NVME_UFM_INIT) { 5155 ddi_ufm_fini(nvme->n_ufmh); 5156 mutex_destroy(&nvme->n_fwslot_mutex); 5157 } 5158 5159 if (nvme->n_progress & NVME_INTERRUPTS) 5160 nvme_release_interrupts(nvme); 5161 5162 for (uint_t i = 0; i < nvme->n_cq_count; i++) { 5163 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL) 5164 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq); 5165 } 5166 5167 if (nvme->n_progress & NVME_MUTEX_INIT) { 5168 mutex_destroy(&nvme->n_minor_mutex); 5169 } 5170 5171 if (nvme->n_ioq_count > 0) { 5172 for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) { 5173 if (nvme->n_ioq[i] != NULL) { 5174 /* TODO: send destroy queue commands */ 5175 nvme_free_qpair(nvme->n_ioq[i]); 5176 } 5177 } 5178 5179 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 5180 (nvme->n_ioq_count + 1)); 5181 } 5182 5183 if (nvme->n_prp_cache != NULL) { 5184 kmem_cache_destroy(nvme->n_prp_cache); 5185 } 5186 5187 if (nvme->n_progress & NVME_REGS_MAPPED) { 5188 nvme_shutdown(nvme, B_FALSE); 5189 (void)
nvme_reset(nvme, B_FALSE); 5190 } 5191 5192 if (nvme->n_progress & NVME_CTRL_LIMITS) 5193 sema_destroy(&nvme->n_abort_sema); 5194 5195 if (nvme->n_progress & NVME_ADMIN_QUEUE) 5196 nvme_free_qpair(nvme->n_adminq); 5197 5198 if (nvme->n_cq_count > 0) { 5199 nvme_destroy_cq_array(nvme, 0); 5200 nvme->n_cq = NULL; 5201 nvme->n_cq_count = 0; 5202 } 5203 5204 if (nvme->n_idcomns) 5205 kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE); 5206 5207 if (nvme->n_idctl) 5208 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 5209 5210 if (nvme->n_progress & NVME_REGS_MAPPED) 5211 ddi_regs_map_free(&nvme->n_regh); 5212 5213 if (nvme->n_progress & NVME_STAT_INIT) 5214 nvme_stat_cleanup(nvme); 5215 5216 if (nvme->n_progress & NVME_FMA_INIT) { 5217 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 5218 ddi_fm_handler_unregister(nvme->n_dip); 5219 5220 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 5221 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 5222 pci_ereport_teardown(nvme->n_dip); 5223 5224 ddi_fm_fini(nvme->n_dip); 5225 } 5226 5227 if (nvme->n_progress & NVME_PCI_CONFIG) 5228 pci_config_teardown(&nvme->n_pcicfg_handle); 5229 5230 if (nvme->n_vendor != NULL) 5231 strfree(nvme->n_vendor); 5232 5233 if (nvme->n_product != NULL) 5234 strfree(nvme->n_product); 5235 5236 ddi_soft_state_free(nvme_state, instance); 5237 5238 return (DDI_SUCCESS); 5239 } 5240 5241 static int 5242 nvme_quiesce(dev_info_t *dip) 5243 { 5244 int instance; 5245 nvme_t *nvme; 5246 5247 instance = ddi_get_instance(dip); 5248 5249 nvme = ddi_get_soft_state(nvme_state, instance); 5250 5251 if (nvme == NULL) 5252 return (DDI_FAILURE); 5253 5254 nvme_shutdown(nvme, B_TRUE); 5255 5256 (void) nvme_reset(nvme, B_TRUE); 5257 5258 return (DDI_SUCCESS); 5259 } 5260 5261 static int 5262 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma) 5263 { 5264 nvme_t *nvme = cmd->nc_nvme; 5265 uint_t nprp_per_page, nprp; 5266 uint64_t *prp; 5267 const ddi_dma_cookie_t *cookie; 5268 uint_t idx; 5269 uint_t ncookies = ddi_dma_ncookies(dma); 5270 5271 if (ncookies == 0) 5272 return (DDI_FAILURE); 5273 5274 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL) 5275 return (DDI_FAILURE); 5276 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress; 5277 5278 if (ncookies == 1) { 5279 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 5280 return (DDI_SUCCESS); 5281 } else if (ncookies == 2) { 5282 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL) 5283 return (DDI_FAILURE); 5284 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress; 5285 return (DDI_SUCCESS); 5286 } 5287 5288 /* 5289 * At this point, we're always operating on cookies at 5290 * index >= 1 and writing the addresses of those cookies 5291 * into a new page. The address of that page is stored 5292 * as the second PRP entry. 5293 */ 5294 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t); 5295 ASSERT(nprp_per_page > 0); 5296 5297 /* 5298 * We currently don't support chained PRPs and set up our DMA 5299 * attributes to reflect that. If we still get an I/O request 5300 * that needs a chained PRP something is very wrong. Account 5301 * for the first cookie here, which we've placed in d_prp[0]. 5302 */ 5303 nprp = howmany(ncookies - 1, nprp_per_page); 5304 VERIFY(nprp == 1); 5305 5306 /* 5307 * Allocate a page of pointers, in which we'll write the 5308 * addresses of cookies 1 to `ncookies`. 
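 *
 * Illustrative layout for ncookies == 4:
 *
 *	d_prp[0] -> cookie 0 (start of the data buffer)
 *	d_prp[1] -> this page: { cookie 1, cookie 2, cookie 3 }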
5309 */ 5310 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 5311 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 5312 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress; 5313 5314 prp = (uint64_t *)cmd->nc_prp->nd_memp; 5315 for (idx = 1; idx < ncookies; idx++) { 5316 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL) 5317 return (DDI_FAILURE); 5318 *prp++ = cookie->dmac_laddress; 5319 } 5320 5321 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 5322 DDI_DMA_SYNC_FORDEV); 5323 return (DDI_SUCCESS); 5324 } 5325 5326 /* 5327 * The maximum number of requests supported for a deallocate request is 5328 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and 5329 * unchanged through at least 1.4a). The definition of nvme_range_t is also 5330 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for 5331 * a deallocate request will fit into the smallest supported namespace page 5332 * (4k). 5333 */ 5334 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096); 5335 5336 static int 5337 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize, 5338 int allocflag) 5339 { 5340 const dkioc_free_list_t *dfl = xfer->x_dfl; 5341 const dkioc_free_list_ext_t *exts = dfl->dfl_exts; 5342 nvme_t *nvme = cmd->nc_nvme; 5343 nvme_range_t *ranges = NULL; 5344 uint_t i; 5345 5346 /* 5347 * The number of ranges in the request is 0s based (that is 5348 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ..., 5349 * word10 == 255 -> 256 ranges). Therefore the allowed values are 5350 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request, 5351 * we either provided bad info in nvme_bd_driveinfo() or there is a bug 5352 * in blkdev. 5353 */ 5354 VERIFY3U(dfl->dfl_num_exts, >, 0); 5355 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES); 5356 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff; 5357 5358 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE; 5359 5360 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag); 5361 if (cmd->nc_prp == NULL) 5362 return (DDI_FAILURE); 5363 5364 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 5365 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp; 5366 5367 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress; 5368 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 5369 5370 for (i = 0; i < dfl->dfl_num_exts; i++) { 5371 uint64_t lba, len; 5372 5373 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize; 5374 len = exts[i].dfle_length / blocksize; 5375 5376 VERIFY3U(len, <=, UINT32_MAX); 5377 5378 /* No context attributes for a deallocate request */ 5379 ranges[i].nr_ctxattr = 0; 5380 ranges[i].nr_len = len; 5381 ranges[i].nr_lba = lba; 5382 } 5383 5384 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 5385 DDI_DMA_SYNC_FORDEV); 5386 5387 return (DDI_SUCCESS); 5388 } 5389 5390 static nvme_cmd_t * 5391 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 5392 { 5393 nvme_t *nvme = ns->ns_nvme; 5394 nvme_cmd_t *cmd; 5395 int allocflag; 5396 5397 /* 5398 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 5399 */ 5400 allocflag = (xfer->x_flags & BD_XFER_POLL) ? 
KM_NOSLEEP : KM_SLEEP; 5401 cmd = nvme_alloc_cmd(nvme, allocflag); 5402 5403 if (cmd == NULL) 5404 return (NULL); 5405 5406 cmd->nc_sqe.sqe_opc = opc; 5407 cmd->nc_callback = nvme_bd_xfer_done; 5408 cmd->nc_xfer = xfer; 5409 5410 switch (opc) { 5411 case NVME_OPC_NVM_WRITE: 5412 case NVME_OPC_NVM_READ: 5413 VERIFY(xfer->x_nblks <= 0x10000); 5414 5415 cmd->nc_sqe.sqe_nsid = ns->ns_id; 5416 5417 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 5418 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 5419 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 5420 5421 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS) 5422 goto fail; 5423 break; 5424 5425 case NVME_OPC_NVM_FLUSH: 5426 cmd->nc_sqe.sqe_nsid = ns->ns_id; 5427 break; 5428 5429 case NVME_OPC_NVM_DSET_MGMT: 5430 cmd->nc_sqe.sqe_nsid = ns->ns_id; 5431 5432 if (nvme_fill_ranges(cmd, xfer, 5433 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS) 5434 goto fail; 5435 break; 5436 5437 default: 5438 goto fail; 5439 } 5440 5441 return (cmd); 5442 5443 fail: 5444 nvme_free_cmd(cmd); 5445 return (NULL); 5446 } 5447 5448 static void 5449 nvme_bd_xfer_done(void *arg) 5450 { 5451 nvme_cmd_t *cmd = arg; 5452 bd_xfer_t *xfer = cmd->nc_xfer; 5453 int error = 0; 5454 5455 error = nvme_check_cmd_status(cmd); 5456 nvme_free_cmd(cmd); 5457 5458 bd_xfer_done(xfer, error); 5459 } 5460 5461 static void 5462 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 5463 { 5464 nvme_namespace_t *ns = arg; 5465 nvme_t *nvme = ns->ns_nvme; 5466 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable); 5467 5468 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO); 5469 5470 /* 5471 * Set the blkdev qcount to the number of submission queues. 5472 * It will then create one waitq/runq pair for each submission 5473 * queue and spread I/O requests across the queues. 5474 */ 5475 drive->d_qcount = nvme->n_ioq_count; 5476 5477 /* 5478 * I/O activity to individual namespaces is distributed across 5479 * each of the d_qcount blkdev queues (which has been set to 5480 * the number of nvme submission queues). d_qsize is the number 5481 * of submitted and not completed I/Os within each queue that blkdev 5482 * will allow before it starts holding them in the waitq. 5483 * 5484 * Each namespace will create a child blkdev instance; for each one 5485 * we try to set the d_qsize so that each namespace gets an 5486 * equal portion of the submission queue. 5487 * 5488 * If, post instantiation of the nvme drive, n_namespaces_attachable 5489 * changes and a namespace is attached, it could calculate a 5490 * different d_qsize. It may even be that the sum of the d_qsizes is 5491 * now beyond the submission queue size. Should that be the case 5492 * and the I/O rate is such that blkdev attempts to submit more 5493 * I/Os than the size of the submission queue, the excess I/Os 5494 * will be held behind the semaphore nq_sema. 5495 */ 5496 drive->d_qsize = nvme->n_io_squeue_len / ns_count; 5497 5498 /* 5499 * Don't let the queue size drop below the minimum, though. 5500 */ 5501 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN); 5502 5503 /* 5504 * d_maxxfer is not set, which means the value is taken from the DMA 5505 * attributes specified to bd_alloc_handle.
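 *
 * To make the d_qsize computation above concrete (illustrative
 * numbers only): with n_io_squeue_len == 1024 and four attachable
 * namespaces, each child blkdev instance gets d_qsize == 256; the
 * MAX() above then keeps a very large namespace count from driving
 * the value below NVME_MIN_IO_QUEUE_LEN.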
5506 */ 5507 5508 drive->d_removable = B_FALSE; 5509 drive->d_hotpluggable = B_FALSE; 5510 5511 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64)); 5512 drive->d_target = ns->ns_id; 5513 drive->d_lun = 0; 5514 5515 drive->d_model = nvme->n_idctl->id_model; 5516 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 5517 drive->d_vendor = nvme->n_vendor; 5518 drive->d_vendor_len = strlen(nvme->n_vendor); 5519 drive->d_product = nvme->n_product; 5520 drive->d_product_len = strlen(nvme->n_product); 5521 drive->d_serial = nvme->n_idctl->id_serial; 5522 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 5523 drive->d_revision = nvme->n_idctl->id_fwrev; 5524 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 5525 5526 /* 5527 * If we support the dataset management command, the only restrictions 5528 * on a discard request are the maximum number of ranges (segments) 5529 * per single request. 5530 */ 5531 if (nvme->n_idctl->id_oncs.on_dset_mgmt) 5532 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES; 5533 5534 nvme_mgmt_unlock(nvme); 5535 } 5536 5537 static int 5538 nvme_bd_mediainfo(void *arg, bd_media_t *media) 5539 { 5540 nvme_namespace_t *ns = arg; 5541 nvme_t *nvme = ns->ns_nvme; 5542 5543 if (nvme->n_dead) { 5544 return (EIO); 5545 } 5546 5547 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO); 5548 5549 media->m_nblks = ns->ns_block_count; 5550 media->m_blksize = ns->ns_block_size; 5551 media->m_readonly = B_FALSE; 5552 media->m_solidstate = B_TRUE; 5553 5554 media->m_pblksize = ns->ns_best_block_size; 5555 5556 nvme_mgmt_unlock(nvme); 5557 5558 return (0); 5559 } 5560 5561 static int 5562 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 5563 { 5564 nvme_t *nvme = ns->ns_nvme; 5565 nvme_cmd_t *cmd; 5566 nvme_qpair_t *ioq; 5567 boolean_t poll; 5568 int ret; 5569 5570 if (nvme->n_dead) { 5571 return (EIO); 5572 } 5573 5574 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 5575 if (cmd == NULL) 5576 return (ENOMEM); 5577 5578 cmd->nc_sqid = xfer->x_qnum + 1; 5579 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 5580 ioq = nvme->n_ioq[cmd->nc_sqid]; 5581 5582 /* 5583 * Get the polling flag before submitting the command. The command may 5584 * complete immediately after it was submitted, which means we must 5585 * treat both cmd and xfer as if they have been freed already. 5586 */ 5587 poll = (xfer->x_flags & BD_XFER_POLL) != 0; 5588 5589 ret = nvme_submit_io_cmd(ioq, cmd); 5590 5591 if (ret != 0) 5592 return (ret); 5593 5594 if (!poll) 5595 return (0); 5596 5597 do { 5598 cmd = nvme_retrieve_cmd(nvme, ioq); 5599 if (cmd != NULL) { 5600 ASSERT0(cmd->nc_flags & NVME_CMD_F_USELOCK); 5601 cmd->nc_callback(cmd); 5602 } else { 5603 drv_usecwait(10); 5604 } 5605 } while (ioq->nq_active_cmds != 0); 5606 5607 return (0); 5608 } 5609 5610 static int 5611 nvme_bd_read(void *arg, bd_xfer_t *xfer) 5612 { 5613 nvme_namespace_t *ns = arg; 5614 5615 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 5616 } 5617 5618 static int 5619 nvme_bd_write(void *arg, bd_xfer_t *xfer) 5620 { 5621 nvme_namespace_t *ns = arg; 5622 5623 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 5624 } 5625 5626 static int 5627 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 5628 { 5629 nvme_namespace_t *ns = arg; 5630 5631 if (ns->ns_nvme->n_dead) 5632 return (EIO); 5633 5634 /* 5635 * If the volatile write cache is not present or not enabled the FLUSH 5636 * command is a no-op, so we can take a shortcut here. 
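 *
 * Concretely: if no cache exists at all, a flush can never mean
 * anything, so we complete the transfer with ENOTSUP; if the cache
 * exists but is disabled, there is no dirty data to flush, so we
 * complete the transfer successfully without issuing a command.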
5637 */ 5638 if (!ns->ns_nvme->n_write_cache_present) { 5639 bd_xfer_done(xfer, ENOTSUP); 5640 return (0); 5641 } 5642 5643 if (!ns->ns_nvme->n_write_cache_enabled) { 5644 bd_xfer_done(xfer, 0); 5645 return (0); 5646 } 5647 5648 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 5649 } 5650 5651 static int 5652 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 5653 { 5654 nvme_namespace_t *ns = arg; 5655 nvme_t *nvme = ns->ns_nvme; 5656 5657 if (nvme->n_dead) { 5658 return (EIO); 5659 } 5660 5661 if (*(uint64_t *)ns->ns_nguid != 0 || 5662 *(uint64_t *)(ns->ns_nguid + 8) != 0) { 5663 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID, 5664 sizeof (ns->ns_nguid), ns->ns_nguid, devid)); 5665 } else if (*(uint64_t *)ns->ns_eui64 != 0) { 5666 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64, 5667 sizeof (ns->ns_eui64), ns->ns_eui64, devid)); 5668 } else { 5669 return (ddi_devid_init(devinfo, DEVID_NVME_NSID, 5670 strlen(ns->ns_devid), ns->ns_devid, devid)); 5671 } 5672 } 5673 5674 static int 5675 nvme_bd_free_space(void *arg, bd_xfer_t *xfer) 5676 { 5677 nvme_namespace_t *ns = arg; 5678 5679 if (xfer->x_dfl == NULL) 5680 return (EINVAL); 5681 5682 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt) 5683 return (ENOTSUP); 5684 5685 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT)); 5686 } 5687 5688 static int 5689 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 5690 { 5691 #ifndef __lock_lint 5692 _NOTE(ARGUNUSED(cred_p)); 5693 #endif 5694 nvme_t *nvme; 5695 nvme_minor_t *minor = NULL; 5696 uint32_t nsid; 5697 minor_t m = getminor(*devp); 5698 int rv = 0; 5699 5700 if (otyp != OTYP_CHR) 5701 return (EINVAL); 5702 5703 if (m >= NVME_OPEN_MINOR_MIN) 5704 return (ENXIO); 5705 5706 nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m)); 5707 nsid = NVME_MINOR_NSID(m); 5708 5709 if (nvme == NULL) 5710 return (ENXIO); 5711 5712 if (nsid > nvme->n_namespace_count) 5713 return (ENXIO); 5714 5715 if (nvme->n_dead) 5716 return (EIO); 5717 5718 /* 5719 * At this point, we're going to allow an open to proceed on this 5720 * device. We need to allocate a new instance for this (presuming one is 5721 * available). 5722 */ 5723 minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY); 5724 if (minor == NULL) { 5725 return (ENOMEM); 5726 } 5727 5728 cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL); 5729 list_link_init(&minor->nm_ctrl_lock.nli_node); 5730 minor->nm_ctrl_lock.nli_nvme = nvme; 5731 minor->nm_ctrl_lock.nli_minor = minor; 5732 list_link_init(&minor->nm_ns_lock.nli_node); 5733 minor->nm_ns_lock.nli_nvme = nvme; 5734 minor->nm_ns_lock.nli_minor = minor; 5735 minor->nm_minor = id_alloc_nosleep(nvme_open_minors); 5736 if (minor->nm_minor == -1) { 5737 nvme_minor_free(minor); 5738 return (ENOSPC); 5739 } 5740 5741 minor->nm_ctrl = nvme; 5742 if (nsid != 0) { 5743 minor->nm_ns = nvme_nsid2ns(nvme, nsid); 5744 } 5745 5746 /* 5747 * Before we check for exclusive access and attempt a lock if requested, 5748 * ensure that this minor is persisted. 5749 */ 5750 mutex_enter(&nvme_open_minors_mutex); 5751 avl_add(&nvme_open_minors_avl, minor); 5752 mutex_exit(&nvme_open_minors_mutex); 5753 5754 /* 5755 * A request to open this with FEXCL is translated into a non-blocking 5756 * write lock of the appropriate entity. This honors the original 5757 * semantics. In the future, we should see if we can remove this 5758 * and turn a request for FEXCL at open into ENOTSUP.
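 *
 * Illustratively, an open(2) of the controller minor with
 * O_RDWR | O_EXCL behaves roughly as if the caller had immediately
 * issued NVME_IOC_LOCK requesting NVME_LOCK_L_WRITE with
 * NVME_LOCK_F_DONT_BLOCK, while the same open of a namespace minor
 * targets that namespace's lock instead, as the code below shows.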
5759 */ 5760 mutex_enter(&nvme->n_minor_mutex); 5761 if ((flag & FEXCL) != 0) { 5762 nvme_ioctl_lock_t lock = { 5763 .nil_level = NVME_LOCK_L_WRITE, 5764 .nil_flags = NVME_LOCK_F_DONT_BLOCK 5765 }; 5766 5767 if (minor->nm_ns != NULL) { 5768 lock.nil_ent = NVME_LOCK_E_NS; 5769 lock.nil_common.nioc_nsid = nsid; 5770 } else { 5771 lock.nil_ent = NVME_LOCK_E_CTRL; 5772 } 5773 nvme_rwlock(minor, &lock); 5774 if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) { 5775 mutex_exit(&nvme->n_minor_mutex); 5776 5777 mutex_enter(&nvme_open_minors_mutex); 5778 avl_remove(&nvme_open_minors_avl, minor); 5779 mutex_exit(&nvme_open_minors_mutex); 5780 5781 nvme_minor_free(minor); 5782 return (EBUSY); 5783 } 5784 } 5785 mutex_exit(&nvme->n_minor_mutex); 5786 5787 *devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor); 5788 return (rv); 5789 5790 } 5791 5792 static int 5793 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused) 5794 { 5795 nvme_minor_t *minor; 5796 nvme_t *nvme; 5797 5798 if (otyp != OTYP_CHR) { 5799 return (ENXIO); 5800 } 5801 5802 minor = nvme_minor_find_by_dev(dev); 5803 if (minor == NULL) { 5804 return (ENXIO); 5805 } 5806 5807 mutex_enter(&nvme_open_minors_mutex); 5808 avl_remove(&nvme_open_minors_avl, minor); 5809 mutex_exit(&nvme_open_minors_mutex); 5810 5811 /* 5812 * When this device is being closed, we must ensure that any locks held 5813 * by this minor are dealt with. 5814 */ 5815 nvme = minor->nm_ctrl; 5816 mutex_enter(&nvme->n_minor_mutex); 5817 ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED); 5818 ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED); 5819 5820 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) { 5821 VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL); 5822 nvme_rwunlock(&minor->nm_ctrl_lock, 5823 minor->nm_ctrl_lock.nli_lock); 5824 } 5825 5826 if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) { 5827 VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL); 5828 nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock); 5829 } 5830 mutex_exit(&nvme->n_minor_mutex); 5831 5832 nvme_minor_free(minor); 5833 5834 return (0); 5835 } 5836 5837 void 5838 nvme_ioctl_success(nvme_ioctl_common_t *ioc) 5839 { 5840 ioc->nioc_drv_err = NVME_IOCTL_E_OK; 5841 ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS; 5842 ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC; 5843 } 5844 5845 boolean_t 5846 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct, 5847 uint32_t sc) 5848 { 5849 ioc->nioc_drv_err = err; 5850 ioc->nioc_ctrl_sct = sct; 5851 ioc->nioc_ctrl_sc = sc; 5852 5853 return (B_FALSE); 5854 } 5855 5856 static int 5857 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode) 5858 { 5859 nvme_ioctl_common_t ioc; 5860 5861 ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR); 5862 bzero(&ioc, sizeof (ioc)); ioc.nioc_drv_err = err; 5863 if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t), 5864 mode & FKIOCTL) != 0) { 5865 return (EFAULT); 5866 } 5867 return (0); 5868 } 5869 5870 /* 5871 * The companion to the namespace checking. This occurs after any rewriting 5872 * occurs. This is the primary point that we attempt to enforce any operation's 5873 * exclusivity. Note, it is theoretically possible for an operation to be 5874 * ongoing and to have someone with an exclusive lock ask to unlock it for some 5875 * reason. We do not track the number of such events that are going on.
5876 * While perhaps this is leaving too much up to the user, by the same token we 5877 * don't try to stop them from issuing two different format NVM commands 5878 * targeting the whole device at the same time either, even though the 5879 * controller would really rather that didn't happen. 5880 */ 5881 static boolean_t 5882 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc, 5883 const nvme_ioctl_check_t *check) 5884 { 5885 nvme_t *const nvme = minor->nm_ctrl; 5886 nvme_namespace_t *ns; 5887 boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl; 5888 5889 /* 5890 * If the command doesn't require anything, then we're done. 5891 */ 5892 if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) { 5893 return (B_TRUE); 5894 } 5895 5896 if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) { 5897 ns = NULL; 5898 } else { 5899 ns = nvme_nsid2ns(nvme, ioc->nioc_nsid); 5900 } 5901 5902 mutex_enter(&nvme->n_minor_mutex); 5903 ctrl_is_excl = nvme->n_lock.nl_writer != NULL; 5904 have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock; 5905 if (ns != NULL) { 5906 /* 5907 * We explicitly test the namespace lock's writer versus asking 5908 * the minor because the minor's namespace lock may apply to a 5909 * different namespace. 5910 */ 5911 ns_is_excl = ns->ns_lock.nl_writer != NULL; 5912 have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock; 5913 ASSERT0(have_ctrl && have_ns); 5914 #ifdef DEBUG 5915 if (have_ns) { 5916 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns); 5917 } 5918 #endif 5919 } else { 5920 ns_is_excl = B_FALSE; 5921 have_ns = B_FALSE; 5922 } 5923 ASSERT0(ctrl_is_excl && ns_is_excl); 5924 mutex_exit(&nvme->n_minor_mutex); 5925 5926 if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) { 5927 if (ns == NULL) { 5928 if (have_ctrl) { 5929 return (B_TRUE); 5930 } 5931 return (nvme_ioctl_error(ioc, 5932 NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0)); 5933 } else { 5934 if (have_ctrl || have_ns) { 5935 return (B_TRUE); 5936 } 5937 return (nvme_ioctl_error(ioc, 5938 NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0)); 5939 } 5940 } 5941 5942 /* 5943 * Now we have an operation that does not require exclusive access. We 5944 * can proceed as long as no one else has it, or if someone does, it is 5945 * us. Regardless of what we target, a controller lock will stop us. 5946 */ 5947 if (ctrl_is_excl && !have_ctrl) { 5948 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0)); 5949 } 5950 5951 /* 5952 * Only check namespace exclusivity if we are targeting one. 5953 */ 5954 if (ns != NULL && ns_is_excl && !have_ns) { 5955 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0)); 5956 } 5957 5958 return (B_TRUE); 5959 } 5960 5961 /* 5962 * Perform common checking as to whether or not an ioctl operation may proceed. 5963 * We check in this function various aspects of the namespace attributes that 5964 * the operation is acting on. Once the namespace attributes and any possible 5965 * rewriting have been performed, then we proceed to check whether or not the 5966 * requisite exclusive access is present in nvme_ioctl_excl_check(). 5967 */ 5968 static boolean_t 5969 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc, 5970 const nvme_ioctl_check_t *check) 5971 { 5972 /* 5973 * If the minor has a namespace pointer, then it is constrained to that 5974 * namespace. If a namespace is allowed, then there are only two valid 5975 * values that we can find. The first is one matching the minor. The second 5976 * is the value zero, which will be rewritten to the minor's 5977 * namespace.
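 *
 * For example, on the minor for namespace 2, a caller passing
 * nioc_nsid == 0 has it rewritten to 2, a caller passing 2 proceeds
 * unchanged, and any other value fails with
 * NVME_IOCTL_E_MINOR_WRONG_NS.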
5978 */ 5979 if (minor->nm_ns != NULL) { 5980 if (!check->nck_ns_ok || !check->nck_ns_minor_ok) { 5981 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0, 5982 0)); 5983 } 5984 5985 if (ioc->nioc_nsid == 0) { 5986 ioc->nioc_nsid = minor->nm_ns->ns_id; 5987 } else if (ioc->nioc_nsid != minor->nm_ns->ns_id) { 5988 return (nvme_ioctl_error(ioc, 5989 NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0)); 5990 } 5991 5992 return (nvme_ioctl_excl_check(minor, ioc, check)); 5993 } 5994 5995 /* 5996 * If we've been told to skip checking the controller, here's where we 5997 * do that. This should really only be for commands which use the 5998 * namespace ID for listing purposes and therefore can have 5999 * traditionally illegal values here. 6000 */ 6001 if (check->nck_skip_ctrl) { 6002 return (nvme_ioctl_excl_check(minor, ioc, check)); 6003 } 6004 6005 /* 6006 * At this point, we know that we're on the controller's node. We first 6007 * deal with the simple case: is a namespace allowed at all or not. If 6008 * it is not allowed, then the only acceptable value is zero. 6009 */ 6010 if (!check->nck_ns_ok) { 6011 if (ioc->nioc_nsid != 0) { 6012 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0, 6013 0)); 6014 } 6015 6016 return (nvme_ioctl_excl_check(minor, ioc, check)); 6017 } 6018 6019 /* 6020 * At this point, we know that a controller is allowed to use a 6021 * namespace. If we haven't been given zero or the broadcast namespace, 6022 * check to see if it's actually a valid namespace ID. If it is outside 6023 * of range, then it is an error. Next, if we have been requested to 6024 * rewrite 0 (the 'this controller' indicator) as the broadcast namespace, 6025 * do so. 6026 * 6027 * While we validate that this namespace is within the valid range, we 6028 * do not check if it is active or inactive. That is left to our callers 6029 * to determine. 6030 */ 6031 if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count && 6032 ioc->nioc_nsid != NVME_NSID_BCAST) { 6033 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0)); 6034 } 6035 6036 if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) { 6037 ioc->nioc_nsid = NVME_NSID_BCAST; 6038 } 6039 6040 /* 6041 * Finally, see if we have ended up with a broadcast namespace ID 6042 * whether through specification or rewriting. If that is not allowed, 6043 * then that is an error.
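 *
 * In short, the controller-minor flow here is: reject anything
 * beyond n_namespace_count other than NVME_NSID_BCAST, optionally
 * rewrite zero to NVME_NSID_BCAST, and finally reject a broadcast ID
 * when nck_bcast_ok isn't set.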
6044 */ 6045 if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) { 6046 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0)); 6047 } 6048 6049 return (nvme_ioctl_excl_check(minor, ioc, check)); 6050 } 6051 6052 static int 6053 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode, 6054 cred_t *cred_p) 6055 { 6056 nvme_t *const nvme = minor->nm_ctrl; 6057 nvme_ioctl_ctrl_info_t *info; 6058 nvme_reg_cap_t cap = { 0 }; 6059 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL }; 6060 void *idbuf; 6061 6062 if ((mode & FREAD) == 0) 6063 return (EBADF); 6064 6065 info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY); 6066 if (info == NULL) { 6067 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg, 6068 mode)); 6069 } 6070 6071 if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t), 6072 mode & FKIOCTL) != 0) { 6073 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t)); 6074 return (EFAULT); 6075 } 6076 6077 if (!nvme_ioctl_check(minor, &info->nci_common, 6078 &nvme_check_ctrl_info)) { 6079 goto copyout; 6080 } 6081 6082 /* 6083 * We explicitly do not use the identify controller copy in the kernel 6084 * right now so that we can get a snapshot of the controller's 6085 * current capacity and values. While it's tempting to try to use this 6086 * to refresh the kernel's version, we don't, just to simplify the rest 6087 * of the driver right now. 6088 */ 6089 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) { 6090 info->nci_common = id.nid_common; 6091 goto copyout; 6092 } 6093 bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t)); 6094 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE); 6095 6096 /* 6097 * Use the kernel's cached common namespace information for this. 6098 */ 6099 bcopy(nvme->n_idcomns, &info->nci_common_ns, 6100 sizeof (nvme_identify_nsid_t)); 6101 6102 info->nci_vers = nvme->n_version; 6103 6104 /* 6105 * The MPSMIN and MPSMAX fields in the CAP register use 0 to 6106 * specify the base page size of 4k (1<<12), so add 12 here to 6107 * get the real page size value.
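 *
 * For example, cap_mpsmin == 0 yields 1 << 12 == 4096 bytes, while
 * cap_mpsmax == 4 yields 1 << 16 == 65536 bytes.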
6108 */ 6109 cap.r = nvme_get64(nvme, NVME_REG_CAP); 6110 info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax); 6111 info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin); 6112 6113 info->nci_nintrs = (uint32_t)nvme->n_intr_cnt; 6114 6115 copyout: 6116 if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t), 6117 mode & FKIOCTL) != 0) { 6118 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t)); 6119 return (EFAULT); 6120 } 6121 6122 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t)); 6123 return (0); 6124 } 6125 6126 static int 6127 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6128 { 6129 nvme_t *const nvme = minor->nm_ctrl; 6130 nvme_ioctl_ns_info_t *ns_info; 6131 nvme_namespace_t *ns; 6132 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID }; 6133 void *idbuf; 6134 6135 if ((mode & FREAD) == 0) 6136 return (EBADF); 6137 6138 ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY); 6139 if (ns_info == NULL) { 6140 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg, 6141 mode)); 6142 } 6143 6144 if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t), 6145 mode & FKIOCTL) != 0) { 6146 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t)); 6147 return (EFAULT); 6148 } 6149 6150 if (!nvme_ioctl_check(minor, &ns_info->nni_common, 6151 &nvme_check_ns_info)) { 6152 goto copyout; 6153 } 6154 6155 ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0); 6156 ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid); 6157 6158 /* 6159 * First fetch a fresh copy of the namespace information. Most callers 6160 * are using this because they will want a mostly accurate snapshot of 6161 * capacity and utilization. 6162 */ 6163 id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid; 6164 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) { 6165 ns_info->nni_common = id.nid_common; 6166 goto copyout; 6167 } 6168 bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t)); 6169 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE); 6170 6171 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6172 if (ns->ns_allocated) 6173 ns_info->nni_state |= NVME_NS_STATE_ALLOCATED; 6174 6175 if (ns->ns_active) 6176 ns_info->nni_state |= NVME_NS_STATE_ACTIVE; 6177 6178 if (ns->ns_ignore) 6179 ns_info->nni_state |= NVME_NS_STATE_IGNORED; 6180 6181 if (ns->ns_attached) { 6182 const char *addr; 6183 6184 ns_info->nni_state |= NVME_NS_STATE_ATTACHED; 6185 addr = bd_address(ns->ns_bd_hdl); 6186 if (strlcpy(ns_info->nni_addr, addr, 6187 sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) { 6188 nvme_mgmt_unlock(nvme); 6189 (void) nvme_ioctl_error(&ns_info->nni_common, 6190 NVME_IOCTL_E_BD_ADDR_OVER, 0, 0); 6191 goto copyout; 6192 } 6193 } 6194 nvme_mgmt_unlock(nvme); 6195 6196 copyout: 6197 if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t), 6198 mode & FKIOCTL) != 0) { 6199 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t)); 6200 return (EFAULT); 6201 } 6202 6203 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t)); 6204 return (0); 6205 } 6206 6207 static int 6208 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6209 { 6210 _NOTE(ARGUNUSED(cred_p)); 6211 nvme_t *const nvme = minor->nm_ctrl; 6212 void *idctl; 6213 uint_t model; 6214 nvme_ioctl_identify_t id; 6215 #ifdef _MULTI_DATAMODEL 6216 nvme_ioctl_identify32_t id32; 6217 #endif 6218 boolean_t ns_minor; 6219 6220 if ((mode & FREAD) == 0) 6221 return (EBADF); 6222 6223 model = ddi_model_convert_from(mode); 6224 switch (model) { 6225 #ifdef _MULTI_DATAMODEL 6226 case 
DDI_MODEL_ILP32: 6227 bzero(&id, sizeof (id)); 6228 if (ddi_copyin((void *)arg, &id32, sizeof (id32), 6229 mode & FKIOCTL) != 0) { 6230 return (EFAULT); 6231 } 6232 id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid; 6233 id.nid_cns = id32.nid_cns; 6234 id.nid_ctrlid = id32.nid_ctrlid; 6235 id.nid_data = id32.nid_data; 6236 break; 6237 #endif /* _MULTI_DATAMODEL */ 6238 case DDI_MODEL_NONE: 6239 if (ddi_copyin((void *)arg, &id, sizeof (id), 6240 mode & FKIOCTL) != 0) { 6241 return (EFAULT); 6242 } 6243 break; 6244 default: 6245 return (ENOTSUP); 6246 } 6247 6248 if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) { 6249 goto copyout; 6250 } 6251 6252 ns_minor = minor->nm_ns != NULL; 6253 if (!nvme_validate_identify(nvme, &id, ns_minor)) { 6254 goto copyout; 6255 } 6256 6257 if (nvme_identify(nvme, B_TRUE, &id, &idctl)) { 6258 int ret = ddi_copyout(idctl, (void *)id.nid_data, 6259 NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL); 6260 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE); 6261 if (ret != 0) { 6262 (void) nvme_ioctl_error(&id.nid_common, 6263 NVME_IOCTL_E_BAD_USER_DATA, 0, 0); 6264 goto copyout; 6265 } 6266 6267 nvme_ioctl_success(&id.nid_common); 6268 } 6269 6270 copyout: 6271 switch (model) { 6272 #ifdef _MULTI_DATAMODEL 6273 case DDI_MODEL_ILP32: 6274 id32.nid_common = id.nid_common; 6275 6276 if (ddi_copyout(&id32, (void *)arg, sizeof (id32), 6277 mode & FKIOCTL) != 0) { 6278 return (EFAULT); 6279 } 6280 break; 6281 #endif /* _MULTI_DATAMODEL */ 6282 case DDI_MODEL_NONE: 6283 if (ddi_copyout(&id, (void *)arg, sizeof (id), 6284 mode & FKIOCTL) != 0) { 6285 return (EFAULT); 6286 } 6287 break; 6288 default: 6289 return (ENOTSUP); 6290 } 6291 6292 return (0); 6293 } 6294 6295 /* 6296 * Execute commands on behalf of the various ioctls. 6297 * 6298 * If this returns true then the command completed successfully. Otherwise error 6299 * information is returned in the nvme_ioctl_common_t arguments. 6300 */ 6301 typedef struct { 6302 nvme_sqe_t *ica_sqe; 6303 void *ica_data; 6304 uint32_t ica_data_len; 6305 uint_t ica_dma_flags; 6306 int ica_copy_flags; 6307 uint32_t ica_timeout; 6308 uint32_t ica_cdw0; 6309 } nvme_ioc_cmd_args_t; 6310 6311 static boolean_t 6312 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args) 6313 { 6314 nvme_cmd_t *cmd; 6315 boolean_t ret = B_FALSE; 6316 6317 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP); 6318 cmd->nc_sqid = 0; 6319 6320 /* 6321 * This function is used to facilitate requests from 6322 * userspace, so don't panic if the command fails. This 6323 * is especially true for admin passthru commands, where 6324 * the actual command data structure is entirely defined 6325 * by userspace. 
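 *
 * A typical caller stack-allocates an nvme_sqe_t and an
 * nvme_ioc_cmd_args_t, points ica_sqe at the former, and, when a
 * data payload is involved, fills in ica_data, ica_data_len, and the
 * DDI_DMA_READ/DDI_DMA_WRITE direction flags; the GET FEATURES and
 * firmware download paths below are representative callers.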
6326 */ 6327 cmd->nc_flags |= NVME_CMD_F_DONTPANIC; 6328 6329 cmd->nc_callback = nvme_wakeup_cmd; 6330 cmd->nc_sqe = *args->ica_sqe; 6331 6332 if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) { 6333 if (args->ica_data == NULL) { 6334 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM, 6335 0, 0); 6336 goto free_cmd; 6337 } 6338 6339 if (nvme_zalloc_dma(nvme, args->ica_data_len, 6340 args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) != 6341 DDI_SUCCESS) { 6342 dev_err(nvme->n_dip, CE_WARN, 6343 "!nvme_zalloc_dma failed for nvme_ioc_cmd()"); 6344 ret = nvme_ioctl_error(ioc, 6345 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 6346 goto free_cmd; 6347 } 6348 6349 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != DDI_SUCCESS) { 6350 ret = nvme_ioctl_error(ioc, 6351 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 6352 goto free_cmd; 6353 } 6354 6355 if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 && 6356 ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp, 6357 args->ica_data_len, args->ica_copy_flags) != 0) { 6358 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 6359 0, 0); 6360 goto free_cmd; 6361 } 6362 } 6363 6364 nvme_admin_cmd(cmd, args->ica_timeout); 6365 6366 if (!nvme_check_cmd_status_ioctl(cmd, ioc)) { 6367 ret = B_FALSE; 6368 goto free_cmd; 6369 } 6370 6371 args->ica_cdw0 = cmd->nc_cqe.cqe_dw0; 6372 6373 if ((args->ica_dma_flags & DDI_DMA_READ) != 0 && 6374 ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data, 6375 args->ica_data_len, args->ica_copy_flags) != 0) { 6376 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0); 6377 goto free_cmd; 6378 } 6379 6380 ret = B_TRUE; 6381 nvme_ioctl_success(ioc); 6382 6383 free_cmd: 6384 nvme_free_cmd(cmd); 6385 6386 return (ret); 6387 } 6388 6389 static int 6390 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode, 6391 cred_t *cred_p) 6392 { 6393 nvme_t *const nvme = minor->nm_ctrl; 6394 void *buf; 6395 nvme_ioctl_get_logpage_t log; 6396 uint_t model; 6397 #ifdef _MULTI_DATAMODEL 6398 nvme_ioctl_get_logpage32_t log32; 6399 #endif 6400 6401 if ((mode & FREAD) == 0) { 6402 return (EBADF); 6403 } 6404 6405 model = ddi_model_convert_from(mode); 6406 switch (model) { 6407 #ifdef _MULTI_DATAMODEL 6408 case DDI_MODEL_ILP32: 6409 bzero(&log, sizeof (log)); 6410 if (ddi_copyin((void *)arg, &log32, sizeof (log32), 6411 mode & FKIOCTL) != 0) { 6412 return (EFAULT); 6413 } 6414 6415 log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid; 6416 log.nigl_csi = log32.nigl_csi; 6417 log.nigl_lid = log32.nigl_lid; 6418 log.nigl_lsp = log32.nigl_lsp; 6419 log.nigl_len = log32.nigl_len; 6420 log.nigl_offset = log32.nigl_offset; 6421 log.nigl_data = log32.nigl_data; 6422 break; 6423 #endif /* _MULTI_DATAMODEL */ 6424 case DDI_MODEL_NONE: 6425 if (ddi_copyin((void *)arg, &log, sizeof (log), 6426 mode & FKIOCTL) != 0) { 6427 return (EFAULT); 6428 } 6429 break; 6430 default: 6431 return (ENOTSUP); 6432 } 6433 6434 /* 6435 * Eventually we'd like to take a soft lock to keep the namespaces from 6436 * changing out from under us during this operation. But we 6437 * haven't implemented that yet.
6438 */ 6439 if (!nvme_ioctl_check(minor, &log.nigl_common, 6440 &nvme_check_get_logpage)) { 6441 goto copyout; 6442 } 6443 6444 if (!nvme_validate_logpage(nvme, &log)) { 6445 goto copyout; 6446 } 6447 6448 if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) { 6449 int copy; 6450 6451 copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len, 6452 mode & FKIOCTL); 6453 kmem_free(buf, log.nigl_len); 6454 if (copy != 0) { 6455 (void) nvme_ioctl_error(&log.nigl_common, 6456 NVME_IOCTL_E_BAD_USER_DATA, 0, 0); 6457 goto copyout; 6458 } 6459 6460 nvme_ioctl_success(&log.nigl_common); 6461 } 6462 6463 copyout: 6464 switch (model) { 6465 #ifdef _MULTI_DATAMODEL 6466 case DDI_MODEL_ILP32: 6467 bzero(&log32, sizeof (log32)); 6468 6469 log32.nigl_common = log.nigl_common; 6470 log32.nigl_csi = log.nigl_csi; 6471 log32.nigl_lid = log.nigl_lid; 6472 log32.nigl_lsp = log.nigl_lsp; 6473 log32.nigl_len = log.nigl_len; 6474 log32.nigl_offset = log.nigl_offset; 6475 log32.nigl_data = log.nigl_data; 6476 if (ddi_copyout(&log32, (void *)arg, sizeof (log32), 6477 mode & FKIOCTL) != 0) { 6478 return (EFAULT); 6479 } 6480 break; 6481 #endif /* _MULTI_DATAMODEL */ 6482 case DDI_MODEL_NONE: 6483 if (ddi_copyout(&log, (void *)arg, sizeof (log), 6484 mode & FKIOCTL) != 0) { 6485 return (EFAULT); 6486 } 6487 break; 6488 default: 6489 return (ENOTSUP); 6490 } 6491 6492 return (0); 6493 } 6494 6495 static int 6496 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode, 6497 cred_t *cred_p) 6498 { 6499 nvme_t *const nvme = minor->nm_ctrl; 6500 nvme_ioctl_get_feature_t feat; 6501 uint_t model; 6502 #ifdef _MULTI_DATAMODEL 6503 nvme_ioctl_get_feature32_t feat32; 6504 #endif 6505 nvme_get_features_dw10_t gf_dw10 = { 0 }; 6506 nvme_ioc_cmd_args_t args = { NULL }; 6507 nvme_sqe_t sqe = { 6508 .sqe_opc = NVME_OPC_GET_FEATURES 6509 }; 6510 6511 if ((mode & FREAD) == 0) { 6512 return (EBADF); 6513 } 6514 6515 model = ddi_model_convert_from(mode); 6516 switch (model) { 6517 #ifdef _MULTI_DATAMODEL 6518 case DDI_MODEL_ILP32: 6519 bzero(&feat, sizeof (feat)); 6520 if (ddi_copyin((void *)arg, &feat32, sizeof (feat32), 6521 mode & FKIOCTL) != 0) { 6522 return (EFAULT); 6523 } 6524 6525 feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid; 6526 feat.nigf_fid = feat32.nigf_fid; 6527 feat.nigf_sel = feat32.nigf_sel; 6528 feat.nigf_cdw11 = feat32.nigf_cdw11; 6529 feat.nigf_data = feat32.nigf_data; 6530 feat.nigf_len = feat32.nigf_len; 6531 break; 6532 #endif /* _MULTI_DATAMODEL */ 6533 case DDI_MODEL_NONE: 6534 if (ddi_copyin((void *)arg, &feat, sizeof (feat), 6535 mode & FKIOCTL) != 0) { 6536 return (EFAULT); 6537 } 6538 break; 6539 default: 6540 return (ENOTSUP); 6541 } 6542 6543 if (!nvme_ioctl_check(minor, &feat.nigf_common, 6544 &nvme_check_get_feature)) { 6545 goto copyout; 6546 } 6547 6548 if (!nvme_validate_get_feature(nvme, &feat)) { 6549 goto copyout; 6550 } 6551 6552 gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0); 6553 gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0); 6554 sqe.sqe_cdw10 = gf_dw10.r; 6555 sqe.sqe_cdw11 = feat.nigf_cdw11; 6556 sqe.sqe_nsid = feat.nigf_common.nioc_nsid; 6557 6558 args.ica_sqe = &sqe; 6559 if (feat.nigf_len != 0) { 6560 args.ica_data = (void *)feat.nigf_data; 6561 args.ica_data_len = feat.nigf_len; 6562 args.ica_dma_flags = DDI_DMA_READ; 6563 } 6564 args.ica_copy_flags = mode; 6565 args.ica_timeout = nvme_admin_cmd_timeout; 6566 6567 if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) { 6568 goto copyout; 6569 } 6570 6571 feat.nigf_cdw0 = args.ica_cdw0; 6572 6573 copyout: 6574 switch 
(model) { 6575 #ifdef _MULTI_DATAMODEL 6576 case DDI_MODEL_ILP32: 6577 bzero(&feat32, sizeof (feat32)); 6578 6579 feat32.nigf_common = feat.nigf_common; 6580 feat32.nigf_fid = feat.nigf_fid; 6581 feat32.nigf_sel = feat.nigf_sel; 6582 feat32.nigf_cdw11 = feat.nigf_cdw11; 6583 feat32.nigf_data = feat.nigf_data; 6584 feat32.nigf_len = feat.nigf_len; 6585 feat32.nigf_cdw0 = feat.nigf_cdw0; 6586 if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32), 6587 mode & FKIOCTL) != 0) { 6588 return (EFAULT); 6589 } 6590 break; 6591 #endif /* _MULTI_DATAMODEL */ 6592 case DDI_MODEL_NONE: 6593 if (ddi_copyout(&feat, (void *)arg, sizeof (feat), 6594 mode & FKIOCTL) != 0) { 6595 return (EFAULT); 6596 } 6597 break; 6598 default: 6599 return (ENOTSUP); 6600 } 6601 6602 return (0); 6603 } 6604 6605 static int 6606 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6607 { 6608 nvme_t *const nvme = minor->nm_ctrl; 6609 nvme_ioctl_format_t ioc; 6610 6611 if ((mode & FWRITE) == 0) 6612 return (EBADF); 6613 6614 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6615 return (EPERM); 6616 6617 if (ddi_copyin((void *)(uintptr_t)arg, &ioc, 6618 sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0) 6619 return (EFAULT); 6620 6621 if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) { 6622 goto copyout; 6623 } 6624 6625 if (!nvme_validate_format(nvme, &ioc)) { 6626 goto copyout; 6627 } 6628 6629 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6630 if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) { 6631 nvme_mgmt_unlock(nvme); 6632 (void) nvme_ioctl_error(&ioc.nif_common, 6633 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0); 6634 goto copyout; 6635 } 6636 6637 if (nvme_format_nvm(nvme, &ioc)) { 6638 nvme_ioctl_success(&ioc.nif_common); 6639 nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid); 6640 } 6641 nvme_mgmt_unlock(nvme); 6642 6643 copyout: 6644 if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc), 6645 mode & FKIOCTL) != 0) { 6646 return (EFAULT); 6647 } 6648 6649 return (0); 6650 } 6651 6652 static int 6653 nvme_ioctl_detach(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6654 { 6655 nvme_t *const nvme = minor->nm_ctrl; 6656 nvme_ioctl_common_t com; 6657 6658 if ((mode & FWRITE) == 0) 6659 return (EBADF); 6660 6661 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6662 return (EPERM); 6663 6664 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com), 6665 mode & FKIOCTL) != 0) { 6666 return (EFAULT); 6667 } 6668 6669 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) { 6670 goto copyout; 6671 } 6672 6673 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6674 if (nvme_detach_ns(nvme, &com)) { 6675 nvme_ioctl_success(&com); 6676 } 6677 nvme_mgmt_unlock(nvme); 6678 6679 copyout: 6680 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com), 6681 mode & FKIOCTL) != 0) { 6682 return (EFAULT); 6683 } 6684 6685 return (0); 6686 } 6687 6688 static int 6689 nvme_ioctl_attach(nvme_minor_t *minor, intptr_t arg, int mode, 6690 cred_t *cred_p) 6691 { 6692 nvme_t *const nvme = minor->nm_ctrl; 6693 nvme_ioctl_common_t com; 6694 nvme_namespace_t *ns; 6695 6696 if ((mode & FWRITE) == 0) 6697 return (EBADF); 6698 6699 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6700 return (EPERM); 6701 6702 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com), 6703 mode & FKIOCTL) != 0) { 6704 return (EFAULT); 6705 } 6706 6707 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) { 6708 goto copyout; 6709 } 6710 6711 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6712 ns = 
nvme_nsid2ns(nvme, com.nioc_nsid); 6713 6714 /* 6715 * Strictly speaking we shouldn't need to call nvme_init_ns() here as 6716 * we should be properly refreshing the internal state when we are 6717 * issuing commands that change things. However, we opt to still do so 6718 * as a bit of a safety check lest we give the kernel something bad or a 6719 * vendor unique command somehow did something behind our backs. 6720 */ 6721 if (!ns->ns_attached) { 6722 (void) nvme_rescan_ns(nvme, com.nioc_nsid); 6723 if (nvme_attach_ns(nvme, &com)) { 6724 nvme_ioctl_success(&com); 6725 } 6726 } else { 6727 nvme_ioctl_success(&com); 6728 } 6729 nvme_mgmt_unlock(nvme); 6730 6731 copyout: 6732 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com), 6733 mode & FKIOCTL) != 0) { 6734 return (EFAULT); 6735 } 6736 6737 return (0); 6738 } 6739 6740 static void 6741 nvme_ufm_update(nvme_t *nvme) 6742 { 6743 mutex_enter(&nvme->n_fwslot_mutex); 6744 ddi_ufm_update(nvme->n_ufmh); 6745 if (nvme->n_fwslot != NULL) { 6746 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t)); 6747 nvme->n_fwslot = NULL; 6748 } 6749 mutex_exit(&nvme->n_fwslot_mutex); 6750 } 6751 6752 /* 6753 * Download new firmware to the device's internal staging area. We do not call 6754 * nvme_ufm_update() here because after a firmware download, there has been no 6755 * change to any of the actual persistent firmware data. That requires a 6756 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot 6757 * or to activate a slot. 6758 */ 6759 static int 6760 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode, 6761 cred_t *cred_p) 6762 { 6763 nvme_t *const nvme = minor->nm_ctrl; 6764 nvme_ioctl_fw_load_t fw; 6765 uint64_t len, maxcopy; 6766 offset_t offset; 6767 uint32_t gran; 6768 nvme_valid_ctrl_data_t data; 6769 uintptr_t buf; 6770 nvme_sqe_t sqe = { 6771 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD 6772 }; 6773 6774 if ((mode & FWRITE) == 0) 6775 return (EBADF); 6776 6777 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6778 return (EPERM); 6779 6780 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw), 6781 mode & FKIOCTL) != 0) { 6782 return (EFAULT); 6783 } 6784 6785 if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) { 6786 goto copyout; 6787 } 6788 6789 if (!nvme_validate_fw_load(nvme, &fw)) { 6790 goto copyout; 6791 } 6792 6793 len = fw.fwl_len; 6794 offset = fw.fwl_off; 6795 buf = fw.fwl_buf; 6796 6797 /* 6798 * We need to determine the minimum and maximum amount of data that we 6799 * will send to the device in a given go. Starting in NVMe 1.3 this must 6800 * be a multiple of the firmware update granularity (FWUG), but must not 6801 * exceed the maximum data transfer that we've set. Many devices don't 6802 * report something here, which means we'll end up getting our default 6803 * value. Our policy is a little simple, but it's basically if the 6804 * maximum data transfer is evenly divided by the granularity, then use 6805 * it. Otherwise we use the granularity itself. The granularity is 6806 * always in page sized units, so trying to find another optimum point 6807 * isn't worth it. If we encounter a contradiction, then we will have to 6808 * error out.
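 *
 * Illustrative numbers only: with a 1 MiB maximum transfer and a 4k
 * granularity the division is even, so we copy 1 MiB at a time; with
 * a granularity that does not evenly divide the maximum transfer but
 * still fits within it, we fall back to granularity-sized chunks; a
 * granularity larger than the maximum transfer is the contradictory
 * case below that fails with NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN.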
6809 */ 6810 data.vcd_vers = &nvme->n_version; 6811 data.vcd_id = nvme->n_idctl; 6812 gran = nvme_fw_load_granularity(&data); 6813 6814 if ((nvme->n_max_data_transfer_size % gran) == 0) { 6815 maxcopy = nvme->n_max_data_transfer_size; 6816 } else if (gran <= nvme->n_max_data_transfer_size) { 6817 maxcopy = gran; 6818 } else { 6819 (void) nvme_ioctl_error(&fw.fwl_common, 6820 NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0); 6821 goto copyout; 6822 } 6823 6824 while (len > 0) { 6825 nvme_ioc_cmd_args_t args = { NULL }; 6826 uint64_t copylen = MIN(maxcopy, len); 6827 6828 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1; 6829 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT); 6830 6831 args.ica_sqe = &sqe; 6832 args.ica_data = (void *)buf; 6833 args.ica_data_len = copylen; 6834 args.ica_dma_flags = DDI_DMA_WRITE; 6835 args.ica_copy_flags = mode; 6836 args.ica_timeout = nvme_admin_cmd_timeout; 6837 6838 if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) { 6839 break; 6840 } 6841 6842 buf += copylen; 6843 offset += copylen; 6844 len -= copylen; 6845 } 6846 6847 copyout: 6848 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw), 6849 mode & FKIOCTL) != 0) { 6850 return (EFAULT); 6851 } 6852 6853 return (0); 6854 } 6855 6856 static int 6857 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode, 6858 cred_t *cred_p) 6859 { 6860 nvme_t *const nvme = minor->nm_ctrl; 6861 nvme_ioctl_fw_commit_t fw; 6862 nvme_firmware_commit_dw10_t fc_dw10 = { 0 }; 6863 nvme_ioc_cmd_args_t args = { NULL }; 6864 nvme_sqe_t sqe = { 6865 .sqe_opc = NVME_OPC_FW_ACTIVATE 6866 }; 6867 6868 if ((mode & FWRITE) == 0) 6869 return (EBADF); 6870 6871 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6872 return (EPERM); 6873 6874 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw), 6875 mode & FKIOCTL) != 0) { 6876 return (EFAULT); 6877 } 6878 6879 if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) { 6880 goto copyout; 6881 } 6882 6883 if (!nvme_validate_fw_commit(nvme, &fw)) { 6884 goto copyout; 6885 } 6886 6887 fc_dw10.b.fc_slot = fw.fwc_slot; 6888 fc_dw10.b.fc_action = fw.fwc_action; 6889 sqe.sqe_cdw10 = fc_dw10.r; 6890 6891 args.ica_sqe = &sqe; 6892 args.ica_timeout = nvme_commit_save_cmd_timeout; 6893 6894 /* 6895 * There are no conditional actions to take based on this succeeding or 6896 * failing. A failure is recorded in the ioctl structure returned to the 6897 * user. 6898 */ 6899 (void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args); 6900 6901 /* 6902 * Let the DDI UFM subsystem know that the firmware information for 6903 * this device has changed. We perform this unconditionally as an 6904 * invalidation doesn't particularly hurt us. 6905 */ 6906 nvme_ufm_update(nvme); 6907 6908 copyout: 6909 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw), 6910 mode & FKIOCTL) != 0) { 6911 return (EFAULT); 6912 } 6913 6914 return (0); 6915 } 6916 6917 /* 6918 * Helper to copy in a passthru command from userspace, handling 6919 * different data models. 
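 *
 * For an ILP32 consumer this means copying in the 32-bit
 * nvme_ioctl_passthru32_t and widening it field by field into the
 * native structure, most notably growing npc_buf from a uintptr32_t
 * to a 64-bit value.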
6920 */ 6921 static int 6922 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode) 6923 { 6924 switch (ddi_model_convert_from(mode & FMODELS)) { 6925 #ifdef _MULTI_DATAMODEL 6926 case DDI_MODEL_ILP32: { 6927 nvme_ioctl_passthru32_t cmd32; 6928 6929 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0) 6930 return (EFAULT); 6931 6932 bzero(cmd, sizeof (nvme_ioctl_passthru_t)); 6933 6934 cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid; 6935 cmd->npc_opcode = cmd32.npc_opcode; 6936 cmd->npc_timeout = cmd32.npc_timeout; 6937 cmd->npc_flags = cmd32.npc_flags; 6938 cmd->npc_impact = cmd32.npc_impact; 6939 cmd->npc_cdw12 = cmd32.npc_cdw12; 6940 cmd->npc_cdw13 = cmd32.npc_cdw13; 6941 cmd->npc_cdw14 = cmd32.npc_cdw14; 6942 cmd->npc_cdw15 = cmd32.npc_cdw15; 6943 cmd->npc_buflen = cmd32.npc_buflen; 6944 cmd->npc_buf = cmd32.npc_buf; 6945 break; 6946 } 6947 #endif /* _MULTI_DATAMODEL */ 6948 case DDI_MODEL_NONE: 6949 if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t), 6950 mode) != 0) { 6951 return (EFAULT); 6952 } 6953 break; 6954 default: 6955 return (ENOTSUP); 6956 } 6957 6958 return (0); 6959 } 6960 6961 /* 6962 * Helper to copy out a passthru command result to userspace, handling 6963 * different data models. 6964 */ 6965 static int 6966 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode) 6967 { 6968 switch (ddi_model_convert_from(mode & FMODELS)) { 6969 #ifdef _MULTI_DATAMODEL 6970 case DDI_MODEL_ILP32: { 6971 nvme_ioctl_passthru32_t cmd32; 6972 6973 bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t)); 6974 6975 cmd32.npc_common = cmd->npc_common; 6976 cmd32.npc_opcode = cmd->npc_opcode; 6977 cmd32.npc_timeout = cmd->npc_timeout; 6978 cmd32.npc_flags = cmd->npc_flags; 6979 cmd32.npc_impact = cmd->npc_impact; 6980 cmd32.npc_cdw0 = cmd->npc_cdw0; 6981 cmd32.npc_cdw12 = cmd->npc_cdw12; 6982 cmd32.npc_cdw13 = cmd->npc_cdw13; 6983 cmd32.npc_cdw14 = cmd->npc_cdw14; 6984 cmd32.npc_cdw15 = cmd->npc_cdw15; 6985 cmd32.npc_buflen = (size32_t)cmd->npc_buflen; 6986 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf; 6987 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0) 6988 return (EFAULT); 6989 break; 6990 } 6991 #endif /* _MULTI_DATAMODEL */ 6992 case DDI_MODEL_NONE: 6993 if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t), 6994 mode) != 0) { 6995 return (EFAULT); 6996 } 6997 break; 6998 default: 6999 return (ENOTSUP); 7000 } 7001 return (0); 7002 } 7003 7004 /* 7005 * Run an arbitrary vendor-specific admin command on the device. 7006 */ 7007 static int 7008 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 7009 { 7010 nvme_t *const nvme = minor->nm_ctrl; 7011 int rv; 7012 nvme_ioctl_passthru_t pass; 7013 nvme_sqe_t sqe; 7014 nvme_ioc_cmd_args_t args = { NULL }; 7015 7016 /* 7017 * Basic checks: permissions, data model, argument size. 7018 */ 7019 if ((mode & FWRITE) == 0) 7020 return (EBADF); 7021 7022 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 7023 return (EPERM); 7024 7025 if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass, 7026 mode)) != 0) { 7027 return (rv); 7028 } 7029 7030 if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) { 7031 goto copyout; 7032 } 7033 7034 if (!nvme_validate_vuc(nvme, &pass)) { 7035 goto copyout; 7036 } 7037 7038 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 7039 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) { 7040 /* 7041 * We've been told this has ns impact. 
Right now force that to 7042 * be every ns until we have more use cases and reason to trust 7043 * the nsid field. 7044 */ 7045 if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) { 7046 nvme_mgmt_unlock(nvme); 7047 (void) nvme_ioctl_error(&pass.npc_common, 7048 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0); 7049 goto copyout; 7050 } 7051 } 7052 7053 bzero(&sqe, sizeof (sqe)); 7054 7055 sqe.sqe_opc = pass.npc_opcode; 7056 sqe.sqe_nsid = pass.npc_common.nioc_nsid; 7057 sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT); 7058 sqe.sqe_cdw12 = pass.npc_cdw12; 7059 sqe.sqe_cdw13 = pass.npc_cdw13; 7060 sqe.sqe_cdw14 = pass.npc_cdw14; 7061 sqe.sqe_cdw15 = pass.npc_cdw15; 7062 7063 args.ica_sqe = &sqe; 7064 args.ica_data = (void *)pass.npc_buf; 7065 args.ica_data_len = pass.npc_buflen; 7066 args.ica_copy_flags = mode; 7067 args.ica_timeout = pass.npc_timeout; 7068 7069 if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0) 7070 args.ica_dma_flags |= DDI_DMA_READ; 7071 else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0) 7072 args.ica_dma_flags |= DDI_DMA_WRITE; 7073 7074 if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) { 7075 pass.npc_cdw0 = args.ica_cdw0; 7076 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) { 7077 nvme_rescan_ns(nvme, NVME_NSID_BCAST); 7078 } 7079 } 7080 nvme_mgmt_unlock(nvme); 7081 7082 copyout: 7083 rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg, 7084 mode); 7085 7086 return (rv); 7087 } 7088 7089 static int 7090 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode, 7091 cred_t *cred_p) 7092 { 7093 nvme_ioctl_lock_t lock; 7094 const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK; 7095 nvme_t *nvme = minor->nm_ctrl; 7096 7097 if ((mode & FWRITE) == 0) 7098 return (EBADF); 7099 7100 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 7101 return (EPERM); 7102 7103 if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock), 7104 mode & FKIOCTL) != 0) { 7105 return (EFAULT); 7106 } 7107 7108 if (lock.nil_ent != NVME_LOCK_E_CTRL && 7109 lock.nil_ent != NVME_LOCK_E_NS) { 7110 (void) nvme_ioctl_error(&lock.nil_common, 7111 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0); 7112 goto copyout; 7113 } 7114 7115 if (lock.nil_level != NVME_LOCK_L_READ && 7116 lock.nil_level != NVME_LOCK_L_WRITE) { 7117 (void) nvme_ioctl_error(&lock.nil_common, 7118 NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0); 7119 goto copyout; 7120 } 7121 7122 if ((lock.nil_flags & ~all_flags) != 0) { 7123 (void) nvme_ioctl_error(&lock.nil_common, 7124 NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0); 7125 goto copyout; 7126 } 7127 7128 if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) { 7129 goto copyout; 7130 } 7131 7132 /* 7133 * If we're on a namespace, confirm that we're not asking for the 7134 * controller. 7135 */ 7136 if (lock.nil_common.nioc_nsid != 0 && 7137 lock.nil_ent == NVME_LOCK_E_CTRL) { 7138 (void) nvme_ioctl_error(&lock.nil_common, 7139 NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0); 7140 goto copyout; 7141 } 7142 7143 /* 7144 * We've reached the point where we can no longer actually check things 7145 * without serializing state. First, we need to check to make sure that 7146 * none of our invariants are being broken for locking: 7147 * 7148 * 1) The caller isn't already blocking for a lock operation to 7149 * complete. 7150 * 7151 * 2) The caller is attempting to grab a lock that they already have. 7152 * While there are other rule violations that this might create, we opt 7153 * to check this ahead of it so we can have slightly better error 7154 * messages for our callers. 
7155 * 7156 * 3) The caller is trying to grab a controller lock, while holding a 7157 * namespace lock. 7158 * 7159 * 4) The caller has a controller write lock and is trying to get a 7160 * namespace lock. For now, we disallow this case. Holding a controller 7161 * read lock is allowed, but the write lock allows you to operate on all 7162 * namespaces anyways. In addition, this simplifies the locking logic; 7163 * however, this constraint may be loosened in the future. 7164 * 7165 * 5) The caller is trying to acquire a second namespace lock when they 7166 * already have one. 7167 */ 7168 mutex_enter(&nvme->n_minor_mutex); 7169 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED || 7170 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) { 7171 (void) nvme_ioctl_error(&lock.nil_common, 7172 NVME_IOCTL_E_LOCK_PENDING, 0, 0); 7173 mutex_exit(&nvme->n_minor_mutex); 7174 goto copyout; 7175 } 7176 7177 if ((lock.nil_ent == NVME_LOCK_E_CTRL && 7178 minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) || 7179 (lock.nil_ent == NVME_LOCK_E_NS && 7180 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED && 7181 minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) { 7182 (void) nvme_ioctl_error(&lock.nil_common, 7183 NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0); 7184 mutex_exit(&nvme->n_minor_mutex); 7185 goto copyout; 7186 } 7187 7188 if (lock.nil_ent == NVME_LOCK_E_CTRL && 7189 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) { 7190 (void) nvme_ioctl_error(&lock.nil_common, 7191 NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0); 7192 mutex_exit(&nvme->n_minor_mutex); 7193 goto copyout; 7194 } 7195 7196 if (lock.nil_ent == NVME_LOCK_E_NS && 7197 (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED && 7198 minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) { 7199 (void) nvme_ioctl_error(&lock.nil_common, 7200 NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0); 7201 mutex_exit(&nvme->n_minor_mutex); 7202 goto copyout; 7203 } 7204 7205 if (lock.nil_ent == NVME_LOCK_E_NS && 7206 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) { 7207 (void) nvme_ioctl_error(&lock.nil_common, 7208 NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0); 7209 mutex_exit(&nvme->n_minor_mutex); 7210 goto copyout; 7211 } 7212 7213 #ifdef DEBUG 7214 /* 7215 * This is a big block of sanity checks to make sure that we haven't 7216 * allowed anything bad to happen. 
7217 */ 7218 if (lock.nil_ent == NVME_LOCK_E_NS) { 7219 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL); 7220 ASSERT3U(minor->nm_ns_lock.nli_state, ==, 7221 NVME_LOCK_STATE_UNLOCKED); 7222 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0); 7223 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL); 7224 7225 if (minor->nm_ns != NULL) { 7226 ASSERT3U(minor->nm_ns->ns_id, ==, 7227 lock.nil_common.nioc_nsid); 7228 } 7229 7230 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node)); 7231 } else { 7232 ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL); 7233 ASSERT3U(minor->nm_ctrl_lock.nli_state, ==, 7234 NVME_LOCK_STATE_UNLOCKED); 7235 ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0); 7236 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL); 7237 ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node)); 7238 7239 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL); 7240 ASSERT3U(minor->nm_ns_lock.nli_state, ==, 7241 NVME_LOCK_STATE_UNLOCKED); 7242 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0); 7243 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL); 7244 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node)); 7245 } 7246 #endif /* DEBUG */ 7247 7248 /* 7249 * At this point we should actually attempt a locking operation. 7250 */ 7251 nvme_rwlock(minor, &lock); 7252 mutex_exit(&nvme->n_minor_mutex); 7253 7254 copyout: 7255 if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock), 7256 mode & FKIOCTL) != 0) { 7257 return (EFAULT); 7258 } 7259 7260 return (0); 7261 } 7262 7263 static int 7264 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode, 7265 cred_t *cred_p) 7266 { 7267 nvme_ioctl_unlock_t unlock; 7268 nvme_t *const nvme = minor->nm_ctrl; 7269 boolean_t is_ctrl; 7270 nvme_lock_t *lock; 7271 nvme_minor_lock_info_t *info; 7272 7273 /* 7274 * Note, we explicitly don't check for privileges for unlock. The idea 7275 * being that if you have the lock, that's what matters. If you don't 7276 * have the lock, it doesn't matter what privileges that you have at 7277 * all. 7278 */ 7279 if ((mode & FWRITE) == 0) 7280 return (EBADF); 7281 7282 if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock), 7283 mode & FKIOCTL) != 0) { 7284 return (EFAULT); 7285 } 7286 7287 if (unlock.niu_ent != NVME_LOCK_E_CTRL && 7288 unlock.niu_ent != NVME_LOCK_E_NS) { 7289 (void) nvme_ioctl_error(&unlock.niu_common, 7290 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0); 7291 goto copyout; 7292 } 7293 7294 if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) { 7295 goto copyout; 7296 } 7297 7298 /* 7299 * If we're on a namespace, confirm that we're not asking for the 7300 * controller. 7301 */ 7302 if (unlock.niu_common.nioc_nsid != 0 && 7303 unlock.niu_ent == NVME_LOCK_E_CTRL) { 7304 (void) nvme_ioctl_error(&unlock.niu_common, 7305 NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0); 7306 goto copyout; 7307 } 7308 7309 mutex_enter(&nvme->n_minor_mutex); 7310 if (unlock.niu_ent == NVME_LOCK_E_CTRL) { 7311 if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) { 7312 mutex_exit(&nvme->n_minor_mutex); 7313 (void) nvme_ioctl_error(&unlock.niu_common, 7314 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0); 7315 goto copyout; 7316 } 7317 } else { 7318 if (minor->nm_ns_lock.nli_ns == NULL) { 7319 mutex_exit(&nvme->n_minor_mutex); 7320 (void) nvme_ioctl_error(&unlock.niu_common, 7321 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0); 7322 goto copyout; 7323 } 7324 7325 /* 7326 * Check that our unlock request corresponds to the namespace ID 7327 * that is currently locked. 
static int
nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
    cred_t *cred_p)
{
	nvme_ioctl_unlock_t unlock;
	nvme_t *const nvme = minor->nm_ctrl;
	boolean_t is_ctrl;
	nvme_lock_t *lock;
	nvme_minor_lock_info_t *info;

	/*
	 * Note, we explicitly don't check for privileges on unlock. The idea
	 * is that if you hold the lock, that's what matters; if you don't
	 * hold the lock, it doesn't matter what privileges you have.
	 */
	if ((mode & FWRITE) == 0)
		return (EBADF);

	if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
	    mode & FKIOCTL) != 0) {
		return (EFAULT);
	}

	if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
	    unlock.niu_ent != NVME_LOCK_E_NS) {
		(void) nvme_ioctl_error(&unlock.niu_common,
		    NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
		goto copyout;
	}

	if (!nvme_ioctl_check(minor, &unlock.niu_common,
	    &nvme_check_locking)) {
		goto copyout;
	}

	/*
	 * If we're on a namespace, confirm that we're not asking for the
	 * controller.
	 */
	if (unlock.niu_common.nioc_nsid != 0 &&
	    unlock.niu_ent == NVME_LOCK_E_CTRL) {
		(void) nvme_ioctl_error(&unlock.niu_common,
		    NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
		goto copyout;
	}

	mutex_enter(&nvme->n_minor_mutex);
	if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
		if (minor->nm_ctrl_lock.nli_state !=
		    NVME_LOCK_STATE_ACQUIRED) {
			mutex_exit(&nvme->n_minor_mutex);
			(void) nvme_ioctl_error(&unlock.niu_common,
			    NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
			goto copyout;
		}
	} else {
		if (minor->nm_ns_lock.nli_ns == NULL) {
			mutex_exit(&nvme->n_minor_mutex);
			(void) nvme_ioctl_error(&unlock.niu_common,
			    NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
			goto copyout;
		}

		/*
		 * Check that our unlock request corresponds to the namespace
		 * ID that is currently locked. A mismatch can occur if the
		 * caller is using the controller node and specified a valid,
		 * but not locked, namespace ID.
		 */
		if (minor->nm_ns_lock.nli_ns->ns_id !=
		    unlock.niu_common.nioc_nsid) {
			mutex_exit(&nvme->n_minor_mutex);
			ASSERT3P(minor->nm_ns, ==, NULL);
			(void) nvme_ioctl_error(&unlock.niu_common,
			    NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
			goto copyout;
		}

		if (minor->nm_ns_lock.nli_state !=
		    NVME_LOCK_STATE_ACQUIRED) {
			mutex_exit(&nvme->n_minor_mutex);
			(void) nvme_ioctl_error(&unlock.niu_common,
			    NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
			goto copyout;
		}
	}

	/*
	 * Finally, perform the unlock.
	 */
	is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
	if (is_ctrl) {
		lock = &nvme->n_lock;
		info = &minor->nm_ctrl_lock;
	} else {
		nvme_namespace_t *ns;
		const uint32_t nsid = unlock.niu_common.nioc_nsid;

		ns = nvme_nsid2ns(nvme, nsid);
		lock = &ns->ns_lock;
		info = &minor->nm_ns_lock;
		VERIFY3P(ns, ==, info->nli_ns);
	}
	nvme_rwunlock(info, lock);
	mutex_exit(&nvme->n_minor_mutex);
	nvme_ioctl_success(&unlock.niu_common);

copyout:
	if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
	    mode & FKIOCTL) != 0) {
		return (EFAULT);
	}

	return (0);
}
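/*
 * Because unlock deliberately skips the privilege check, the only
 * requirements for releasing a lock are a file descriptor opened for
 * writing (the FWRITE check above) and actually holding the lock on that
 * descriptor. A hypothetical sketch of releasing a namespace lock:
 *
 *	nvme_ioctl_unlock_t unlock;
 *
 *	bzero(&unlock, sizeof (unlock));
 *	unlock.niu_ent = NVME_LOCK_E_NS;
 *	unlock.niu_common.nioc_nsid = 1;
 *	(void) ioctl(fd, NVME_IOC_UNLOCK, &unlock);
 *
 * Supplying a valid namespace ID other than the one that is locked would
 * instead fail with NVME_IOCTL_E_LOCK_WRONG_NS, per the check above.
 */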
static int
nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(rval_p));
#endif
	nvme_minor_t *minor;
	nvme_t *nvme;

	minor = nvme_minor_find_by_dev(dev);
	if (minor == NULL) {
		return (ENXIO);
	}

	nvme = minor->nm_ctrl;
	if (nvme == NULL)
		return (ENXIO);

	if (IS_DEVCTL(cmd))
		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));

	if (nvme->n_dead && (cmd != NVME_IOC_DETACH && cmd !=
	    NVME_IOC_UNLOCK)) {
		if (IS_NVME_IOC(cmd) == 0) {
			return (EIO);
		}

		return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
		    mode));
	}

	/*
	 * ioctls that are no longer using the original ioctl structure.
	 */
	switch (cmd) {
	case NVME_IOC_CTRL_INFO:
		return (nvme_ioctl_ctrl_info(minor, arg, mode, cred_p));
	case NVME_IOC_IDENTIFY:
		return (nvme_ioctl_identify(minor, arg, mode, cred_p));
	case NVME_IOC_GET_LOGPAGE:
		return (nvme_ioctl_get_logpage(minor, arg, mode, cred_p));
	case NVME_IOC_GET_FEATURE:
		return (nvme_ioctl_get_feature(minor, arg, mode, cred_p));
	case NVME_IOC_DETACH:
		return (nvme_ioctl_detach(minor, arg, mode, cred_p));
	case NVME_IOC_ATTACH:
		return (nvme_ioctl_attach(minor, arg, mode, cred_p));
	case NVME_IOC_FORMAT:
		return (nvme_ioctl_format(minor, arg, mode, cred_p));
	case NVME_IOC_FIRMWARE_DOWNLOAD:
		return (nvme_ioctl_firmware_download(minor, arg, mode,
		    cred_p));
	case NVME_IOC_FIRMWARE_COMMIT:
		return (nvme_ioctl_firmware_commit(minor, arg, mode,
		    cred_p));
	case NVME_IOC_NS_INFO:
		return (nvme_ioctl_ns_info(minor, arg, mode, cred_p));
	case NVME_IOC_PASSTHRU:
		return (nvme_ioctl_passthru(minor, arg, mode, cred_p));
	case NVME_IOC_LOCK:
		return (nvme_ioctl_lock(minor, arg, mode, cred_p));
	case NVME_IOC_UNLOCK:
		return (nvme_ioctl_unlock(minor, arg, mode, cred_p));
	default:
		return (ENOTTY);
	}
}

/*
 * DDI UFM Callbacks
 */
static int
nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    ddi_ufm_image_t *img)
{
	nvme_t *nvme = arg;

	if (imgno != 0)
		return (EINVAL);

	ddi_ufm_image_set_desc(img, "Firmware");
	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);

	return (0);
}

/*
 * Fill out firmware slot information for the requested slot. The firmware
 * slot information is gathered by requesting the Firmware Slot Information
 * log page. The format of the page is described in section 5.10.1.3.
 *
 * We lazily cache the log page on the first call and then invalidate the
 * cached data after a successful firmware download or firmware commit
 * command. The cached data is protected by a mutex as the state can change
 * asynchronously to this callback.
 */
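/*
 * The invalidation half of the caching contract described above lives with
 * the firmware ioctls. A sketch of what clearing the cache is expected to
 * look like there, mirroring the locking used in nvme_ufm_fill_slot()
 * below (illustrative, not a copy of that code):
 *
 *	mutex_enter(&nvme->n_fwslot_mutex);
 *	if (nvme->n_fwslot != NULL) {
 *		kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
 *		nvme->n_fwslot = NULL;
 *	}
 *	mutex_exit(&nvme->n_fwslot_mutex);
 *
 * The next nvme_ufm_fill_slot() call then re-fetches the log page.
 */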
static int
nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    uint_t slotno, ddi_ufm_slot_t *slot)
{
	nvme_t *nvme = arg;
	void *log = NULL;
	size_t bufsize;
	ddi_ufm_attr_t attr = 0;
	char fw_ver[NVME_FWVER_SZ + 1];

	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
		return (EINVAL);

	mutex_enter(&nvme->n_fwslot_mutex);
	if (nvme->n_fwslot == NULL) {
		if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
		    NVME_LOGPAGE_FWSLOT) ||
		    bufsize != sizeof (nvme_fwslot_log_t)) {
			if (log != NULL)
				kmem_free(log, bufsize);
			mutex_exit(&nvme->n_fwslot_mutex);
			return (EIO);
		}
		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
	}

	/*
	 * NVMe numbers firmware slots starting at 1.
	 */
	if (slotno == (nvme->n_fwslot->fw_afi - 1))
		attr |= DDI_UFM_ATTR_ACTIVE;

	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
		attr |= DDI_UFM_ATTR_WRITEABLE;

	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
		attr |= DDI_UFM_ATTR_EMPTY;
	} else {
		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
		    NVME_FWVER_SZ);
		fw_ver[NVME_FWVER_SZ] = '\0';
		ddi_ufm_slot_set_version(slot, fw_ver);
	}
	mutex_exit(&nvme->n_fwslot_mutex);

	ddi_ufm_slot_set_attrs(slot, attr);

	return (0);
}

static int
nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}

boolean_t
nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
{
	return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
}
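/*
 * nvme_ctrl_atleast() is the intended way to gate feature use on the
 * controller's reported version. A hypothetical caller checking for NVMe
 * 1.2 support before relying on a 1.2-only identify field might look like
 * the sketch below; the initializer assumes nvme_version_t holds the major
 * and minor numbers in declaration order, which may not match the actual
 * layout:
 *
 *	static const nvme_version_t nvme_min_vers_1v2 = { 1, 2 };
 *
 *	if (nvme_ctrl_atleast(nvme, &nvme_min_vers_1v2)) {
 *		safe to consume the NVMe 1.2 field here
 *	}
 */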