/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2019 Unix Software Ltd.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2020 Racktop Systems.
 * Copyright 2024 Oxide Computer Company.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
 * devices. Features are added to the driver as we encounter devices that
 * require them and as our needs dictate, so some commands or log pages may not
 * take advantage of newer features that devices support at this time. When you
 * encounter such a case, it is generally fine to add that support to the driver
 * as long as you take care to ensure that the requisite device version is met
 * before using it.
 *
 * The driver has only been tested on x86 systems and will not work on
 * big-endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to use
 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
 * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
 * the admin queue will share an interrupt with one I/O queue. The interrupt
 * handler will retrieve completed commands from all queues sharing an interrupt
 * vector and will post them to a taskq for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
 * to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that can
 * hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array. The
 * array index is used as command identifier (CID) in the submission queue
 * entry.
 * Some commands may take a very long time to complete, and if the queue wraps
 * around in that time a submission may find that the next array slot is still
 * in use by a long-running command. In this case the array is sequentially
 * searched for the next free slot. The length of the command array is the same
 * as the configured queue length. Queue overrun is prevented by the semaphore,
 * so a command submission may block if the queue is full.
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts are
 * turned off while dumping, the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev interface
 * for each namespace found. Namespaces can have various attributes to support
 * protection information. This driver does not support any of these attributes
 * and ignores namespaces that have them.
 *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
 * if present to generate the devid, and passes the EUI64 to blkdev to use it
 * in the device node names.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver so that
 * it can address a reasonable number of controllers and namespaces while
 * fitting within the constraints of MAXMIN32, aka a 32-bit device number which
 * only has 18 bits for the minor number. See the minor node section for more
 * information.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller and
 * one minor node for each namespace. The only operations supported by those
 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
 * primary control interface for the devices. The character device is a private
 * interface and we attempt stability through libnvme and more so nvmeadm.
 *
 * The controller minor node is much more flexible than the namespace minor node
 * and should be preferred. The controller node allows one to target any
 * namespace that the device has, while the namespace node is limited in what it
 * can acquire. While the namespace minor exists, it should not be relied upon
 * and is not used by libnvme.
 *
 * The minor number space is split in two. We use the lower part to support the
 * controller and namespaces as described above in the 'Namespace Support'
 * section. The second set is used for cloning opens. We set aside one million
 * minors for this purpose. We utilize a cloning open so that we can have
 * per-file_t state. This is how we implement and track locking state and
 * related information.
 *
 * When such a cloned open occurs, we allocate a new nvme_minor_t which gets its
 * minor number from the nvme_open_minors id_space_t and is stored in the
 * nvme_open_minors_avl. Although open is called on a controller or namespace
 * minor, everything after the open occurs in the context of one of these
 * ephemeral minors.
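 *
 * Purely as an illustration (a minimal sketch, not the exact code in the
 * open(9E) path; nm_minor is a hypothetical name for the member holding the
 * allocated minor number), the cloned open conceptually does something like:
 *
 *	nvme_minor_t *minor = kmem_zalloc(sizeof (nvme_minor_t), KM_SLEEP);
 *	id_t id = id_alloc_nosleep(nvme_open_minors);
 *
 *	if (id == -1) {
 *		kmem_free(minor, sizeof (nvme_minor_t));
 *		return (ENOSPC);
 *	}
 *	minor->nm_minor = id;
 *	*devp = makedevice(getmajor(*devp), (minor_t)id);
 *
 *	mutex_enter(&nvme_open_minors_mutex);
 *	avl_add(&nvme_open_minors_avl, minor);
 *	mutex_exit(&nvme_open_minors_mutex);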
 *
 *
 * ioctls, Errors, and Exclusive Access:
 *
 * All of the logical commands that one can issue are driven through the
 * ioctl(9E) interface. All of our ioctls have a similar shape: they include
 * the 'nvme_ioctl_common_t' as their first member.
 *
 * This common ioctl structure is used to communicate the namespace that should
 * be targeted. When the namespace is left as 0, that indicates that the command
 * should target whatever the default of the minor node is. For a namespace
 * minor, that will be transparently rewritten to the namespace's namespace id.
 *
 * In addition, the nvme_ioctl_common_t structure also has a standard error
 * return. Our goal in our ioctl path is to ensure that we have useful semantic
 * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead, as
 * long as we can copy in our structure, we will set a semantic error. If we
 * have an error from the controller, then that will be included there.
 *
 * Each command has a specific policy that controls whether or not it is allowed
 * on the namespace or controller minor, whether the broadcast namespace is
 * allowed, various settings around what kind of exclusive access is allowed,
 * and more. Each of these is wrapped up in a bit of policy described by the
 * 'nvme_ioctl_check_t' structure.
 *
 * The driver provides a form of exclusion via both a controller-level and a
 * namespace-level read and write lock. Most operations do not require a lock
 * (e.g. get log page, identify, etc.), but a few do (e.g. format nvm, firmware
 * related activity, etc.). A read lock guarantees that you can complete your
 * operation without interference, but read locks are not required. If you don't
 * take a read lock and someone comes in with a write lock, then subsequent
 * operations will fail with a semantic error indicating that you were blocked
 * due to this.
 *
 * Here are some of the rules that govern our locks:
 *
 * 1. Writers starve readers. Any readers are allowed to finish when there is a
 *    pending writer; however, all subsequent readers will be blocked upon that
 *    writer.
 * 2. A controller write lock takes priority over all other locks. Put
 *    differently, a controller writer not only starves subsequent controller
 *    readers, but also all namespace read and write locks.
 * 3. Each namespace lock is independent.
 * 4. At most a single namespace lock may be owned.
 * 5. If you own a namespace lock, you may not take a controller lock (to help
 *    with lock ordering).
 * 6. In a similar spirit, if you own a controller write lock, you may not take
 *    any namespace lock. Someone with the controller write lock can perform any
 *    operations that they need to. However, if you have a controller read lock
 *    you may take any namespace lock.
 * 7. There is no ability to upgrade a read lock to a write lock.
 * 8. There is no recursive locking.
 *
 * While there's a lot there to keep track of, the goal of these rules is to
 * constrain things so as to avoid deadlock. This is more complex than the
 * original implementation in the driver, which only allowed for an exclusive
 * open that was tied to the thread. The first issue with tying this to the
 * thread was that it didn't work well for software that utilizes thread
 * pools, like complex daemons.
The second issue is that we want the ability for 195 * daemons, such as a FRU monitor, to be able to retain a file descriptor to the 196 * device without blocking others from taking action except during critical 197 * periods. 198 * 199 * In particular to enable something like libnvme, we didn't want someone to 200 * have to open and close the file descriptor to change what kind of exclusive 201 * access they desired. 202 * 203 * There are two different sets of data structures that we employ for tracking 204 * locking information: 205 * 206 * 1) The nvme_lock_t structure is contained in both the nvme_t and the 207 * nvme_namespace_t and tracks the current writer, readers, and pending writers 208 * and readers. Each of these lists or the writer pointer all refer to our 209 * second data structure. 210 * 211 * When a lock is owned by a single writer, then the nl_writer field is set to a 212 * specific minor's lock data structure. If instead readers are present, then 213 * the nl_readers list_t is not empty. An invariant of the system is that if 214 * nl_writer is non-NULL, nl_readers must be empty and conversely, if nl_readers 215 * is not empty, nl_writer must be NULL. 216 * 217 * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one 218 * information structure which represents the minor's controller lock and a 219 * second one that represents the minor's namespace lock. The members of this 220 * are broken into tracking what the current lock is and what it targets. It 221 * also several members that are intended for debugging (nli_last_change, 222 * nli_acq_kthread, etc.). 223 * 224 * While the minor has two different lock information structures, our rules 225 * ensure that only one of the two can be pending and that they shouldn't result 226 * in a deadlock. When a lock is pending, the caller is sleeping on the minor's 227 * nm_cv member. 228 * 229 * These relationships are represented in the following image which shows a 230 * controller write lock being held with a pending readers on the controller 231 * lock and pending writers on one of the controller's namespaces. 232 * 233 * +---------+ 234 * | nvme_t | 235 * | | 236 * | n_lock -|-------+ 237 * | n_ns -+ | | +-----------------------------+ 238 * +-------|-+ +-----------------+ | nvme_minor_t | 239 * | | nvme_lock_t | | | 240 * | | | | +------------------------+ | 241 * | | writer --|-------------->| nvme_minor_lock_info_t | | 242 * | | reader list | | | nm_ctrl_lock | | 243 * | | pending writers | | +------------------------+ | 244 * | | pending readers |------+ | +------------------------+ | 245 * | +-----------------+ | | | nvme_minor_lock_info_t | | 246 * | | | | nm_ns_lock | | 247 * | | | +------------------------+ | 248 * | | +-----------------------------+ 249 * +------------------+ | +-----------------+ 250 * | nvme_namespace_t | | | nvme_minor_t | 251 * | | | | | 252 * | ns_lock ---+ | | | +-------------+ | 253 * +------------|-----+ +-----------------|>|nm_ctrl_lock | | 254 * | | +-------------+ | 255 * v +-----------------+ 256 * +------------------+ ... 
257 * | nvme_lock_t | +-----------------+ 258 * | | | nvme_minor_t | 259 * | writer | | | 260 * | reader list | | +-------------+ | 261 * | pending writers -|-----------------+ | |nm_ctrl_lock | | 262 * | pending readers | | | +-------------+ | 263 * +------------------+ | +-----------------+ 264 * +-----------------------------+ | +-----------------------------+ 265 * | nvme_minor_t | | | nvme_minor_t | 266 * | | | | | 267 * | +------------------------+ | | | +------------------------+ | 268 * | | nvme_minor_lock_info_t | | | | | nvme_minor_lock_info_t | | 269 * | | nm_ctrl_lock | | | | | nm_ctrl_lock | | 270 * | +------------------------+ | | | +------------------------+ | 271 * | +------------------------+ | v | +------------------------+ | 272 * | | nvme_minor_lock_info_t |-|-----|->| nvme_minor_lock_info_t | | 273 * | | nm_ns_lock | | | | nm_ns_lock | | 274 * | +------------------------+ | | +------------------------+ | 275 * +-----------------------------+ +-----------------------------+ 276 * 277 * Blkdev Interface: 278 * 279 * This driver uses blkdev to do all the heavy lifting involved with presenting 280 * a disk device to the system. As a result, the processing of I/O requests is 281 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA 282 * setup, and splitting of transfers into manageable chunks. 283 * 284 * I/O requests coming in from blkdev are turned into NVM commands and posted to 285 * an I/O queue. The queue is selected by taking the CPU id modulo the number of 286 * queues. There is currently no timeout handling of I/O commands. 287 * 288 * Blkdev also supports querying device/media information and generating a 289 * devid. The driver reports the best block size as determined by the namespace 290 * format back to blkdev as physical block size to support partition and block 291 * alignment. The devid is either based on the namespace GUID or EUI64, if 292 * present, or composed using the device vendor ID, model number, serial number, 293 * and the namespace ID. 294 * 295 * 296 * Error Handling: 297 * 298 * Error handling is currently limited to detecting fatal hardware errors, 299 * either by asynchronous events, or synchronously through command status or 300 * admin command timeouts. In case of severe errors the device is fenced off, 301 * all further requests will return EIO. FMA is then called to fault the device. 302 * 303 * The hardware has a limit for outstanding asynchronous event requests. Before 304 * this limit is known the driver assumes it is at least 1 and posts a single 305 * asynchronous request. Later when the limit is known more asynchronous event 306 * requests are posted to allow quicker reception of error information. When an 307 * asynchronous event is posted by the hardware the driver will parse the error 308 * status fields and log information or fault the device, depending on the 309 * severity of the asynchronous event. The asynchronous event request is then 310 * reused and posted to the admin queue again. 311 * 312 * On command completion the command status is checked for errors. In case of 313 * errors indicating a driver bug the driver panics. Almost all other error 314 * status values just cause EIO to be returned. 315 * 316 * Command timeouts are currently detected for all admin commands except 317 * asynchronous event requests. If a command times out and the hardware appears 318 * to be healthy the driver attempts to abort the command. The original command 319 * timeout is also applied to the abort command. 
 * If the abort times out as well, the driver assumes the device is dead, fences
 * it off, and calls FMA to retire it. In all other cases the aborted command
 * should return immediately with a status indicating it was aborted, and the
 * driver will wait indefinitely for that to happen. No timeout handling of
 * normal I/O commands is presently done.
 *
 * Any command that times out due to the controller dropping dead will be put on
 * the nvme_lost_cmds list if it references DMA memory. This prevents the DMA
 * memory from being reused by the system and later being written to by a "dead"
 * NVMe controller.
 *
 *
 * Locking:
 *
 * Each queue pair has an nq_mutex and an ncq_mutex. The nq_mutex must be held
 * when accessing shared state and submission queue registers; ncq_mutex is held
 * when accessing completion queue state and registers.
 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
 * mutexes themselves.
 *
 * Each command also has its own nc_mutex, which is associated with the
 * condition variable nc_cv. It is only used on admin commands which are run
 * synchronously. In that case it must be held across calls to
 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
 * nvme_admin_cmd(). It must also be held whenever the completion state of the
 * command is changed or while an admin command timeout is handled.
 *
 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
 * More than one nc_mutex may only be held when aborting commands. In this case,
 * the nc_mutex of the command to be aborted must be held across the call to
 * nvme_abort_cmd() to prevent the command from completing while the abort is in
 * progress.
 *
 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
 * acquired first. More than one nq_mutex is never held by a single thread.
 * The ncq_mutex is only held by nvme_retrieve_cmd() and
 * nvme_process_iocq(). nvme_process_iocq() is only called from the
 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
 * mutex is non-contentious but is required for implementation completeness
 * and safety.
 *
 * There is one mutex, n_minor_mutex, which protects the open flags (nm_open)
 * and exclusive-open thread pointers (nm_oexcl) of each minor node associated
 * with a controller and its namespaces.
 *
 * In addition, there is a logical namespace management mutex which protects the
 * data about namespaces. When interrogating the metadata of any namespace, this
 * lock must be held. This gets tricky as we need to call into blkdev, which may
 * issue callbacks into us that want this lock, and it is illegal to hold locks
 * across those blkdev calls because they might otherwise lead to deadlock
 * (blkdev leverages ndi_devi_enter()).
 *
 * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev
 * read-only. The idea is that most callers will use the NVME level, which says
 * this is a full traditional mutex operation. The BDRO level is used by blkdev
 * callback functions and is a promise to only read the data. When a blkdev
 * operation starts, the lock holder will use nvme_mgmt_bd_start(). This
 * strictly speaking drops the mutex, but records that the lock is logically
 * held by the thread that did the start() operation.
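 *
 * As a purely illustrative sketch (not the exact code; ns_bd_hdl is a
 * hypothetical name for a namespace's blkdev handle member), a path that has
 * to call a re-entrant blkdev function while logically holding the management
 * lock is bracketed roughly like this:
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	nvme_mgmt_bd_start(nvme);
 *	(void) bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
 *	nvme_mgmt_bd_end(nvme);
 *	nvme_mgmt_unlock(nvme);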
 *
 * During this time, other threads (or even the same one) may end up calling
 * into nvme_mgmt_lock(). Only one thread may still hold the lock at any time;
 * however, callers at the BDRO level will be allowed to proceed during this
 * time. This allows us to make consistent progress and honor the blkdev lock
 * ordering requirements, though it is not as straightforward as a simple mutex.
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry point
 * is still provided, which is used to send a shutdown notification to the
 * device.
 *
 *
 * NVMe Hotplug:
 *
 * The driver supports hot removal. It uses the NDI event framework
 * to register a callback, nvme_remove_callback, to clean up when a disk is
 * removed. In particular, the driver will unqueue outstanding I/O commands and
 * set n_dead on the softstate to true so that other operations, such as ioctls
 * and command submissions, fail as well.
 *
 * While the callback registration relies on the NDI event framework, the
 * removal event itself is kicked off in the PCIe hotplug framework, when the
 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
 * device was removed from the slot.
 *
 * The NVMe driver instance itself will remain until the final close of the
 * device.
 *
 *
 * DDI UFM Support
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of the
 * driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to be
 *   posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 *   cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues.
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared.
 *
 * In addition to the above properties, some device-specific tunables can be
 * configured using the nvme-config-list global property. The value of this
 * property is a list of triplets.
The formal syntax is: 443 * 444 * nvme-config-list ::= <triplet> [, <triplet>]* ; 445 * <triplet> ::= "<model>" , "<rev-list>" , "<tuple-list>" 446 * <rev-list> ::= [ <fwrev> [, <fwrev>]*] 447 * <tuple-list> ::= <tunable> [, <tunable>]* 448 * <tunable> ::= <name> : <value> 449 * 450 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and 451 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list> 452 * contains one or more tunables to apply to all controllers that match the 453 * specified model number and optionally firmware revision. Each <tunable> is a 454 * <name> : <value> pair. Supported tunables are: 455 * 456 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor 457 * specific command status as a fatal error leading device faulting 458 * 459 * - min-phys-block-size: the minimum physical block size to report to blkdev, 460 * which is among other things the basis for ZFS vdev ashift 461 * 462 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the 463 * volatile write cache, if present 464 * 465 * 466 * TODO: 467 * - figure out sane default for I/O queue depth reported to blkdev 468 * - FMA handling of media errors 469 * - support for devices supporting very large I/O requests using chained PRPs 470 * - support for configuring hardware parameters like interrupt coalescing 471 * - support for media formatting and hard partitioning into namespaces 472 * - support for big-endian systems 473 * - support for fast reboot 474 * - support for NVMe Subsystem Reset (1.1) 475 * - support for Scatter/Gather lists (1.1) 476 * - support for Reservations (1.1) 477 * - support for power management 478 */ 479 480 #include <sys/byteorder.h> 481 #ifdef _BIG_ENDIAN 482 #error nvme driver needs porting for big-endian platforms 483 #endif 484 485 #include <sys/modctl.h> 486 #include <sys/conf.h> 487 #include <sys/devops.h> 488 #include <sys/ddi.h> 489 #include <sys/ddi_ufm.h> 490 #include <sys/sunddi.h> 491 #include <sys/sunndi.h> 492 #include <sys/bitmap.h> 493 #include <sys/sysmacros.h> 494 #include <sys/param.h> 495 #include <sys/varargs.h> 496 #include <sys/cpuvar.h> 497 #include <sys/disp.h> 498 #include <sys/blkdev.h> 499 #include <sys/atomic.h> 500 #include <sys/archsystm.h> 501 #include <sys/sata/sata_hba.h> 502 #include <sys/stat.h> 503 #include <sys/policy.h> 504 #include <sys/list.h> 505 #include <sys/dkio.h> 506 #include <sys/pci.h> 507 #include <sys/mkdev.h> 508 509 #include <sys/nvme.h> 510 511 #ifdef __x86 512 #include <sys/x86_archext.h> 513 #endif 514 515 #include "nvme_reg.h" 516 #include "nvme_var.h" 517 518 /* 519 * Assertions to make sure that we've properly captured various aspects of the 520 * packed structures and haven't broken them during updates. 
521 */ 522 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE); 523 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256); 524 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512); 525 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520); 526 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768); 527 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792); 528 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048); 529 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072); 530 531 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE); 532 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32); 533 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92); 534 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104); 535 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128); 536 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384); 537 538 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE); 539 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE); 540 541 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE); 542 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32); 543 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64); 544 545 CTASSERT(sizeof (nvme_nschange_list_t) == 4096); 546 547 548 /* NVMe spec version supported */ 549 static const int nvme_version_major = 2; 550 551 /* tunable for admin command timeout in seconds, default is 1s */ 552 uint32_t nvme_admin_cmd_timeout = 1; 553 554 /* tunable for FORMAT NVM command timeout in seconds, default is 600s */ 555 uint32_t nvme_format_cmd_timeout = 600; 556 557 /* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */ 558 uint32_t nvme_commit_save_cmd_timeout = 15; 559 560 /* 561 * tunable for the size of arbitrary vendor specific admin commands, 562 * default is 16MiB. 563 */ 564 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24; 565 566 /* 567 * tunable for the max timeout of arbitary vendor specific admin commands, 568 * default is 60s. 569 */ 570 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60; 571 572 /* 573 * This ID space, AVL, and lock are used for keeping track of minor state across 574 * opens between different devices. 575 */ 576 static id_space_t *nvme_open_minors; 577 static avl_tree_t nvme_open_minors_avl; 578 kmutex_t nvme_open_minors_mutex; 579 580 /* 581 * Removal taskq used for n_dead callback processing. 582 */ 583 taskq_t *nvme_dead_taskq; 584 585 /* 586 * This enumeration is used in tandem with nvme_mgmt_lock() to describe which 587 * form of the lock is being taken. See the theory statement for more context. 588 */ 589 typedef enum { 590 /* 591 * This is the primary form of taking the management lock and indicates 592 * that the user intends to do a read/write of it. This should always be 593 * used for any ioctl paths or truly anything other than a blkdev 594 * information operation. 595 */ 596 NVME_MGMT_LOCK_NVME, 597 /* 598 * This is a subordinate form of the lock whereby the user is in blkdev 599 * callback context and will only intend to read the namespace data. 
600 */ 601 NVME_MGMT_LOCK_BDRO 602 } nvme_mgmt_lock_level_t; 603 604 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t); 605 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t); 606 static int nvme_quiesce(dev_info_t *); 607 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *); 608 static int nvme_setup_interrupts(nvme_t *, int, int); 609 static void nvme_release_interrupts(nvme_t *); 610 static uint_t nvme_intr(caddr_t, caddr_t); 611 612 static void nvme_shutdown(nvme_t *, boolean_t); 613 static boolean_t nvme_reset(nvme_t *, boolean_t); 614 static int nvme_init(nvme_t *); 615 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int); 616 static void nvme_free_cmd(nvme_cmd_t *); 617 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t, 618 bd_xfer_t *); 619 static void nvme_admin_cmd(nvme_cmd_t *, uint32_t); 620 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *); 621 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *); 622 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *); 623 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int); 624 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *); 625 static void nvme_wait_cmd(nvme_cmd_t *, uint_t); 626 static void nvme_wakeup_cmd(void *); 627 static void nvme_async_event_task(void *); 628 629 static int nvme_check_unknown_cmd_status(nvme_cmd_t *); 630 static int nvme_check_vendor_cmd_status(nvme_cmd_t *); 631 static int nvme_check_integrity_cmd_status(nvme_cmd_t *); 632 static int nvme_check_specific_cmd_status(nvme_cmd_t *); 633 static int nvme_check_generic_cmd_status(nvme_cmd_t *); 634 static inline int nvme_check_cmd_status(nvme_cmd_t *); 635 static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *, 636 nvme_ioctl_common_t *); 637 638 static int nvme_abort_cmd(nvme_cmd_t *, uint_t); 639 static void nvme_async_event(nvme_t *); 640 static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *); 641 static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *, 642 uint8_t); 643 static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *, 644 void **); 645 static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **); 646 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t, 647 uint32_t *); 648 static int nvme_write_cache_set(nvme_t *, boolean_t); 649 static int nvme_set_nqueues(nvme_t *); 650 651 static void nvme_free_dma(nvme_dma_t *); 652 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *, 653 nvme_dma_t **); 654 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t, 655 nvme_dma_t **); 656 static void nvme_free_qpair(nvme_qpair_t *); 657 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t); 658 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t); 659 660 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t); 661 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t); 662 static inline uint64_t nvme_get64(nvme_t *, uintptr_t); 663 static inline uint32_t nvme_get32(nvme_t *, uintptr_t); 664 665 static boolean_t nvme_check_regs_hdl(nvme_t *); 666 static boolean_t nvme_check_dma_hdl(nvme_dma_t *); 667 668 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t); 669 670 static void nvme_bd_xfer_done(void *); 671 static void nvme_bd_driveinfo(void *, bd_drive_t *); 672 static int nvme_bd_mediainfo(void *, bd_media_t *); 673 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t); 674 static int 
nvme_bd_read(void *, bd_xfer_t *); 675 static int nvme_bd_write(void *, bd_xfer_t *); 676 static int nvme_bd_sync(void *, bd_xfer_t *); 677 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *); 678 static int nvme_bd_free_space(void *, bd_xfer_t *); 679 680 static int nvme_prp_dma_constructor(void *, void *, int); 681 static void nvme_prp_dma_destructor(void *, void *); 682 683 static void nvme_prepare_devid(nvme_t *, uint32_t); 684 685 /* DDI UFM callbacks */ 686 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t, 687 ddi_ufm_image_t *); 688 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t, 689 ddi_ufm_slot_t *); 690 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *); 691 692 static int nvme_open(dev_t *, int, int, cred_t *); 693 static int nvme_close(dev_t, int, int, cred_t *); 694 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 695 696 static int nvme_init_ns(nvme_t *, uint32_t); 697 static boolean_t nvme_attach_ns(nvme_t *, nvme_ioctl_common_t *); 698 static boolean_t nvme_detach_ns(nvme_t *, nvme_ioctl_common_t *); 699 700 static int nvme_minor_comparator(const void *, const void *); 701 702 static ddi_ufm_ops_t nvme_ufm_ops = { 703 NULL, 704 nvme_ufm_fill_image, 705 nvme_ufm_fill_slot, 706 nvme_ufm_getcaps 707 }; 708 709 /* 710 * Minor numbers are split amongst those used for controllers and for device 711 * opens. The number of controller minors are limited based open MAXMIN32 per 712 * the theory statement. We allocate 1 million minors as a total guess at a 713 * number that'll probably be enough. The starting point of the open minors can 714 * be shifted to accommodate future expansion of the NVMe device minors. 715 */ 716 #define NVME_MINOR_INST_SHIFT 9 717 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid)) 718 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT) 719 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1)) 720 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2) 721 #define NVME_IS_VENDOR_SPECIFIC_CMD(x) (((x) >= 0xC0) && ((x) <= 0xFF)) 722 723 #define NVME_OPEN_NMINORS (1024 * 1024) 724 #define NVME_OPEN_MINOR_MIN (MAXMIN32 + 1) 725 #define NVME_OPEN_MINOR_MAX_EXCL (NVME_OPEN_MINOR_MIN + \ 726 NVME_OPEN_NMINORS) 727 728 static void *nvme_state; 729 static kmem_cache_t *nvme_cmd_cache; 730 731 /* 732 * DMA attributes for queue DMA memory 733 * 734 * Queue DMA memory must be page aligned. The maximum length of a queue is 735 * 65536 entries, and an entry can be 64 bytes long. 736 */ 737 static const ddi_dma_attr_t nvme_queue_dma_attr = { 738 .dma_attr_version = DMA_ATTR_V0, 739 .dma_attr_addr_lo = 0, 740 .dma_attr_addr_hi = 0xffffffffffffffffULL, 741 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1, 742 .dma_attr_align = 0x1000, 743 .dma_attr_burstsizes = 0x7ff, 744 .dma_attr_minxfer = 0x1000, 745 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t), 746 .dma_attr_seg = 0xffffffffffffffffULL, 747 .dma_attr_sgllen = 1, 748 .dma_attr_granular = 1, 749 .dma_attr_flags = 0, 750 }; 751 752 /* 753 * DMA attributes for transfers using Physical Region Page (PRP) entries 754 * 755 * A PRP entry describes one page of DMA memory using the page size specified 756 * in the controller configuration's memory page size register (CC.MPS). It uses 757 * a 64bit base address aligned to this page size. There is no limitation on 758 * chaining PRPs together for arbitrarily large DMA transfers. 
These DMA 759 * attributes will be copied into the nvme_t during nvme_attach() and the 760 * dma_attr_maxxfer will be updated. 761 */ 762 static const ddi_dma_attr_t nvme_prp_dma_attr = { 763 .dma_attr_version = DMA_ATTR_V0, 764 .dma_attr_addr_lo = 0, 765 .dma_attr_addr_hi = 0xffffffffffffffffULL, 766 .dma_attr_count_max = 0xfff, 767 .dma_attr_align = 0x1000, 768 .dma_attr_burstsizes = 0x7ff, 769 .dma_attr_minxfer = 0x1000, 770 .dma_attr_maxxfer = 0x1000, 771 .dma_attr_seg = 0xfff, 772 .dma_attr_sgllen = -1, 773 .dma_attr_granular = 1, 774 .dma_attr_flags = 0, 775 }; 776 777 /* 778 * DMA attributes for transfers using scatter/gather lists 779 * 780 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a 781 * 32bit length field. SGL Segment and SGL Last Segment entries require the 782 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied 783 * into the nvme_t, they are not currently used for any I/O. 784 */ 785 static const ddi_dma_attr_t nvme_sgl_dma_attr = { 786 .dma_attr_version = DMA_ATTR_V0, 787 .dma_attr_addr_lo = 0, 788 .dma_attr_addr_hi = 0xffffffffffffffffULL, 789 .dma_attr_count_max = 0xffffffffUL, 790 .dma_attr_align = 1, 791 .dma_attr_burstsizes = 0x7ff, 792 .dma_attr_minxfer = 0x10, 793 .dma_attr_maxxfer = 0xfffffffffULL, 794 .dma_attr_seg = 0xffffffffffffffffULL, 795 .dma_attr_sgllen = -1, 796 .dma_attr_granular = 0x10, 797 .dma_attr_flags = 0 798 }; 799 800 static ddi_device_acc_attr_t nvme_reg_acc_attr = { 801 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 802 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 803 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 804 }; 805 806 /* 807 * ioctl validation policies. These are policies that determine which namespaces 808 * are allowed or disallowed for various operations. Note, all policy items 809 * should be explicitly listed here to help make it clear what our intent is. 810 * That is also why some of these are identical or repeated when they cover 811 * different ioctls. 812 */ 813 814 /* 815 * The controller information ioctl generally contains read-only information 816 * about the controller that is sourced from multiple different pieces of 817 * information. This does not operate on a namespace and none are accepted. 818 */ 819 static const nvme_ioctl_check_t nvme_check_ctrl_info = { 820 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE, 821 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE, 822 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE 823 }; 824 825 /* 826 * The kernel namespace information requires a namespace ID to be specified. It 827 * does not allow for the broadcast ID to be specified. 828 */ 829 static const nvme_ioctl_check_t nvme_check_ns_info = { 830 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 831 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE, 832 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE 833 }; 834 835 /* 836 * Identify commands are allowed to operate on a namespace minor. Unfortunately, 837 * the namespace field in identify commands is a bit, weird. In particular, some 838 * commands need a valid namespace, while others are namespace listing 839 * operations, which means illegal namespaces like zero are allowed. 
840 */ 841 static const nvme_ioctl_check_t nvme_check_identify = { 842 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 843 .nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE, 844 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE 845 }; 846 847 /* 848 * The get log page command requires the ability to specify namespaces. When 849 * targeting the controller, one must use the broadcast NSID. 850 */ 851 static const nvme_ioctl_check_t nvme_check_get_logpage = { 852 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 853 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE, 854 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE 855 }; 856 857 /* 858 * When getting a feature, we do not want rewriting behavior as most features do 859 * not require a namespace to be specified. Specific instances are checked in 860 * nvme_validate_get_feature(). 861 */ 862 static const nvme_ioctl_check_t nvme_check_get_feature = { 863 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 864 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE, 865 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE 866 }; 867 868 /* 869 * Format commands must target a namespace. The broadcast namespace must be used 870 * when referring to the controller. 871 */ 872 static const nvme_ioctl_check_t nvme_check_format = { 873 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 874 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE, 875 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE 876 }; 877 878 /* 879 * Attach and detach must always target a minor. However, the broadcast 880 * namespace is not allowed. We still perform rewriting so that way specifying 881 * the controller node with 0 will be caught. 882 */ 883 static const nvme_ioctl_check_t nvme_check_attach_detach = { 884 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 885 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE, 886 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE 887 }; 888 889 /* 890 * Firmware operations must not target a namespace and are only allowed from the 891 * controller. 892 */ 893 static const nvme_ioctl_check_t nvme_check_firmware = { 894 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE, 895 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE, 896 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE 897 }; 898 899 /* 900 * Passthru commands are an odd set. We only allow them from the primary 901 * controller; however, we allow a namespace to be specified in them and allow 902 * the broadcast namespace. We do not perform rewriting because we don't know 903 * what the semantics are. We explicitly exempt passthru commands from needing 904 * an exclusive lock and leave it up to them to tell us the impact of the 905 * command and semantics. As this is a privileged interface and the semantics 906 * are arbitrary, there's not much we can do without some assistance from the 907 * consumer. 908 */ 909 static const nvme_ioctl_check_t nvme_check_passthru = { 910 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE, 911 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE, 912 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE 913 }; 914 915 /* 916 * Lock operations are allowed to target a namespace, but must not be rewritten. 917 * There is no support for the broadcast namespace. This is the only ioctl that 918 * should skip exclusive checking as it's used to grant it. 
919 */ 920 static const nvme_ioctl_check_t nvme_check_locking = { 921 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE, 922 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE, 923 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP 924 }; 925 926 static struct cb_ops nvme_cb_ops = { 927 .cb_open = nvme_open, 928 .cb_close = nvme_close, 929 .cb_strategy = nodev, 930 .cb_print = nodev, 931 .cb_dump = nodev, 932 .cb_read = nodev, 933 .cb_write = nodev, 934 .cb_ioctl = nvme_ioctl, 935 .cb_devmap = nodev, 936 .cb_mmap = nodev, 937 .cb_segmap = nodev, 938 .cb_chpoll = nochpoll, 939 .cb_prop_op = ddi_prop_op, 940 .cb_str = 0, 941 .cb_flag = D_NEW | D_MP, 942 .cb_rev = CB_REV, 943 .cb_aread = nodev, 944 .cb_awrite = nodev 945 }; 946 947 static struct dev_ops nvme_dev_ops = { 948 .devo_rev = DEVO_REV, 949 .devo_refcnt = 0, 950 .devo_getinfo = ddi_no_info, 951 .devo_identify = nulldev, 952 .devo_probe = nulldev, 953 .devo_attach = nvme_attach, 954 .devo_detach = nvme_detach, 955 .devo_reset = nodev, 956 .devo_cb_ops = &nvme_cb_ops, 957 .devo_bus_ops = NULL, 958 .devo_power = NULL, 959 .devo_quiesce = nvme_quiesce, 960 }; 961 962 static struct modldrv nvme_modldrv = { 963 .drv_modops = &mod_driverops, 964 .drv_linkinfo = "NVMe driver", 965 .drv_dev_ops = &nvme_dev_ops 966 }; 967 968 static struct modlinkage nvme_modlinkage = { 969 .ml_rev = MODREV_1, 970 .ml_linkage = { &nvme_modldrv, NULL } 971 }; 972 973 static bd_ops_t nvme_bd_ops = { 974 .o_version = BD_OPS_CURRENT_VERSION, 975 .o_drive_info = nvme_bd_driveinfo, 976 .o_media_info = nvme_bd_mediainfo, 977 .o_devid_init = nvme_bd_devid, 978 .o_sync_cache = nvme_bd_sync, 979 .o_read = nvme_bd_read, 980 .o_write = nvme_bd_write, 981 .o_free_space = nvme_bd_free_space, 982 }; 983 984 /* 985 * This list will hold commands that have timed out and couldn't be aborted. 986 * As we don't know what the hardware may still do with the DMA memory we can't 987 * free them, so we'll keep them forever on this list where we can easily look 988 * at them with mdb. 
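 * For example, one way to look at this list from mdb is illustrated below;
 * this is a sketch and the exact walker/dcmd invocation may differ:
 *
 *	> nvme_lost_cmds::walk list | ::print nvme_cmd_t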
989 */ 990 static struct list nvme_lost_cmds; 991 static kmutex_t nvme_lc_mutex; 992 993 int 994 _init(void) 995 { 996 int error; 997 998 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1); 999 if (error != DDI_SUCCESS) 1000 return (error); 1001 1002 if ((nvme_open_minors = id_space_create("nvme_open_minors", 1003 NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) { 1004 ddi_soft_state_fini(&nvme_state); 1005 return (ENOMEM); 1006 } 1007 1008 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache", 1009 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1010 1011 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL); 1012 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t), 1013 offsetof(nvme_cmd_t, nc_list)); 1014 1015 mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL); 1016 avl_create(&nvme_open_minors_avl, nvme_minor_comparator, 1017 sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl)); 1018 1019 nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1, 1020 TASKQ_PREPOPULATE); 1021 1022 bd_mod_init(&nvme_dev_ops); 1023 1024 error = mod_install(&nvme_modlinkage); 1025 if (error != DDI_SUCCESS) { 1026 ddi_soft_state_fini(&nvme_state); 1027 id_space_destroy(nvme_open_minors); 1028 mutex_destroy(&nvme_lc_mutex); 1029 list_destroy(&nvme_lost_cmds); 1030 bd_mod_fini(&nvme_dev_ops); 1031 mutex_destroy(&nvme_open_minors_mutex); 1032 avl_destroy(&nvme_open_minors_avl); 1033 taskq_destroy(nvme_dead_taskq); 1034 } 1035 1036 return (error); 1037 } 1038 1039 int 1040 _fini(void) 1041 { 1042 int error; 1043 1044 if (!list_is_empty(&nvme_lost_cmds)) 1045 return (DDI_FAILURE); 1046 1047 error = mod_remove(&nvme_modlinkage); 1048 if (error == DDI_SUCCESS) { 1049 ddi_soft_state_fini(&nvme_state); 1050 id_space_destroy(nvme_open_minors); 1051 kmem_cache_destroy(nvme_cmd_cache); 1052 mutex_destroy(&nvme_lc_mutex); 1053 list_destroy(&nvme_lost_cmds); 1054 bd_mod_fini(&nvme_dev_ops); 1055 mutex_destroy(&nvme_open_minors_mutex); 1056 avl_destroy(&nvme_open_minors_avl); 1057 taskq_destroy(nvme_dead_taskq); 1058 } 1059 1060 return (error); 1061 } 1062 1063 int 1064 _info(struct modinfo *modinfop) 1065 { 1066 return (mod_info(&nvme_modlinkage, modinfop)); 1067 } 1068 1069 static inline void 1070 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val) 1071 { 1072 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 1073 1074 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 1075 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val); 1076 } 1077 1078 static inline void 1079 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val) 1080 { 1081 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 1082 1083 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 1084 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val); 1085 } 1086 1087 static inline uint64_t 1088 nvme_get64(nvme_t *nvme, uintptr_t reg) 1089 { 1090 uint64_t val; 1091 1092 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 1093 1094 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 1095 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg)); 1096 1097 return (val); 1098 } 1099 1100 static inline uint32_t 1101 nvme_get32(nvme_t *nvme, uintptr_t reg) 1102 { 1103 uint32_t val; 1104 1105 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 1106 1107 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 1108 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg)); 1109 1110 return (val); 1111 } 1112 1113 static void 1114 nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock) 1115 { 1116 ASSERT3U(lock->nml_bd_own, ==, 0); 1117 mutex_destroy(&lock->nml_lock); 1118 
cv_destroy(&lock->nml_cv); 1119 } 1120 1121 static void 1122 nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock) 1123 { 1124 mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL); 1125 cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL); 1126 lock->nml_bd_own = 0; 1127 } 1128 1129 static void 1130 nvme_mgmt_unlock(nvme_t *nvme) 1131 { 1132 nvme_mgmt_lock_t *lock = &nvme->n_mgmt; 1133 1134 cv_broadcast(&lock->nml_cv); 1135 mutex_exit(&lock->nml_lock); 1136 } 1137 1138 #ifdef DEBUG 1139 static boolean_t 1140 nvme_mgmt_lock_held(nvme_t *nvme) 1141 { 1142 return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0); 1143 } 1144 #endif /* DEBUG */ 1145 1146 static void 1147 nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level) 1148 { 1149 nvme_mgmt_lock_t *lock = &nvme->n_mgmt; 1150 mutex_enter(&lock->nml_lock); 1151 while (lock->nml_bd_own != 0) { 1152 if (level == NVME_MGMT_LOCK_BDRO) 1153 break; 1154 cv_wait(&lock->nml_cv, &lock->nml_lock); 1155 } 1156 } 1157 1158 /* 1159 * This and nvme_mgmt_bd_end() are used to indicate that the driver is going to 1160 * be calling into a re-entrant blkdev related function. We cannot hold the lock 1161 * across such an operation and therefore must indicate that this is logically 1162 * held, while allowing other operations to proceed. This nvme_mgmt_bd_end() may 1163 * only be called by a thread that already holds the nmve_mgmt_lock(). 1164 */ 1165 static void 1166 nvme_mgmt_bd_start(nvme_t *nvme) 1167 { 1168 nvme_mgmt_lock_t *lock = &nvme->n_mgmt; 1169 1170 VERIFY(MUTEX_HELD(&lock->nml_lock)); 1171 VERIFY3U(lock->nml_bd_own, ==, 0); 1172 lock->nml_bd_own = (uintptr_t)curthread; 1173 mutex_exit(&lock->nml_lock); 1174 } 1175 1176 static void 1177 nvme_mgmt_bd_end(nvme_t *nvme) 1178 { 1179 nvme_mgmt_lock_t *lock = &nvme->n_mgmt; 1180 1181 mutex_enter(&lock->nml_lock); 1182 VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread); 1183 lock->nml_bd_own = 0; 1184 } 1185 1186 /* 1187 * This is a central clearing house for marking an NVMe controller dead and/or 1188 * removed. This takes care of setting the flag, taking care of outstanding 1189 * blocked locks, and sending a DDI FMA impact. This is called from a precarious 1190 * place where locking is suspect. The only guarantee we have is that the nvme_t 1191 * is valid and won't disappear until we return. 1192 * 1193 * This should only be used after attach has been called. 1194 */ 1195 static void 1196 nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed) 1197 { 1198 boolean_t was_dead; 1199 1200 /* 1201 * See if we win the race to set things up here. If someone beat us to 1202 * it, we do not do anything. 1203 */ 1204 was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE, 1205 B_TRUE); 1206 if (was_dead) { 1207 return; 1208 } 1209 1210 /* 1211 * If this was removed, there is no reason to change the service impact. 1212 * However, then we need to change our default return code that we use 1213 * here to indicate that it was gone versus that it is dead. 
1214 */ 1215 if (removed) { 1216 nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE; 1217 } else { 1218 ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD); 1219 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1220 } 1221 1222 taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme, 1223 TQ_NOSLEEP, &nvme->n_dead_tqent); 1224 } 1225 1226 static boolean_t 1227 nvme_check_regs_hdl(nvme_t *nvme) 1228 { 1229 ddi_fm_error_t error; 1230 1231 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION); 1232 1233 if (error.fme_status != DDI_FM_OK) 1234 return (B_TRUE); 1235 1236 return (B_FALSE); 1237 } 1238 1239 static boolean_t 1240 nvme_check_dma_hdl(nvme_dma_t *dma) 1241 { 1242 ddi_fm_error_t error; 1243 1244 if (dma == NULL) 1245 return (B_FALSE); 1246 1247 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION); 1248 1249 if (error.fme_status != DDI_FM_OK) 1250 return (B_TRUE); 1251 1252 return (B_FALSE); 1253 } 1254 1255 static void 1256 nvme_free_dma_common(nvme_dma_t *dma) 1257 { 1258 if (dma->nd_dmah != NULL) 1259 (void) ddi_dma_unbind_handle(dma->nd_dmah); 1260 if (dma->nd_acch != NULL) 1261 ddi_dma_mem_free(&dma->nd_acch); 1262 if (dma->nd_dmah != NULL) 1263 ddi_dma_free_handle(&dma->nd_dmah); 1264 } 1265 1266 static void 1267 nvme_free_dma(nvme_dma_t *dma) 1268 { 1269 nvme_free_dma_common(dma); 1270 kmem_free(dma, sizeof (*dma)); 1271 } 1272 1273 /* ARGSUSED */ 1274 static void 1275 nvme_prp_dma_destructor(void *buf, void *private) 1276 { 1277 nvme_dma_t *dma = (nvme_dma_t *)buf; 1278 1279 nvme_free_dma_common(dma); 1280 } 1281 1282 static int 1283 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma, 1284 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr) 1285 { 1286 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL, 1287 &dma->nd_dmah) != DDI_SUCCESS) { 1288 /* 1289 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and 1290 * the only other possible error is DDI_DMA_BADATTR which 1291 * indicates a driver bug which should cause a panic. 1292 */ 1293 dev_err(nvme->n_dip, CE_PANIC, 1294 "!failed to get DMA handle, check DMA attributes"); 1295 return (DDI_FAILURE); 1296 } 1297 1298 /* 1299 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified 1300 * or the flags are conflicting, which isn't the case here. 
1301 */ 1302 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr, 1303 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp, 1304 &dma->nd_len, &dma->nd_acch); 1305 1306 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp, 1307 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 1308 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) { 1309 dev_err(nvme->n_dip, CE_WARN, 1310 "!failed to bind DMA memory"); 1311 atomic_inc_32(&nvme->n_dma_bind_err); 1312 nvme_free_dma_common(dma); 1313 return (DDI_FAILURE); 1314 } 1315 1316 return (DDI_SUCCESS); 1317 } 1318 1319 static int 1320 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags, 1321 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret) 1322 { 1323 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP); 1324 1325 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) != 1326 DDI_SUCCESS) { 1327 *ret = NULL; 1328 kmem_free(dma, sizeof (nvme_dma_t)); 1329 return (DDI_FAILURE); 1330 } 1331 1332 bzero(dma->nd_memp, dma->nd_len); 1333 1334 *ret = dma; 1335 return (DDI_SUCCESS); 1336 } 1337 1338 /* ARGSUSED */ 1339 static int 1340 nvme_prp_dma_constructor(void *buf, void *private, int flags) 1341 { 1342 nvme_dma_t *dma = (nvme_dma_t *)buf; 1343 nvme_t *nvme = (nvme_t *)private; 1344 1345 dma->nd_dmah = NULL; 1346 dma->nd_acch = NULL; 1347 1348 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize, 1349 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) { 1350 return (-1); 1351 } 1352 1353 ASSERT(dma->nd_ncookie == 1); 1354 1355 dma->nd_cached = B_TRUE; 1356 1357 return (0); 1358 } 1359 1360 static int 1361 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len, 1362 uint_t flags, nvme_dma_t **dma) 1363 { 1364 uint32_t len = nentry * qe_len; 1365 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr; 1366 1367 len = roundup(len, nvme->n_pagesize); 1368 1369 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma) 1370 != DDI_SUCCESS) { 1371 dev_err(nvme->n_dip, CE_WARN, 1372 "!failed to get DMA memory for queue"); 1373 goto fail; 1374 } 1375 1376 if ((*dma)->nd_ncookie != 1) { 1377 dev_err(nvme->n_dip, CE_WARN, 1378 "!got too many cookies for queue DMA"); 1379 goto fail; 1380 } 1381 1382 return (DDI_SUCCESS); 1383 1384 fail: 1385 if (*dma) { 1386 nvme_free_dma(*dma); 1387 *dma = NULL; 1388 } 1389 1390 return (DDI_FAILURE); 1391 } 1392 1393 static void 1394 nvme_free_cq(nvme_cq_t *cq) 1395 { 1396 mutex_destroy(&cq->ncq_mutex); 1397 1398 if (cq->ncq_cmd_taskq != NULL) 1399 taskq_destroy(cq->ncq_cmd_taskq); 1400 1401 if (cq->ncq_dma != NULL) 1402 nvme_free_dma(cq->ncq_dma); 1403 1404 kmem_free(cq, sizeof (*cq)); 1405 } 1406 1407 static void 1408 nvme_free_qpair(nvme_qpair_t *qp) 1409 { 1410 int i; 1411 1412 mutex_destroy(&qp->nq_mutex); 1413 sema_destroy(&qp->nq_sema); 1414 1415 if (qp->nq_sqdma != NULL) 1416 nvme_free_dma(qp->nq_sqdma); 1417 1418 if (qp->nq_active_cmds > 0) 1419 for (i = 0; i != qp->nq_nentry; i++) 1420 if (qp->nq_cmd[i] != NULL) 1421 nvme_free_cmd(qp->nq_cmd[i]); 1422 1423 if (qp->nq_cmd != NULL) 1424 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry); 1425 1426 kmem_free(qp, sizeof (nvme_qpair_t)); 1427 } 1428 1429 /* 1430 * Destroy the pre-allocated cq array, but only free individual completion 1431 * queues from the given starting index. 
1432 */ 1433 static void 1434 nvme_destroy_cq_array(nvme_t *nvme, uint_t start) 1435 { 1436 uint_t i; 1437 1438 for (i = start; i < nvme->n_cq_count; i++) 1439 if (nvme->n_cq[i] != NULL) 1440 nvme_free_cq(nvme->n_cq[i]); 1441 1442 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count); 1443 } 1444 1445 static int 1446 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx, 1447 uint_t nthr) 1448 { 1449 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP); 1450 char name[64]; /* large enough for the taskq name */ 1451 1452 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER, 1453 DDI_INTR_PRI(nvme->n_intr_pri)); 1454 1455 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), 1456 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS) 1457 goto fail; 1458 1459 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp; 1460 cq->ncq_nentry = nentry; 1461 cq->ncq_id = idx; 1462 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx); 1463 1464 /* 1465 * Each completion queue has its own command taskq. 1466 */ 1467 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u", 1468 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx); 1469 1470 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX, 1471 TASKQ_PREPOPULATE); 1472 1473 if (cq->ncq_cmd_taskq == NULL) { 1474 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd " 1475 "taskq for cq %u", idx); 1476 goto fail; 1477 } 1478 1479 *cqp = cq; 1480 return (DDI_SUCCESS); 1481 1482 fail: 1483 nvme_free_cq(cq); 1484 *cqp = NULL; 1485 1486 return (DDI_FAILURE); 1487 } 1488 1489 /* 1490 * Create the n_cq array big enough to hold "ncq" completion queues. 1491 * If the array already exists it will be re-sized (but only larger). 1492 * The admin queue is included in this array, which boosts the 1493 * max number of entries to UINT16_MAX + 1. 1494 */ 1495 static int 1496 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr) 1497 { 1498 nvme_cq_t **cq; 1499 uint_t i, cq_count; 1500 1501 ASSERT3U(ncq, >, nvme->n_cq_count); 1502 1503 cq = nvme->n_cq; 1504 cq_count = nvme->n_cq_count; 1505 1506 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP); 1507 nvme->n_cq_count = ncq; 1508 1509 for (i = 0; i < cq_count; i++) 1510 nvme->n_cq[i] = cq[i]; 1511 1512 for (; i < nvme->n_cq_count; i++) 1513 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) != 1514 DDI_SUCCESS) 1515 goto fail; 1516 1517 if (cq != NULL) 1518 kmem_free(cq, sizeof (*cq) * cq_count); 1519 1520 return (DDI_SUCCESS); 1521 1522 fail: 1523 nvme_destroy_cq_array(nvme, cq_count); 1524 /* 1525 * Restore the original array 1526 */ 1527 nvme->n_cq_count = cq_count; 1528 nvme->n_cq = cq; 1529 1530 return (DDI_FAILURE); 1531 } 1532 1533 static int 1534 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, 1535 uint_t idx) 1536 { 1537 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP); 1538 uint_t cq_idx; 1539 1540 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER, 1541 DDI_INTR_PRI(nvme->n_intr_pri)); 1542 1543 /* 1544 * The NVMe spec defines that a full queue has one empty (unused) slot; 1545 * initialize the semaphore accordingly. 1546 */ 1547 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL); 1548 1549 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), 1550 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS) 1551 goto fail; 1552 1553 /* 1554 * idx == 0 is adminq, those above 0 are shared io completion queues. 1555 */ 1556 cq_idx = idx == 0 ? 
0 : 1 + (idx - 1) % (nvme->n_cq_count - 1); 1557 qp->nq_cq = nvme->n_cq[cq_idx]; 1558 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp; 1559 qp->nq_nentry = nentry; 1560 1561 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx); 1562 1563 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP); 1564 qp->nq_next_cmd = 0; 1565 1566 *nqp = qp; 1567 return (DDI_SUCCESS); 1568 1569 fail: 1570 nvme_free_qpair(qp); 1571 *nqp = NULL; 1572 1573 return (DDI_FAILURE); 1574 } 1575 1576 static nvme_cmd_t * 1577 nvme_alloc_cmd(nvme_t *nvme, int kmflag) 1578 { 1579 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag); 1580 1581 if (cmd == NULL) 1582 return (cmd); 1583 1584 bzero(cmd, sizeof (nvme_cmd_t)); 1585 1586 cmd->nc_nvme = nvme; 1587 1588 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER, 1589 DDI_INTR_PRI(nvme->n_intr_pri)); 1590 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL); 1591 1592 return (cmd); 1593 } 1594 1595 static void 1596 nvme_free_cmd(nvme_cmd_t *cmd) 1597 { 1598 /* Don't free commands on the lost commands list. */ 1599 if (list_link_active(&cmd->nc_list)) 1600 return; 1601 1602 if (cmd->nc_dma) { 1603 nvme_free_dma(cmd->nc_dma); 1604 cmd->nc_dma = NULL; 1605 } 1606 1607 if (cmd->nc_prp) { 1608 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp); 1609 cmd->nc_prp = NULL; 1610 } 1611 1612 cv_destroy(&cmd->nc_cv); 1613 mutex_destroy(&cmd->nc_mutex); 1614 1615 kmem_cache_free(nvme_cmd_cache, cmd); 1616 } 1617 1618 static void 1619 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1620 { 1621 sema_p(&qp->nq_sema); 1622 nvme_submit_cmd_common(qp, cmd); 1623 } 1624 1625 static int 1626 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1627 { 1628 if (cmd->nc_nvme->n_dead) { 1629 return (EIO); 1630 } 1631 1632 if (sema_tryp(&qp->nq_sema) == 0) 1633 return (EAGAIN); 1634 1635 nvme_submit_cmd_common(qp, cmd); 1636 return (0); 1637 } 1638 1639 static void 1640 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1641 { 1642 nvme_reg_sqtdbl_t tail = { 0 }; 1643 1644 mutex_enter(&qp->nq_mutex); 1645 cmd->nc_completed = B_FALSE; 1646 1647 /* 1648 * Now that we hold the queue pair lock, we must check whether or not 1649 * the controller has been listed as dead (e.g. was removed due to 1650 * hotplug). This is necessary as otherwise we could race with 1651 * nvme_remove_callback(). Because this has not been enqueued, we don't 1652 * call nvme_unqueue_cmd(), which is why we must manually decrement the 1653 * semaphore. 1654 */ 1655 if (cmd->nc_nvme->n_dead) { 1656 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback, 1657 cmd, TQ_NOSLEEP, &cmd->nc_tqent); 1658 sema_v(&qp->nq_sema); 1659 mutex_exit(&qp->nq_mutex); 1660 return; 1661 } 1662 1663 /* 1664 * Try to insert the cmd into the active cmd array at the nq_next_cmd 1665 * slot. If the slot is already occupied advance to the next slot and 1666 * try again. This can happen for long running commands like async event 1667 * requests. 
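 * For illustration, with a queue length of 4: if nq_next_cmd is 1 but
 * slot 1 is still held by a long-running command, the loop below checks
 * slots 1, 2, 3, 0, ... until it hits a NULL entry, and that slot's index
 * becomes the CID of the new submission. Because nq_sema admits at most
 * nentry - 1 commands at once, a free slot always exists and the search
 * is guaranteed to terminate.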
1668 */ 1669 while (qp->nq_cmd[qp->nq_next_cmd] != NULL) 1670 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1671 qp->nq_cmd[qp->nq_next_cmd] = cmd; 1672 1673 qp->nq_active_cmds++; 1674 1675 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd; 1676 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t)); 1677 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah, 1678 sizeof (nvme_sqe_t) * qp->nq_sqtail, 1679 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV); 1680 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1681 1682 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry; 1683 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r); 1684 1685 mutex_exit(&qp->nq_mutex); 1686 } 1687 1688 static nvme_cmd_t * 1689 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid) 1690 { 1691 nvme_cmd_t *cmd; 1692 1693 ASSERT(mutex_owned(&qp->nq_mutex)); 1694 ASSERT3S(cid, <, qp->nq_nentry); 1695 1696 cmd = qp->nq_cmd[cid]; 1697 /* 1698 * Some controllers will erroneously add things to the completion queue 1699 * for which there is no matching outstanding command. If this happens, 1700 * it is almost certainly a controller firmware bug since nq_mutex 1701 * is held across command submission and ringing the queue doorbell, 1702 * and is also held in this function. 1703 * 1704 * If we see such an unexpected command, there is not much we can do. 1705 * These will be logged and counted in nvme_get_completed(), but 1706 * otherwise ignored. 1707 */ 1708 if (cmd == NULL) 1709 return (NULL); 1710 qp->nq_cmd[cid] = NULL; 1711 ASSERT3U(qp->nq_active_cmds, >, 0); 1712 qp->nq_active_cmds--; 1713 sema_v(&qp->nq_sema); 1714 1715 ASSERT3P(cmd, !=, NULL); 1716 ASSERT3P(cmd->nc_nvme, ==, nvme); 1717 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid); 1718 1719 return (cmd); 1720 } 1721 1722 /* 1723 * Get the command tied to the next completed cqe and bump along completion 1724 * queue head counter. 1725 */ 1726 static nvme_cmd_t * 1727 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq) 1728 { 1729 nvme_qpair_t *qp; 1730 nvme_cqe_t *cqe; 1731 nvme_cmd_t *cmd; 1732 1733 ASSERT(mutex_owned(&cq->ncq_mutex)); 1734 1735 retry: 1736 cqe = &cq->ncq_cq[cq->ncq_head]; 1737 1738 /* Check phase tag of CQE. Hardware inverts it for new entries. */ 1739 if (cqe->cqe_sf.sf_p == cq->ncq_phase) 1740 return (NULL); 1741 1742 qp = nvme->n_ioq[cqe->cqe_sqid]; 1743 1744 mutex_enter(&qp->nq_mutex); 1745 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid); 1746 mutex_exit(&qp->nq_mutex); 1747 1748 qp->nq_sqhead = cqe->cqe_sqhd; 1749 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry; 1750 1751 /* Toggle phase on wrap-around. */ 1752 if (cq->ncq_head == 0) 1753 cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1; 1754 1755 if (cmd == NULL) { 1756 dev_err(nvme->n_dip, CE_WARN, 1757 "!received completion for unknown cid 0x%x", cqe->cqe_cid); 1758 atomic_inc_32(&nvme->n_unknown_cid); 1759 /* 1760 * We want to ignore this unexpected completion entry as it 1761 * is most likely a result of a bug in the controller firmware. 1762 * However, if we return NULL, then callers will assume there 1763 * are no more pending commands for this wakeup. Retry to keep 1764 * enumerating commands until the phase tag indicates there are 1765 * no more and we are really done. 1766 */ 1767 goto retry; 1768 } 1769 1770 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid); 1771 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); 1772 1773 return (cmd); 1774 } 1775 1776 /* 1777 * Process all completed commands on the io completion queue. 
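 * Returns the number of commands handed off to the completion taskq, so
 * callers such as the interrupt handler can tell whether any work was
 * actually pending on this queue.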
1778 */ 1779 static uint_t 1780 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq) 1781 { 1782 nvme_reg_cqhdbl_t head = { 0 }; 1783 nvme_cmd_t *cmd; 1784 uint_t completed = 0; 1785 1786 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1787 DDI_SUCCESS) 1788 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1789 __func__); 1790 1791 mutex_enter(&cq->ncq_mutex); 1792 1793 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1794 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd, 1795 TQ_NOSLEEP, &cmd->nc_tqent); 1796 1797 completed++; 1798 } 1799 1800 if (completed > 0) { 1801 /* 1802 * Update the completion queue head doorbell. 1803 */ 1804 head.b.cqhdbl_cqh = cq->ncq_head; 1805 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1806 } 1807 1808 mutex_exit(&cq->ncq_mutex); 1809 1810 return (completed); 1811 } 1812 1813 static nvme_cmd_t * 1814 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) 1815 { 1816 nvme_cq_t *cq = qp->nq_cq; 1817 nvme_reg_cqhdbl_t head = { 0 }; 1818 nvme_cmd_t *cmd; 1819 1820 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1821 DDI_SUCCESS) 1822 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1823 __func__); 1824 1825 mutex_enter(&cq->ncq_mutex); 1826 1827 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1828 head.b.cqhdbl_cqh = cq->ncq_head; 1829 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1830 } 1831 1832 mutex_exit(&cq->ncq_mutex); 1833 1834 return (cmd); 1835 } 1836 1837 static int 1838 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd) 1839 { 1840 nvme_cqe_t *cqe = &cmd->nc_cqe; 1841 1842 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1843 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1844 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1845 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1846 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1847 1848 if (cmd->nc_xfer != NULL) 1849 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1850 1851 if (cmd->nc_nvme->n_strict_version) { 1852 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 1853 } 1854 1855 return (EIO); 1856 } 1857 1858 static int 1859 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd) 1860 { 1861 nvme_cqe_t *cqe = &cmd->nc_cqe; 1862 1863 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1864 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1865 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1866 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1867 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1868 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) { 1869 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 1870 } 1871 1872 return (EIO); 1873 } 1874 1875 static int 1876 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd) 1877 { 1878 nvme_cqe_t *cqe = &cmd->nc_cqe; 1879 1880 switch (cqe->cqe_sf.sf_sc) { 1881 case NVME_CQE_SC_INT_NVM_WRITE: 1882 /* write fail */ 1883 /* TODO: post ereport */ 1884 if (cmd->nc_xfer != NULL) 1885 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1886 return (EIO); 1887 1888 case NVME_CQE_SC_INT_NVM_READ: 1889 /* read fail */ 1890 /* TODO: post ereport */ 1891 if (cmd->nc_xfer != NULL) 1892 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1893 return (EIO); 1894 1895 default: 1896 return (nvme_check_unknown_cmd_status(cmd)); 1897 } 1898 } 1899 1900 static int 1901 nvme_check_generic_cmd_status(nvme_cmd_t *cmd) 1902 { 1903 nvme_cqe_t *cqe = &cmd->nc_cqe; 1904 1905 switch (cqe->cqe_sf.sf_sc) { 1906 case NVME_CQE_SC_GEN_SUCCESS: 1907 return (0); 1908 1909 /* 1910 * Errors indicating a bug in the driver 
should cause a panic. 1911 */ 1912 case NVME_CQE_SC_GEN_INV_OPC: 1913 /* Invalid Command Opcode */ 1914 if (!cmd->nc_dontpanic) 1915 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1916 "programming error: invalid opcode in cmd %p", 1917 (void *)cmd); 1918 return (EINVAL); 1919 1920 case NVME_CQE_SC_GEN_INV_FLD: 1921 /* Invalid Field in Command */ 1922 if (!cmd->nc_dontpanic) 1923 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1924 "programming error: invalid field in cmd %p", 1925 (void *)cmd); 1926 return (EIO); 1927 1928 case NVME_CQE_SC_GEN_ID_CNFL: 1929 /* Command ID Conflict */ 1930 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1931 "cmd ID conflict in cmd %p", (void *)cmd); 1932 return (0); 1933 1934 case NVME_CQE_SC_GEN_INV_NS: 1935 /* Invalid Namespace or Format */ 1936 if (!cmd->nc_dontpanic) 1937 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1938 "programming error: invalid NS/format in cmd %p", 1939 (void *)cmd); 1940 return (EINVAL); 1941 1942 case NVME_CQE_SC_GEN_NVM_LBA_RANGE: 1943 /* LBA Out Of Range */ 1944 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1945 "LBA out of range in cmd %p", (void *)cmd); 1946 return (0); 1947 1948 /* 1949 * Non-fatal errors, handle gracefully. 1950 */ 1951 case NVME_CQE_SC_GEN_DATA_XFR_ERR: 1952 /* Data Transfer Error (DMA) */ 1953 /* TODO: post ereport */ 1954 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err); 1955 if (cmd->nc_xfer != NULL) 1956 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1957 return (EIO); 1958 1959 case NVME_CQE_SC_GEN_INTERNAL_ERR: 1960 /* 1961 * Internal Error. The spec (v1.0, section 4.5.1.2) says 1962 * detailed error information is returned as async event, 1963 * so we pretty much ignore the error here and handle it 1964 * in the async event handler. 1965 */ 1966 atomic_inc_32(&cmd->nc_nvme->n_internal_err); 1967 if (cmd->nc_xfer != NULL) 1968 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1969 return (EIO); 1970 1971 case NVME_CQE_SC_GEN_ABORT_REQUEST: 1972 /* 1973 * Command Abort Requested. This normally happens only when a 1974 * command times out. 1975 */ 1976 /* TODO: post ereport or change blkdev to handle this? 
*/ 1977 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err); 1978 return (ECANCELED); 1979 1980 case NVME_CQE_SC_GEN_ABORT_PWRLOSS: 1981 /* Command Aborted due to Power Loss Notification */ 1982 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 1983 return (EIO); 1984 1985 case NVME_CQE_SC_GEN_ABORT_SQ_DEL: 1986 /* Command Aborted due to SQ Deletion */ 1987 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del); 1988 return (EIO); 1989 1990 case NVME_CQE_SC_GEN_NVM_CAP_EXC: 1991 /* Capacity Exceeded */ 1992 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc); 1993 if (cmd->nc_xfer != NULL) 1994 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1995 return (EIO); 1996 1997 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY: 1998 /* Namespace Not Ready */ 1999 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy); 2000 if (cmd->nc_xfer != NULL) 2001 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 2002 return (EIO); 2003 2004 case NVME_CQE_SC_GEN_NVM_FORMATTING: 2005 /* Format in progress (1.2) */ 2006 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2)) 2007 return (nvme_check_unknown_cmd_status(cmd)); 2008 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_formatting); 2009 if (cmd->nc_xfer != NULL) 2010 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 2011 return (EIO); 2012 2013 default: 2014 return (nvme_check_unknown_cmd_status(cmd)); 2015 } 2016 } 2017 2018 static int 2019 nvme_check_specific_cmd_status(nvme_cmd_t *cmd) 2020 { 2021 nvme_cqe_t *cqe = &cmd->nc_cqe; 2022 2023 switch (cqe->cqe_sf.sf_sc) { 2024 case NVME_CQE_SC_SPC_INV_CQ: 2025 /* Completion Queue Invalid */ 2026 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE); 2027 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err); 2028 return (EINVAL); 2029 2030 case NVME_CQE_SC_SPC_INV_QID: 2031 /* Invalid Queue Identifier */ 2032 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 2033 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE || 2034 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE || 2035 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 2036 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err); 2037 return (EINVAL); 2038 2039 case NVME_CQE_SC_SPC_MAX_QSZ_EXC: 2040 /* Max Queue Size Exceeded */ 2041 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 2042 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 2043 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc); 2044 return (EINVAL); 2045 2046 case NVME_CQE_SC_SPC_ABRT_CMD_EXC: 2047 /* Abort Command Limit Exceeded */ 2048 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT); 2049 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 2050 "abort command limit exceeded in cmd %p", (void *)cmd); 2051 return (0); 2052 2053 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC: 2054 /* Async Event Request Limit Exceeded */ 2055 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT); 2056 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 2057 "async event request limit exceeded in cmd %p", 2058 (void *)cmd); 2059 return (0); 2060 2061 case NVME_CQE_SC_SPC_INV_INT_VECT: 2062 /* Invalid Interrupt Vector */ 2063 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 2064 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect); 2065 return (EINVAL); 2066 2067 case NVME_CQE_SC_SPC_INV_LOG_PAGE: 2068 /* Invalid Log Page */ 2069 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE); 2070 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page); 2071 return (EINVAL); 2072 2073 case NVME_CQE_SC_SPC_INV_FORMAT: 2074 /* Invalid Format */ 2075 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT); 2076 atomic_inc_32(&cmd->nc_nvme->n_inv_format); 2077 if (cmd->nc_xfer != NULL) 2078 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2079 return (EINVAL); 2080 
2081 case NVME_CQE_SC_SPC_INV_Q_DEL: 2082 /* Invalid Queue Deletion */ 2083 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 2084 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del); 2085 return (EINVAL); 2086 2087 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR: 2088 /* Conflicting Attributes */ 2089 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT || 2090 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 2091 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 2092 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr); 2093 if (cmd->nc_xfer != NULL) 2094 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2095 return (EINVAL); 2096 2097 case NVME_CQE_SC_SPC_NVM_INV_PROT: 2098 /* Invalid Protection Information */ 2099 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE || 2100 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 2101 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 2102 atomic_inc_32(&cmd->nc_nvme->n_inv_prot); 2103 if (cmd->nc_xfer != NULL) 2104 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2105 return (EINVAL); 2106 2107 case NVME_CQE_SC_SPC_NVM_READONLY: 2108 /* Write to Read Only Range */ 2109 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 2110 atomic_inc_32(&cmd->nc_nvme->n_readonly); 2111 if (cmd->nc_xfer != NULL) 2112 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 2113 return (EROFS); 2114 2115 case NVME_CQE_SC_SPC_INV_FW_SLOT: 2116 /* Invalid Firmware Slot */ 2117 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2118 return (EINVAL); 2119 2120 case NVME_CQE_SC_SPC_INV_FW_IMG: 2121 /* Invalid Firmware Image */ 2122 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2123 return (EINVAL); 2124 2125 case NVME_CQE_SC_SPC_FW_RESET: 2126 /* Conventional Reset Required */ 2127 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2128 return (0); 2129 2130 case NVME_CQE_SC_SPC_FW_NSSR: 2131 /* NVMe Subsystem Reset Required */ 2132 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2133 return (0); 2134 2135 case NVME_CQE_SC_SPC_FW_NEXT_RESET: 2136 /* Activation Requires Reset */ 2137 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2138 return (0); 2139 2140 case NVME_CQE_SC_SPC_FW_MTFA: 2141 /* Activation Requires Maximum Time Violation */ 2142 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2143 return (EAGAIN); 2144 2145 case NVME_CQE_SC_SPC_FW_PROHIBITED: 2146 /* Activation Prohibited */ 2147 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2148 return (EINVAL); 2149 2150 case NVME_CQE_SC_SPC_FW_OVERLAP: 2151 /* Overlapping Firmware Ranges */ 2152 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD || 2153 cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 2154 return (EINVAL); 2155 2156 default: 2157 return (nvme_check_unknown_cmd_status(cmd)); 2158 } 2159 } 2160 2161 static inline int 2162 nvme_check_cmd_status(nvme_cmd_t *cmd) 2163 { 2164 nvme_cqe_t *cqe = &cmd->nc_cqe; 2165 2166 /* 2167 * Take a shortcut if the controller is dead, or if 2168 * command status indicates no error. 
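 * Otherwise dispatch on the Status Code Type (SCT) of the completion:
 * generic, command-specific, media/data integrity, and vendor-specific
 * status codes each have their own checker below, and anything else is
 * treated as an unknown status.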
2169 */ 2170 if (cmd->nc_nvme->n_dead) 2171 return (EIO); 2172 2173 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2174 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 2175 return (0); 2176 2177 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 2178 return (nvme_check_generic_cmd_status(cmd)); 2179 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 2180 return (nvme_check_specific_cmd_status(cmd)); 2181 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 2182 return (nvme_check_integrity_cmd_status(cmd)); 2183 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 2184 return (nvme_check_vendor_cmd_status(cmd)); 2185 2186 return (nvme_check_unknown_cmd_status(cmd)); 2187 } 2188 2189 /* 2190 * Check the command status as used by an ioctl path and do not convert it to an 2191 * errno. We still allow all the command status checking to occur, but otherwise 2192 * will pass back the controller error as is. 2193 */ 2194 static boolean_t 2195 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc) 2196 { 2197 nvme_cqe_t *cqe = &cmd->nc_cqe; 2198 nvme_t *nvme = cmd->nc_nvme; 2199 2200 if (nvme->n_dead) { 2201 return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0)); 2202 } 2203 2204 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2205 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 2206 return (B_TRUE); 2207 2208 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) { 2209 (void) nvme_check_generic_cmd_status(cmd); 2210 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) { 2211 (void) nvme_check_specific_cmd_status(cmd); 2212 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) { 2213 (void) nvme_check_integrity_cmd_status(cmd); 2214 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) { 2215 (void) nvme_check_vendor_cmd_status(cmd); 2216 } else { 2217 (void) nvme_check_unknown_cmd_status(cmd); 2218 } 2219 2220 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR, 2221 cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc)); 2222 } 2223 2224 static int 2225 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec) 2226 { 2227 nvme_t *nvme = abort_cmd->nc_nvme; 2228 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2229 nvme_abort_cmd_t ac = { 0 }; 2230 int ret = 0; 2231 2232 sema_p(&nvme->n_abort_sema); 2233 2234 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid; 2235 ac.b.ac_sqid = abort_cmd->nc_sqid; 2236 2237 cmd->nc_sqid = 0; 2238 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 2239 cmd->nc_callback = nvme_wakeup_cmd; 2240 cmd->nc_sqe.sqe_cdw10 = ac.r; 2241 2242 /* 2243 * Send the ABORT to the hardware. The ABORT command will return _after_ 2244 * the aborted command has completed (aborted or otherwise), but since 2245 * we still hold the aborted command's mutex its callback hasn't been 2246 * processed yet. 2247 */ 2248 nvme_admin_cmd(cmd, sec); 2249 sema_v(&nvme->n_abort_sema); 2250 2251 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2252 dev_err(nvme->n_dip, CE_WARN, 2253 "!ABORT failed with sct = %x, sc = %x", 2254 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2255 atomic_inc_32(&nvme->n_abort_failed); 2256 } else { 2257 dev_err(nvme->n_dip, CE_WARN, 2258 "!ABORT of command %d/%d %ssuccessful", 2259 abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid, 2260 cmd->nc_cqe.cqe_dw0 & 1 ? 
"un" : ""); 2261 if ((cmd->nc_cqe.cqe_dw0 & 1) == 0) 2262 atomic_inc_32(&nvme->n_cmd_aborted); 2263 } 2264 2265 nvme_free_cmd(cmd); 2266 return (ret); 2267 } 2268 2269 /* 2270 * nvme_wait_cmd -- wait for command completion or timeout 2271 * 2272 * In case of a serious error or a timeout of the abort command the hardware 2273 * will be declared dead and FMA will be notified. 2274 */ 2275 static void 2276 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec) 2277 { 2278 clock_t timeout = ddi_get_lbolt() + drv_usectohz((long)sec * MICROSEC); 2279 nvme_t *nvme = cmd->nc_nvme; 2280 nvme_reg_csts_t csts; 2281 nvme_qpair_t *qp; 2282 2283 ASSERT(mutex_owned(&cmd->nc_mutex)); 2284 2285 while (!cmd->nc_completed) { 2286 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) 2287 break; 2288 } 2289 2290 if (cmd->nc_completed) 2291 return; 2292 2293 /* 2294 * The command timed out. 2295 * 2296 * Check controller for fatal status, any errors associated with the 2297 * register or DMA handle, or for a double timeout (abort command timed 2298 * out). If necessary log a warning and call FMA. 2299 */ 2300 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2301 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, " 2302 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 2303 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 2304 atomic_inc_32(&nvme->n_cmd_timeout); 2305 2306 if (csts.b.csts_cfs || 2307 nvme_check_regs_hdl(nvme) || 2308 nvme_check_dma_hdl(cmd->nc_dma) || 2309 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 2310 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2311 } else if (nvme_abort_cmd(cmd, sec) == 0) { 2312 /* 2313 * If the abort succeeded the command should complete 2314 * immediately with an appropriate status. 2315 */ 2316 while (!cmd->nc_completed) 2317 cv_wait(&cmd->nc_cv, &cmd->nc_mutex); 2318 2319 return; 2320 } 2321 2322 qp = nvme->n_ioq[cmd->nc_sqid]; 2323 2324 mutex_enter(&qp->nq_mutex); 2325 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 2326 mutex_exit(&qp->nq_mutex); 2327 2328 /* 2329 * As we don't know what the presumed dead hardware might still do with 2330 * the DMA memory, we'll put the command on the lost commands list if it 2331 * has any DMA memory. 2332 */ 2333 if (cmd->nc_dma != NULL) { 2334 mutex_enter(&nvme_lc_mutex); 2335 list_insert_head(&nvme_lost_cmds, cmd); 2336 mutex_exit(&nvme_lc_mutex); 2337 } 2338 } 2339 2340 static void 2341 nvme_wakeup_cmd(void *arg) 2342 { 2343 nvme_cmd_t *cmd = arg; 2344 2345 mutex_enter(&cmd->nc_mutex); 2346 cmd->nc_completed = B_TRUE; 2347 cv_signal(&cmd->nc_cv); 2348 mutex_exit(&cmd->nc_mutex); 2349 } 2350 2351 static void 2352 nvme_async_event_task(void *arg) 2353 { 2354 nvme_cmd_t *cmd = arg; 2355 nvme_t *nvme = cmd->nc_nvme; 2356 nvme_error_log_entry_t *error_log = NULL; 2357 nvme_health_log_t *health_log = NULL; 2358 nvme_nschange_list_t *nslist = NULL; 2359 size_t logsize = 0; 2360 nvme_async_event_t event; 2361 2362 /* 2363 * Check for errors associated with the async request itself. The only 2364 * command-specific error is "async event limit exceeded", which 2365 * indicates a programming error in the driver and causes a panic in 2366 * nvme_check_cmd_status(). 2367 * 2368 * Other possible errors are various scenarios where the async request 2369 * was aborted, or internal errors in the device. Internal errors are 2370 * reported to FMA, the command aborts need no special handling here. 2371 * 2372 * And finally, at least qemu nvme does not support async events, 2373 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. 
If so, we 2374 * will avoid posting async events. 2375 */ 2376 2377 if (nvme_check_cmd_status(cmd) != 0) { 2378 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 2379 "!async event request returned failure, sct = 0x%x, " 2380 "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 2381 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 2382 cmd->nc_cqe.cqe_sf.sf_m); 2383 2384 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2385 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 2386 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE); 2387 } 2388 2389 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2390 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC && 2391 cmd->nc_cqe.cqe_sf.sf_dnr == 1) { 2392 nvme->n_async_event_supported = B_FALSE; 2393 } 2394 2395 nvme_free_cmd(cmd); 2396 return; 2397 } 2398 2399 event.r = cmd->nc_cqe.cqe_dw0; 2400 2401 /* Clear CQE and re-submit the async request. */ 2402 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 2403 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 2404 cmd = NULL; /* cmd can no longer be used after resubmission */ 2405 2406 switch (event.b.ae_type) { 2407 case NVME_ASYNC_TYPE_ERROR: 2408 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 2409 if (!nvme_get_logpage_int(nvme, B_FALSE, 2410 (void **)&error_log, &logsize, 2411 NVME_LOGPAGE_ERROR)) { 2412 return; 2413 } 2414 } else { 2415 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 2416 "async event reply: type=0x%x logpage=0x%x", 2417 event.b.ae_type, event.b.ae_logpage); 2418 atomic_inc_32(&nvme->n_wrong_logpage); 2419 return; 2420 } 2421 2422 switch (event.b.ae_info) { 2423 case NVME_ASYNC_ERROR_INV_SQ: 2424 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 2425 "invalid submission queue"); 2426 return; 2427 2428 case NVME_ASYNC_ERROR_INV_DBL: 2429 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 2430 "invalid doorbell write value"); 2431 return; 2432 2433 case NVME_ASYNC_ERROR_DIAGFAIL: 2434 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 2435 nvme_ctrl_mark_dead(nvme, B_FALSE); 2436 atomic_inc_32(&nvme->n_diagfail_event); 2437 break; 2438 2439 case NVME_ASYNC_ERROR_PERSISTENT: 2440 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 2441 "device error"); 2442 nvme_ctrl_mark_dead(nvme, B_FALSE); 2443 atomic_inc_32(&nvme->n_persistent_event); 2444 break; 2445 2446 case NVME_ASYNC_ERROR_TRANSIENT: 2447 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 2448 "device error"); 2449 /* TODO: send ereport */ 2450 atomic_inc_32(&nvme->n_transient_event); 2451 break; 2452 2453 case NVME_ASYNC_ERROR_FW_LOAD: 2454 dev_err(nvme->n_dip, CE_WARN, 2455 "!firmware image load error"); 2456 atomic_inc_32(&nvme->n_fw_load_event); 2457 break; 2458 } 2459 break; 2460 2461 case NVME_ASYNC_TYPE_HEALTH: 2462 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 2463 if (!nvme_get_logpage_int(nvme, B_FALSE, 2464 (void **)&health_log, &logsize, 2465 NVME_LOGPAGE_HEALTH)) { 2466 return; 2467 } 2468 } else { 2469 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 2470 "async event reply: type=0x%x logpage=0x%x", event.b.ae_type, 2471 event.b.ae_logpage); 2472 atomic_inc_32(&nvme->n_wrong_logpage); 2473 return; 2474 } 2475 2476 switch (event.b.ae_info) { 2477 case NVME_ASYNC_HEALTH_RELIABILITY: 2478 dev_err(nvme->n_dip, CE_WARN, 2479 "!device reliability compromised"); 2480 /* TODO: send ereport */ 2481 atomic_inc_32(&nvme->n_reliability_event); 2482 break; 2483 2484 case NVME_ASYNC_HEALTH_TEMPERATURE: 2485 dev_err(nvme->n_dip, CE_WARN, 2486 "!temperature above threshold"); 2487 /* TODO: send ereport */
2488 atomic_inc_32(&nvme->n_temperature_event); 2489 break; 2490 2491 case NVME_ASYNC_HEALTH_SPARE: 2492 dev_err(nvme->n_dip, CE_WARN, 2493 "!spare space below threshold"); 2494 /* TODO: send ereport */ 2495 atomic_inc_32(&nvme->n_spare_event); 2496 break; 2497 } 2498 break; 2499 2500 case NVME_ASYNC_TYPE_NOTICE: 2501 switch (event.b.ae_info) { 2502 case NVME_ASYNC_NOTICE_NS_CHANGE: 2503 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) { 2504 dev_err(nvme->n_dip, CE_WARN, 2505 "!wrong logpage in async event reply: " 2506 "type=0x%x logpage=0x%x", 2507 event.b.ae_type, event.b.ae_logpage); 2508 atomic_inc_32(&nvme->n_wrong_logpage); 2509 break; 2510 } 2511 2512 dev_err(nvme->n_dip, CE_NOTE, 2513 "namespace attribute change event, " 2514 "logpage = 0x%x", event.b.ae_logpage); 2515 atomic_inc_32(&nvme->n_notice_event); 2516 2517 if (!nvme_get_logpage_int(nvme, B_FALSE, 2518 (void **)&nslist, &logsize, 2519 NVME_LOGPAGE_NSCHANGE)) { 2520 break; 2521 } 2522 2523 if (nslist->nscl_ns[0] == UINT32_MAX) { 2524 dev_err(nvme->n_dip, CE_CONT, 2525 "more than %u namespaces have changed.\n", 2526 NVME_NSCHANGE_LIST_SIZE); 2527 break; 2528 } 2529 2530 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 2531 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) { 2532 uint32_t nsid = nslist->nscl_ns[i]; 2533 2534 if (nsid == 0) /* end of list */ 2535 break; 2536 2537 dev_err(nvme->n_dip, CE_NOTE, 2538 "!namespace nvme%d/%u has changed.", 2539 ddi_get_instance(nvme->n_dip), nsid); 2540 2541 2542 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) 2543 continue; 2544 2545 nvme_mgmt_bd_start(nvme); 2546 bd_state_change(nvme_nsid2ns(nvme, 2547 nsid)->ns_bd_hdl); 2548 nvme_mgmt_bd_end(nvme); 2549 } 2550 nvme_mgmt_unlock(nvme); 2551 2552 break; 2553 2554 case NVME_ASYNC_NOTICE_FW_ACTIVATE: 2555 dev_err(nvme->n_dip, CE_NOTE, 2556 "firmware activation starting, " 2557 "logpage = 0x%x", event.b.ae_logpage); 2558 atomic_inc_32(&nvme->n_notice_event); 2559 break; 2560 2561 case NVME_ASYNC_NOTICE_TELEMETRY: 2562 dev_err(nvme->n_dip, CE_NOTE, 2563 "telemetry log changed, " 2564 "logpage = 0x%x", event.b.ae_logpage); 2565 atomic_inc_32(&nvme->n_notice_event); 2566 break; 2567 2568 case NVME_ASYNC_NOTICE_NS_ASYMM: 2569 dev_err(nvme->n_dip, CE_NOTE, 2570 "asymmetric namespace access change, " 2571 "logpage = 0x%x", event.b.ae_logpage); 2572 atomic_inc_32(&nvme->n_notice_event); 2573 break; 2574 2575 case NVME_ASYNC_NOTICE_LATENCYLOG: 2576 dev_err(nvme->n_dip, CE_NOTE, 2577 "predictable latency event aggregate log change, " 2578 "logpage = 0x%x", event.b.ae_logpage); 2579 atomic_inc_32(&nvme->n_notice_event); 2580 break; 2581 2582 case NVME_ASYNC_NOTICE_LBASTATUS: 2583 dev_err(nvme->n_dip, CE_NOTE, 2584 "LBA status information alert, " 2585 "logpage = 0x%x", event.b.ae_logpage); 2586 atomic_inc_32(&nvme->n_notice_event); 2587 break; 2588 2589 case NVME_ASYNC_NOTICE_ENDURANCELOG: 2590 dev_err(nvme->n_dip, CE_NOTE, 2591 "endurance group event aggregate log page change, " 2592 "logpage = 0x%x", event.b.ae_logpage); 2593 atomic_inc_32(&nvme->n_notice_event); 2594 break; 2595 2596 default: 2597 dev_err(nvme->n_dip, CE_WARN, 2598 "!unknown notice async event received, " 2599 "info = 0x%x, logpage = 0x%x", event.b.ae_info, 2600 event.b.ae_logpage); 2601 atomic_inc_32(&nvme->n_unknown_event); 2602 break; 2603 } 2604 break; 2605 2606 case NVME_ASYNC_TYPE_VENDOR: 2607 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 2608 "received, info = 0x%x, logpage = 0x%x", event.b.ae_info, 2609 event.b.ae_logpage); 2610 
atomic_inc_32(&nvme->n_vendor_event); 2611 break; 2612 2613 default: 2614 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 2615 "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type, 2616 event.b.ae_info, event.b.ae_logpage); 2617 atomic_inc_32(&nvme->n_unknown_event); 2618 break; 2619 } 2620 2621 if (error_log != NULL) 2622 kmem_free(error_log, logsize); 2623 2624 if (health_log != NULL) 2625 kmem_free(health_log, logsize); 2626 2627 if (nslist != NULL) 2628 kmem_free(nslist, logsize); 2629 } 2630 2631 static void 2632 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec) 2633 { 2634 mutex_enter(&cmd->nc_mutex); 2635 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd); 2636 nvme_wait_cmd(cmd, sec); 2637 mutex_exit(&cmd->nc_mutex); 2638 } 2639 2640 static void 2641 nvme_async_event(nvme_t *nvme) 2642 { 2643 nvme_cmd_t *cmd; 2644 2645 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2646 cmd->nc_sqid = 0; 2647 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 2648 cmd->nc_callback = nvme_async_event_task; 2649 cmd->nc_dontpanic = B_TRUE; 2650 2651 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 2652 } 2653 2654 /* 2655 * There are commands such as format or vendor unique commands that are going to 2656 * manipulate the data in a namespace or destroy them, we make sure that none of 2657 * the ones that will be impacted are actually attached. 2658 */ 2659 static boolean_t 2660 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid) 2661 { 2662 ASSERT(nvme_mgmt_lock_held(nvme)); 2663 ASSERT3U(nsid, !=, 0); 2664 2665 if (nsid != NVME_NSID_BCAST) { 2666 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid); 2667 return (!ns->ns_attached); 2668 } 2669 2670 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 2671 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 2672 2673 if (ns->ns_attached) { 2674 return (B_FALSE); 2675 } 2676 } 2677 2678 return (B_TRUE); 2679 } 2680 2681 static boolean_t 2682 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc) 2683 { 2684 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2685 nvme_format_nvm_t format_nvm = { 0 }; 2686 boolean_t ret; 2687 2688 format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0); 2689 format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0); 2690 2691 cmd->nc_sqid = 0; 2692 cmd->nc_callback = nvme_wakeup_cmd; 2693 cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid; 2694 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 2695 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 2696 2697 /* 2698 * We don't want to panic on any format commands. There are two reasons 2699 * for this: 2700 * 2701 * 1) All format commands are initiated by users. We don't want to panic 2702 * on user commands. 2703 * 2704 * 2) Several devices like the Samsung SM951 don't allow formatting of 2705 * all namespaces in one command and we'd prefer to handle that 2706 * gracefully. 2707 */ 2708 cmd->nc_dontpanic = B_TRUE; 2709 2710 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 2711 2712 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common) != 0) { 2713 dev_err(nvme->n_dip, CE_WARN, 2714 "!FORMAT failed with sct = %x, sc = %x", 2715 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2716 ret = B_FALSE; 2717 goto fail; 2718 } 2719 2720 ret = B_TRUE; 2721 fail: 2722 nvme_free_cmd(cmd); 2723 return (ret); 2724 } 2725 2726 /* 2727 * Retrieve a specific log page. The contents of the log page request should 2728 * have already been validated by the system. 
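 * As a sketch of the encoding done below: a 4k request is 1024 dwords,
 * stored zeros based as NUMD = 1023 and split into NUMDL (low 16 bits,
 * CDW10) and NUMDU (high 16 bits, CDW11); the 64-bit byte offset is
 * likewise split into CDW12 (low) and CDW13 (high).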
2729 */ 2730 static boolean_t 2731 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log, 2732 void **buf) 2733 { 2734 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2735 nvme_getlogpage_dw10_t dw10; 2736 uint32_t offlo, offhi; 2737 nvme_getlogpage_dw11_t dw11; 2738 nvme_getlogpage_dw14_t dw14; 2739 uint32_t ndw; 2740 boolean_t ret = B_FALSE; 2741 2742 bzero(&dw10, sizeof (dw10)); 2743 bzero(&dw11, sizeof (dw11)); 2744 bzero(&dw14, sizeof (dw14)); 2745 2746 cmd->nc_sqid = 0; 2747 cmd->nc_callback = nvme_wakeup_cmd; 2748 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 2749 cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid; 2750 2751 if (user) 2752 cmd->nc_dontpanic = B_TRUE; 2753 2754 /* 2755 * The size field is the number of double words, but is a zeros based 2756 * value. We need to store our actual value minus one. 2757 */ 2758 ndw = (uint32_t)(log->nigl_len / 4); 2759 ASSERT3U(ndw, >, 0); 2760 ndw--; 2761 2762 dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0); 2763 dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0); 2764 dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0); 2765 dw10.b.lp_lnumdl = bitx32(ndw, 15, 0); 2766 2767 dw11.b.lp_numdu = bitx32(ndw, 31, 16); 2768 dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0); 2769 2770 offlo = bitx64(log->nigl_offset, 31, 0); 2771 offhi = bitx64(log->nigl_offset, 63, 32); 2772 2773 dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0); 2774 2775 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2776 cmd->nc_sqe.sqe_cdw11 = dw11.r; 2777 cmd->nc_sqe.sqe_cdw12 = offlo; 2778 cmd->nc_sqe.sqe_cdw13 = offhi; 2779 cmd->nc_sqe.sqe_cdw14 = dw14.r; 2780 2781 if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ, 2782 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2783 dev_err(nvme->n_dip, CE_WARN, 2784 "!nvme_zalloc_dma failed for GET LOG PAGE"); 2785 ret = nvme_ioctl_error(&log->nigl_common, 2786 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 2787 goto fail; 2788 } 2789 2790 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) { 2791 ret = nvme_ioctl_error(&log->nigl_common, 2792 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 2793 goto fail; 2794 } 2795 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2796 2797 if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) { 2798 if (!user) { 2799 dev_err(nvme->n_dip, CE_WARN, 2800 "!GET LOG PAGE failed with sct = %x, sc = %x", 2801 cmd->nc_cqe.cqe_sf.sf_sct, 2802 cmd->nc_cqe.cqe_sf.sf_sc); 2803 } 2804 ret = B_FALSE; 2805 goto fail; 2806 } 2807 2808 *buf = kmem_alloc(log->nigl_len, KM_SLEEP); 2809 bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len); 2810 2811 ret = B_TRUE; 2812 fail: 2813 nvme_free_cmd(cmd); 2814 2815 return (ret); 2816 } 2817 2818 /* 2819 * This is an internal wrapper for when the kernel wants to get a log page. 2820 * Currently this assumes that the only thing that is required is the log page 2821 * ID. If more information is required, we'll be better served to just use the 2822 * general ioctl interface. 
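 * A minimal usage sketch, mirroring what the async event handler does
 * (names are illustrative only):
 *
 *	nvme_health_log_t *hlog = NULL;
 *	size_t hsize = 0;
 *
 *	if (nvme_get_logpage_int(nvme, B_FALSE, (void **)&hlog, &hsize,
 *	    NVME_LOGPAGE_HEALTH)) {
 *		(inspect hlog, then free it)
 *		kmem_free(hlog, hsize);
 *	}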
2823 */ 2824 static boolean_t 2825 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 2826 uint8_t lid) 2827 { 2828 const nvme_log_page_info_t *info = NULL; 2829 nvme_ioctl_get_logpage_t log; 2830 nvme_valid_ctrl_data_t data; 2831 boolean_t bret; 2832 bool var; 2833 2834 for (size_t i = 0; i < nvme_std_log_npages; i++) { 2835 if (nvme_std_log_pages[i].nlpi_lid == lid && 2836 nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) { 2837 info = &nvme_std_log_pages[i]; 2838 break; 2839 } 2840 } 2841 2842 if (info == NULL) { 2843 return (B_FALSE); 2844 } 2845 2846 data.vcd_vers = &nvme->n_version; 2847 data.vcd_id = nvme->n_idctl; 2848 bzero(&log, sizeof (log)); 2849 log.nigl_common.nioc_nsid = NVME_NSID_BCAST; 2850 log.nigl_csi = info->nlpi_csi; 2851 log.nigl_lid = info->nlpi_lid; 2852 log.nigl_len = nvme_log_page_info_size(info, &data, &var); 2853 2854 /* 2855 * We only support getting standard fixed-length log pages through the 2856 * kernel interface at this time. If a log page either has an unknown 2857 * size or has a variable length, then we cannot get it. 2858 */ 2859 if (log.nigl_len == 0 || var) { 2860 return (B_FALSE); 2861 } 2862 2863 bret = nvme_get_logpage(nvme, user, &log, buf); 2864 if (!bret) { 2865 return (B_FALSE); 2866 } 2867 2868 *bufsize = log.nigl_len; 2869 return (B_TRUE); 2870 } 2871 2872 static boolean_t 2873 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc, 2874 void **buf) 2875 { 2876 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2877 boolean_t ret = B_FALSE; 2878 nvme_identify_dw10_t dw10; 2879 2880 ASSERT3P(buf, !=, NULL); 2881 2882 bzero(&dw10, sizeof (dw10)); 2883 2884 cmd->nc_sqid = 0; 2885 cmd->nc_callback = nvme_wakeup_cmd; 2886 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 2887 cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid; 2888 2889 dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0); 2890 dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0); 2891 2892 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2893 2894 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 2895 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2896 dev_err(nvme->n_dip, CE_WARN, 2897 "!nvme_zalloc_dma failed for IDENTIFY"); 2898 ret = nvme_ioctl_error(&ioc->nid_common, 2899 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 2900 goto fail; 2901 } 2902 2903 if (cmd->nc_dma->nd_ncookie > 2) { 2904 dev_err(nvme->n_dip, CE_WARN, 2905 "!too many DMA cookies for IDENTIFY"); 2906 atomic_inc_32(&nvme->n_too_many_cookies); 2907 ret = nvme_ioctl_error(&ioc->nid_common, 2908 NVME_IOCTL_E_BAD_PRP, 0, 0); 2909 goto fail; 2910 } 2911 2912 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 2913 if (cmd->nc_dma->nd_ncookie > 1) { 2914 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2915 &cmd->nc_dma->nd_cookie); 2916 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2917 cmd->nc_dma->nd_cookie.dmac_laddress; 2918 } 2919 2920 if (user) 2921 cmd->nc_dontpanic = B_TRUE; 2922 2923 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2924 2925 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) { 2926 dev_err(nvme->n_dip, CE_WARN, 2927 "!IDENTIFY failed with sct = %x, sc = %x", 2928 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2929 ret = B_FALSE; 2930 goto fail; 2931 } 2932 2933 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 2934 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE); 2935 ret = B_TRUE; 2936 2937 fail: 2938 nvme_free_cmd(cmd); 2939 2940 return (ret); 2941 } 2942 2943 static boolean_t 2944 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf) 2945 { 2946 
nvme_ioctl_identify_t id; 2947 2948 bzero(&id, sizeof (nvme_ioctl_identify_t)); 2949 id.nid_common.nioc_nsid = nsid; 2950 id.nid_cns = cns; 2951 2952 return (nvme_identify(nvme, B_FALSE, &id, buf)); 2953 } 2954 2955 static int 2956 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2957 uint32_t val, uint32_t *res) 2958 { 2959 _NOTE(ARGUNUSED(nsid)); 2960 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2961 int ret = EINVAL; 2962 2963 ASSERT(res != NULL); 2964 2965 cmd->nc_sqid = 0; 2966 cmd->nc_callback = nvme_wakeup_cmd; 2967 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 2968 cmd->nc_sqe.sqe_cdw10 = feature; 2969 cmd->nc_sqe.sqe_cdw11 = val; 2970 2971 if (user) 2972 cmd->nc_dontpanic = B_TRUE; 2973 2974 switch (feature) { 2975 case NVME_FEAT_WRITE_CACHE: 2976 if (!nvme->n_write_cache_present) 2977 goto fail; 2978 break; 2979 2980 case NVME_FEAT_NQUEUES: 2981 break; 2982 2983 default: 2984 goto fail; 2985 } 2986 2987 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2988 2989 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2990 dev_err(nvme->n_dip, CE_WARN, 2991 "!SET FEATURES %d failed with sct = %x, sc = %x", 2992 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2993 cmd->nc_cqe.cqe_sf.sf_sc); 2994 goto fail; 2995 } 2996 2997 *res = cmd->nc_cqe.cqe_dw0; 2998 2999 fail: 3000 nvme_free_cmd(cmd); 3001 return (ret); 3002 } 3003 3004 static int 3005 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 3006 { 3007 nvme_write_cache_t nwc = { 0 }; 3008 3009 if (enable) 3010 nwc.b.wc_wce = 1; 3011 3012 /* 3013 * We've seen some cases where this fails due to us being told we've 3014 * specified an invalid namespace when operating against the Xen xcp-ng 3015 * qemu NVMe virtual device. As such, we generally ensure that trying to 3016 * enable this doesn't lead us to panic. It's not completely clear why 3017 * specifying namespace zero here fails, but not when we're setting the 3018 * number of queues below. 3019 */ 3020 return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE, 3021 nwc.r, &nwc.r)); 3022 } 3023 3024 static int 3025 nvme_set_nqueues(nvme_t *nvme) 3026 { 3027 nvme_nqueues_t nq = { 0 }; 3028 int ret; 3029 3030 /* 3031 * The default is to allocate one completion queue per vector. 3032 */ 3033 if (nvme->n_completion_queues == -1) 3034 nvme->n_completion_queues = nvme->n_intr_cnt; 3035 3036 /* 3037 * There is no point in having more completion queues than 3038 * interrupt vectors. 3039 */ 3040 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 3041 nvme->n_intr_cnt); 3042 3043 /* 3044 * The default is to use one submission queue per completion queue. 3045 */ 3046 if (nvme->n_submission_queues == -1) 3047 nvme->n_submission_queues = nvme->n_completion_queues; 3048 3049 /* 3050 * There is no point in having more completion queues than 3051 * submission queues. 3052 */ 3053 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 3054 nvme->n_submission_queues); 3055 3056 ASSERT(nvme->n_submission_queues > 0); 3057 ASSERT(nvme->n_completion_queues > 0); 3058 3059 nq.b.nq_nsq = nvme->n_submission_queues - 1; 3060 nq.b.nq_ncq = nvme->n_completion_queues - 1; 3061 3062 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r, 3063 &nq.r); 3064 3065 if (ret == 0) { 3066 /* 3067 * Never use more than the requested number of queues. 
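 * The controller answers in dw0 of the completion entry with the number
 * of queues it actually allocated, again zeros based. For example, asking
 * for 8 queues encodes nq_nsq = nq_ncq = 7; if the reply carries 3, the
 * device granted 4 of each and the counts are clamped accordingly.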
3068 */ 3069 nvme->n_submission_queues = MIN(nvme->n_submission_queues, 3070 nq.b.nq_nsq + 1); 3071 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 3072 nq.b.nq_ncq + 1); 3073 } 3074 3075 return (ret); 3076 } 3077 3078 static int 3079 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq) 3080 { 3081 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 3082 nvme_create_queue_dw10_t dw10 = { 0 }; 3083 nvme_create_cq_dw11_t c_dw11 = { 0 }; 3084 int ret; 3085 3086 dw10.b.q_qid = cq->ncq_id; 3087 dw10.b.q_qsize = cq->ncq_nentry - 1; 3088 3089 c_dw11.b.cq_pc = 1; 3090 c_dw11.b.cq_ien = 1; 3091 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt; 3092 3093 cmd->nc_sqid = 0; 3094 cmd->nc_callback = nvme_wakeup_cmd; 3095 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 3096 cmd->nc_sqe.sqe_cdw10 = dw10.r; 3097 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 3098 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress; 3099 3100 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3101 3102 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 3103 dev_err(nvme->n_dip, CE_WARN, 3104 "!CREATE CQUEUE failed with sct = %x, sc = %x", 3105 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 3106 } 3107 3108 nvme_free_cmd(cmd); 3109 3110 return (ret); 3111 } 3112 3113 static int 3114 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 3115 { 3116 nvme_cq_t *cq = qp->nq_cq; 3117 nvme_cmd_t *cmd; 3118 nvme_create_queue_dw10_t dw10 = { 0 }; 3119 nvme_create_sq_dw11_t s_dw11 = { 0 }; 3120 int ret; 3121 3122 /* 3123 * It is possible to have more qpairs than completion queues, 3124 * and when the idx > ncq_id, that completion queue is shared 3125 * and has already been created. 3126 */ 3127 if (idx <= cq->ncq_id && 3128 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 3129 return (DDI_FAILURE); 3130 3131 dw10.b.q_qid = idx; 3132 dw10.b.q_qsize = qp->nq_nentry - 1; 3133 3134 s_dw11.b.sq_pc = 1; 3135 s_dw11.b.sq_cqid = cq->ncq_id; 3136 3137 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 3138 cmd->nc_sqid = 0; 3139 cmd->nc_callback = nvme_wakeup_cmd; 3140 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 3141 cmd->nc_sqe.sqe_cdw10 = dw10.r; 3142 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 3143 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 3144 3145 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 3146 3147 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 3148 dev_err(nvme->n_dip, CE_WARN, 3149 "!CREATE SQUEUE failed with sct = %x, sc = %x", 3150 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 3151 } 3152 3153 nvme_free_cmd(cmd); 3154 3155 return (ret); 3156 } 3157 3158 static boolean_t 3159 nvme_reset(nvme_t *nvme, boolean_t quiesce) 3160 { 3161 nvme_reg_csts_t csts; 3162 int i; 3163 3164 nvme_put32(nvme, NVME_REG_CC, 0); 3165 3166 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3167 if (csts.b.csts_rdy == 1) { 3168 nvme_put32(nvme, NVME_REG_CC, 0); 3169 3170 /* 3171 * The timeout value is from the Controller Capabilities 3172 * register (CAP.TO, section 3.1.1). This is the worst case 3173 * time to wait for CSTS.RDY to transition from 1 to 0 after 3174 * CC.EN transitions from 1 to 0. 3175 * 3176 * The timeout units are in 500 ms units, and we are delaying 3177 * in 50ms chunks, hence counting to n_timeout * 10. 3178 */ 3179 for (i = 0; i < nvme->n_timeout * 10; i++) { 3180 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3181 if (csts.b.csts_rdy == 0) 3182 break; 3183 3184 /* 3185 * Quiescing drivers should not use locks or timeouts, 3186 * so if this is the quiesce path, use a quiesce-safe 3187 * delay. 
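 * drv_usecwait() busy-waits and is therefore safe from quiesce(9E)
 * context, whereas delay() blocks the calling thread and relies on the
 * clock still running.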
3188 */ 3189 if (quiesce) { 3190 drv_usecwait(50000); 3191 } else { 3192 delay(drv_usectohz(50000)); 3193 } 3194 } 3195 } 3196 3197 nvme_put32(nvme, NVME_REG_AQA, 0); 3198 nvme_put32(nvme, NVME_REG_ASQ, 0); 3199 nvme_put32(nvme, NVME_REG_ACQ, 0); 3200 3201 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3202 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE); 3203 } 3204 3205 static void 3206 nvme_shutdown(nvme_t *nvme, boolean_t quiesce) 3207 { 3208 nvme_reg_cc_t cc; 3209 nvme_reg_csts_t csts; 3210 int i; 3211 3212 cc.r = nvme_get32(nvme, NVME_REG_CC); 3213 cc.b.cc_shn = NVME_CC_SHN_NORMAL; 3214 nvme_put32(nvme, NVME_REG_CC, cc.r); 3215 3216 for (i = 0; i < 10; i++) { 3217 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3218 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 3219 break; 3220 3221 if (quiesce) { 3222 drv_usecwait(100000); 3223 } else { 3224 delay(drv_usectohz(100000)); 3225 } 3226 } 3227 } 3228 3229 /* 3230 * Return length of string without trailing spaces. 3231 */ 3232 static int 3233 nvme_strlen(const char *str, int len) 3234 { 3235 if (len <= 0) 3236 return (0); 3237 3238 while (str[--len] == ' ') 3239 ; 3240 3241 return (++len); 3242 } 3243 3244 static void 3245 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val) 3246 { 3247 ulong_t bsize = 0; 3248 char *msg = ""; 3249 3250 if (ddi_strtoul(val, NULL, 0, &bsize) != 0) 3251 goto err; 3252 3253 if (!ISP2(bsize)) { 3254 msg = ": not a power of 2"; 3255 goto err; 3256 } 3257 3258 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) { 3259 msg = ": too low"; 3260 goto err; 3261 } 3262 3263 nvme->n_min_block_size = bsize; 3264 return; 3265 3266 err: 3267 dev_err(nvme->n_dip, CE_WARN, 3268 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' " 3269 "for model '%s'%s", val, model, msg); 3270 3271 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 3272 } 3273 3274 static void 3275 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val, 3276 boolean_t *b) 3277 { 3278 if (strcmp(val, "on") == 0 || 3279 strcmp(val, "true") == 0) 3280 *b = B_TRUE; 3281 else if (strcmp(val, "off") == 0 || 3282 strcmp(val, "false") == 0) 3283 *b = B_FALSE; 3284 else 3285 dev_err(nvme->n_dip, CE_WARN, 3286 "!nvme-config-list: invalid value for %s '%s'" 3287 " for model '%s', ignoring", name, val, model); 3288 } 3289 3290 static void 3291 nvme_config_list(nvme_t *nvme) 3292 { 3293 char **config_list; 3294 uint_t nelem; 3295 int rv, i; 3296 3297 /* 3298 * We're following the pattern of 'sd-config-list' here, but extend it. 3299 * Instead of two we have three separate strings for "model", "fwrev", 3300 * and "name-value-list". 
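 * A hypothetical driver.conf entry (model and firmware strings are made
 * up for illustration) would look like this, where an empty fwrev string
 * matches any firmware revision:
 *
 *	nvme-config-list =
 *	    "SAMPLE NVMe SSD",	"FW1.0,FW1.1",
 *		"min-phys-block-size:4096,volatile-write-cache:off",
 *	    "OTHER NVMe SSD",	"",
 *		"ignore-unknown-vendor-status:on";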
3301 */ 3302 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip, 3303 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem); 3304 3305 if (rv != DDI_PROP_SUCCESS) { 3306 if (rv == DDI_PROP_CANNOT_DECODE) { 3307 dev_err(nvme->n_dip, CE_WARN, 3308 "!nvme-config-list: cannot be decoded"); 3309 } 3310 3311 return; 3312 } 3313 3314 if ((nelem % 3) != 0) { 3315 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be " 3316 "triplets of <model>/<fwrev>/<name-value-list> strings "); 3317 goto out; 3318 } 3319 3320 for (i = 0; i < nelem; i += 3) { 3321 char *model = config_list[i]; 3322 char *fwrev = config_list[i + 1]; 3323 char *nvp, *save_nv; 3324 int id_model_len, id_fwrev_len; 3325 3326 id_model_len = nvme_strlen(nvme->n_idctl->id_model, 3327 sizeof (nvme->n_idctl->id_model)); 3328 3329 if (strlen(model) != id_model_len) 3330 continue; 3331 3332 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0) 3333 continue; 3334 3335 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev, 3336 sizeof (nvme->n_idctl->id_fwrev)); 3337 3338 if (strlen(fwrev) != 0) { 3339 boolean_t match = B_FALSE; 3340 char *fwr, *last_fw; 3341 3342 for (fwr = strtok_r(fwrev, ",", &last_fw); 3343 fwr != NULL; 3344 fwr = strtok_r(NULL, ",", &last_fw)) { 3345 if (strlen(fwr) != id_fwrev_len) 3346 continue; 3347 3348 if (strncmp(fwr, nvme->n_idctl->id_fwrev, 3349 id_fwrev_len) == 0) 3350 match = B_TRUE; 3351 } 3352 3353 if (!match) 3354 continue; 3355 } 3356 3357 /* 3358 * We should now have a comma-separated list of name:value 3359 * pairs. 3360 */ 3361 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv); 3362 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) { 3363 char *name = nvp; 3364 char *val = strchr(nvp, ':'); 3365 3366 if (val == NULL || name == val) { 3367 dev_err(nvme->n_dip, CE_WARN, 3368 "!nvme-config-list: <name-value-list> " 3369 "for model '%s' is malformed", model); 3370 goto out; 3371 } 3372 3373 /* 3374 * Null-terminate 'name', move 'val' past ':' sep. 3375 */ 3376 *val++ = '\0'; 3377 3378 /* 3379 * Process the name:val pairs that we know about. 3380 */ 3381 if (strcmp(name, "ignore-unknown-vendor-status") == 0) { 3382 nvme_config_boolean(nvme, model, name, val, 3383 &nvme->n_ignore_unknown_vendor_status); 3384 } else if (strcmp(name, "min-phys-block-size") == 0) { 3385 nvme_config_min_block_size(nvme, model, val); 3386 } else if (strcmp(name, "volatile-write-cache") == 0) { 3387 nvme_config_boolean(nvme, model, name, val, 3388 &nvme->n_write_cache_enabled); 3389 } else { 3390 /* 3391 * Unknown 'name'. 3392 */ 3393 dev_err(nvme->n_dip, CE_WARN, 3394 "!nvme-config-list: unknown config '%s' " 3395 "for model '%s', ignoring", name, model); 3396 } 3397 } 3398 } 3399 3400 out: 3401 ddi_prop_free(config_list); 3402 } 3403 3404 static void 3405 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 3406 { 3407 /* 3408 * Section 7.7 of the spec describes how to get a unique ID for 3409 * the controller: the vendor ID, the model name and the serial 3410 * number shall be unique when combined. 3411 * 3412 * If a namespace has no EUI64 we use the above and add the hex 3413 * namespace ID to get a unique ID for the namespace. 
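 * For example, namespace 1 on a controller with vendor ID 0x1234 ends up
 * with a devid of the form "1234-<model>-<serial>-1", per the
 * "%4X-%s-%s-%X" format used below (the values here are made up).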
3414 */ 3415 char model[sizeof (nvme->n_idctl->id_model) + 1]; 3416 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 3417 3418 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 3419 bcopy(nvme->n_idctl->id_serial, serial, 3420 sizeof (nvme->n_idctl->id_serial)); 3421 3422 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 3423 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 3424 3425 nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X", 3426 nvme->n_idctl->id_vid, model, serial, nsid); 3427 } 3428 3429 static nvme_identify_nsid_list_t * 3430 nvme_update_nsid_list(nvme_t *nvme, int cns) 3431 { 3432 nvme_identify_nsid_list_t *nslist; 3433 3434 /* 3435 * We currently don't handle cases where there are more than 3436 * 1024 active namespaces, requiring several IDENTIFY commands. 3437 */ 3438 if (nvme_identify_int(nvme, 0, cns, (void **)&nslist)) 3439 return (nslist); 3440 3441 return (NULL); 3442 } 3443 3444 nvme_namespace_t * 3445 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid) 3446 { 3447 ASSERT3U(nsid, !=, 0); 3448 ASSERT3U(nsid, <=, nvme->n_namespace_count); 3449 return (&nvme->n_ns[nsid - 1]); 3450 } 3451 3452 static boolean_t 3453 nvme_allocated_ns(nvme_namespace_t *ns) 3454 { 3455 nvme_t *nvme = ns->ns_nvme; 3456 uint32_t i; 3457 3458 ASSERT(nvme_mgmt_lock_held(nvme)); 3459 3460 /* 3461 * If supported, update the list of allocated namespace IDs. 3462 */ 3463 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) && 3464 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) { 3465 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme, 3466 NVME_IDENTIFY_NSID_ALLOC_LIST); 3467 boolean_t found = B_FALSE; 3468 3469 /* 3470 * When namespace management is supported, this really shouldn't 3471 * be NULL. Treat all namespaces as allocated if it is. 3472 */ 3473 if (nslist == NULL) 3474 return (B_TRUE); 3475 3476 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) { 3477 if (ns->ns_id == 0) 3478 break; 3479 3480 if (ns->ns_id == nslist->nl_nsid[i]) 3481 found = B_TRUE; 3482 } 3483 3484 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE); 3485 return (found); 3486 } else { 3487 /* 3488 * If namespace management isn't supported, report all 3489 * namespaces as allocated. 3490 */ 3491 return (B_TRUE); 3492 } 3493 } 3494 3495 static boolean_t 3496 nvme_active_ns(nvme_namespace_t *ns) 3497 { 3498 nvme_t *nvme = ns->ns_nvme; 3499 uint64_t *ptr; 3500 uint32_t i; 3501 3502 ASSERT(nvme_mgmt_lock_held(nvme)); 3503 3504 /* 3505 * If supported, update the list of active namespace IDs. 3506 */ 3507 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) { 3508 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme, 3509 NVME_IDENTIFY_NSID_LIST); 3510 boolean_t found = B_FALSE; 3511 3512 /* 3513 * The active namespace ID list really shouldn't be NULL on an 3514 * NVMe 1.1+ controller. Treat all namespaces as active if it is. 3515 */ 3516 if (nslist == NULL) 3517 return (B_TRUE); 3518 3519 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) { 3520 if (ns->ns_id == 0) 3521 break; 3522 3523 if (ns->ns_id == nslist->nl_nsid[i]) 3524 found = B_TRUE; 3525 } 3526 3527 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE); 3528 return (found); 3529 } 3530 3531 /* 3532 * Workaround for revision 1.0: 3533 * Check whether the IDENTIFY NAMESPACE data is zero-filled. 
3534 */ 3535 for (ptr = (uint64_t *)ns->ns_idns; 3536 ptr != (uint64_t *)(ns->ns_idns + 1); 3537 ptr++) { 3538 if (*ptr != 0) { 3539 return (B_TRUE); 3540 } 3541 } 3542 3543 return (B_FALSE); 3544 } 3545 3546 static int 3547 nvme_init_ns(nvme_t *nvme, uint32_t nsid) 3548 { 3549 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid); 3550 nvme_identify_nsid_t *idns; 3551 boolean_t was_ignored; 3552 int last_rp; 3553 3554 ns->ns_nvme = nvme; 3555 3556 ASSERT(nvme_mgmt_lock_held(nvme)); 3557 3558 /* 3559 * Because we might rescan a namespace and this will fail after boot 3560 * that'd leave us in a bad spot. We need to do something about this 3561 * longer term, but it's not clear how exactly we would recover right 3562 * now. 3563 */ 3564 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID, 3565 (void **)&idns)) { 3566 dev_err(nvme->n_dip, CE_WARN, 3567 "!failed to identify namespace %d", nsid); 3568 return (DDI_FAILURE); 3569 } 3570 3571 if (ns->ns_idns != NULL) 3572 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t)); 3573 3574 ns->ns_idns = idns; 3575 ns->ns_id = nsid; 3576 3577 was_ignored = ns->ns_ignore; 3578 3579 ns->ns_allocated = nvme_allocated_ns(ns); 3580 ns->ns_active = nvme_active_ns(ns); 3581 3582 ns->ns_block_count = idns->id_nsize; 3583 ns->ns_block_size = 3584 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 3585 ns->ns_best_block_size = ns->ns_block_size; 3586 3587 /* 3588 * Get the EUI64 if present. 3589 */ 3590 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 3591 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 3592 3593 /* 3594 * Get the NGUID if present. 3595 */ 3596 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) 3597 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid)); 3598 3599 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 3600 if (*(uint64_t *)ns->ns_eui64 == 0) 3601 nvme_prepare_devid(nvme, ns->ns_id); 3602 3603 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id); 3604 3605 /* 3606 * Find the LBA format with no metadata and the best relative 3607 * performance. A value of 3 means "degraded", 0 is best. 3608 */ 3609 last_rp = 3; 3610 for (int j = 0; j <= idns->id_nlbaf; j++) { 3611 if (idns->id_lbaf[j].lbaf_lbads == 0) 3612 break; 3613 if (idns->id_lbaf[j].lbaf_ms != 0) 3614 continue; 3615 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 3616 continue; 3617 last_rp = idns->id_lbaf[j].lbaf_rp; 3618 ns->ns_best_block_size = 3619 1 << idns->id_lbaf[j].lbaf_lbads; 3620 } 3621 3622 if (ns->ns_best_block_size < nvme->n_min_block_size) 3623 ns->ns_best_block_size = nvme->n_min_block_size; 3624 3625 was_ignored = ns->ns_ignore; 3626 3627 /* 3628 * We currently don't support namespaces that are inactive, or use 3629 * either: 3630 * - protection information 3631 * - illegal block size (< 512) 3632 */ 3633 if (!ns->ns_active) { 3634 ns->ns_ignore = B_TRUE; 3635 } else if (idns->id_dps.dp_pinfo) { 3636 dev_err(nvme->n_dip, CE_WARN, 3637 "!ignoring namespace %d, unsupported feature: " 3638 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 3639 ns->ns_ignore = B_TRUE; 3640 } else if (ns->ns_block_size < 512) { 3641 dev_err(nvme->n_dip, CE_WARN, 3642 "!ignoring namespace %d, unsupported block size %"PRIu64, 3643 nsid, (uint64_t)ns->ns_block_size); 3644 ns->ns_ignore = B_TRUE; 3645 } else { 3646 ns->ns_ignore = B_FALSE; 3647 } 3648 3649 /* 3650 * Keep a count of namespaces which are attachable. 3651 * See comments in nvme_bd_driveinfo() to understand its effect. 3652 */ 3653 if (was_ignored) { 3654 /* 3655 * Previously ignored, but now not. Count it. 
3656 */ 3657 if (!ns->ns_ignore) 3658 nvme->n_namespaces_attachable++; 3659 } else { 3660 /* 3661 * Wasn't ignored previously, but now needs to be. 3662 * Discount it. 3663 */ 3664 if (ns->ns_ignore) 3665 nvme->n_namespaces_attachable--; 3666 } 3667 3668 return (DDI_SUCCESS); 3669 } 3670 3671 static boolean_t 3672 nvme_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com) 3673 { 3674 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid); 3675 int ret; 3676 3677 ASSERT(nvme_mgmt_lock_held(nvme)); 3678 3679 if (ns->ns_ignore) { 3680 return (nvme_ioctl_error(com, NVME_IOCTL_E_UNSUP_ATTACH_NS, 3681 0, 0)); 3682 } 3683 3684 if (ns->ns_bd_hdl == NULL) { 3685 bd_ops_t ops = nvme_bd_ops; 3686 3687 if (!nvme->n_idctl->id_oncs.on_dset_mgmt) 3688 ops.o_free_space = NULL; 3689 3690 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr, 3691 KM_SLEEP); 3692 3693 if (ns->ns_bd_hdl == NULL) { 3694 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev " 3695 "handle for namespace id %u", com->nioc_nsid); 3696 return (nvme_ioctl_error(com, 3697 NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0)); 3698 } 3699 } 3700 3701 nvme_mgmt_bd_start(nvme); 3702 ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl); 3703 nvme_mgmt_bd_end(nvme); 3704 if (ret != DDI_SUCCESS) { 3705 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH, 3706 0, 0)); 3707 } 3708 3709 ns->ns_attached = B_TRUE; 3710 3711 return (B_TRUE); 3712 } 3713 3714 static boolean_t 3715 nvme_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com) 3716 { 3717 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid); 3718 int ret; 3719 3720 ASSERT(nvme_mgmt_lock_held(nvme)); 3721 3722 if (ns->ns_ignore || !ns->ns_attached) 3723 return (B_TRUE); 3724 3725 nvme_mgmt_bd_start(nvme); 3726 ASSERT3P(ns->ns_bd_hdl, !=, NULL); 3727 ret = bd_detach_handle(ns->ns_bd_hdl); 3728 nvme_mgmt_bd_end(nvme); 3729 3730 if (ret != DDI_SUCCESS) { 3731 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0, 3732 0)); 3733 } 3734 3735 ns->ns_attached = B_FALSE; 3736 return (B_TRUE); 3737 3738 } 3739 3740 /* 3741 * Rescan the namespace information associated with the namespaces indicated by 3742 * ioc. They should not be attached to blkdev right now. 
3743 */ 3744 static void 3745 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid) 3746 { 3747 ASSERT(nvme_mgmt_lock_held(nvme)); 3748 ASSERT3U(nsid, !=, 0); 3749 3750 if (nsid != NVME_NSID_BCAST) { 3751 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid); 3752 3753 ASSERT3U(ns->ns_attached, ==, B_FALSE); 3754 (void) nvme_init_ns(nvme, nsid); 3755 return; 3756 } 3757 3758 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 3759 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 3760 3761 ASSERT3U(ns->ns_attached, ==, B_FALSE); 3762 (void) nvme_init_ns(nvme, i); 3763 } 3764 } 3765 3766 typedef struct nvme_quirk_table { 3767 uint16_t nq_vendor_id; 3768 uint16_t nq_device_id; 3769 nvme_quirk_t nq_quirks; 3770 } nvme_quirk_table_t; 3771 3772 static const nvme_quirk_table_t nvme_quirks[] = { 3773 { 0x1987, 0x5018, NVME_QUIRK_START_CID }, /* Phison E18 */ 3774 }; 3775 3776 static void 3777 nvme_detect_quirks(nvme_t *nvme) 3778 { 3779 for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) { 3780 const nvme_quirk_table_t *nqt = &nvme_quirks[i]; 3781 3782 if (nqt->nq_vendor_id == nvme->n_vendor_id && 3783 nqt->nq_device_id == nvme->n_device_id) { 3784 nvme->n_quirks = nqt->nq_quirks; 3785 return; 3786 } 3787 } 3788 } 3789 3790 static int 3791 nvme_init(nvme_t *nvme) 3792 { 3793 nvme_reg_cc_t cc = { 0 }; 3794 nvme_reg_aqa_t aqa = { 0 }; 3795 nvme_reg_asq_t asq = { 0 }; 3796 nvme_reg_acq_t acq = { 0 }; 3797 nvme_reg_cap_t cap; 3798 nvme_reg_vs_t vs; 3799 nvme_reg_csts_t csts; 3800 int i = 0; 3801 uint16_t nqueues; 3802 uint_t tq_threads; 3803 char model[sizeof (nvme->n_idctl->id_model) + 1]; 3804 char *vendor, *product; 3805 uint32_t nsid; 3806 3807 /* Check controller version */ 3808 vs.r = nvme_get32(nvme, NVME_REG_VS); 3809 nvme->n_version.v_major = vs.b.vs_mjr; 3810 nvme->n_version.v_minor = vs.b.vs_mnr; 3811 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 3812 nvme->n_version.v_major, nvme->n_version.v_minor); 3813 3814 if (nvme->n_version.v_major > nvme_version_major) { 3815 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 3816 nvme_version_major); 3817 if (nvme->n_strict_version) 3818 goto fail; 3819 } 3820 3821 /* retrieve controller configuration */ 3822 cap.r = nvme_get64(nvme, NVME_REG_CAP); 3823 3824 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 3825 dev_err(nvme->n_dip, CE_WARN, 3826 "!NVM command set not supported by hardware"); 3827 goto fail; 3828 } 3829 3830 nvme->n_nssr_supported = cap.b.cap_nssrs; 3831 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 3832 nvme->n_timeout = cap.b.cap_to; 3833 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 3834 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 3835 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 3836 3837 /* 3838 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 3839 * the base page size of 4k (1<<12), so add 12 here to get the real 3840 * page size value. 3841 */ 3842 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 3843 cap.b.cap_mpsmax + 12); 3844 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 3845 3846 /* 3847 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 3848 */ 3849 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 3850 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 3851 3852 /* 3853 * Set up PRP DMA to transfer 1 page-aligned page at a time. 3854 * Maxxfer may be increased after we identified the controller limits. 
3855 */ 3856 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 3857 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 3858 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 3859 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 3860 3861 /* 3862 * Reset controller if it's still in ready state. 3863 */ 3864 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 3865 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 3866 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 3867 nvme->n_dead = B_TRUE; 3868 goto fail; 3869 } 3870 3871 /* 3872 * Create the cq array with one completion queue to be assigned 3873 * to the admin queue pair and a limited number of taskqs (4). 3874 */ 3875 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) != 3876 DDI_SUCCESS) { 3877 dev_err(nvme->n_dip, CE_WARN, 3878 "!failed to pre-allocate admin completion queue"); 3879 goto fail; 3880 } 3881 /* 3882 * Create the admin queue pair. 3883 */ 3884 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 3885 != DDI_SUCCESS) { 3886 dev_err(nvme->n_dip, CE_WARN, 3887 "!unable to allocate admin qpair"); 3888 goto fail; 3889 } 3890 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 3891 nvme->n_ioq[0] = nvme->n_adminq; 3892 3893 if (nvme->n_quirks & NVME_QUIRK_START_CID) 3894 nvme->n_adminq->nq_next_cmd++; 3895 3896 nvme->n_progress |= NVME_ADMIN_QUEUE; 3897 3898 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3899 "admin-queue-len", nvme->n_admin_queue_len); 3900 3901 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 3902 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 3903 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 3904 3905 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 3906 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 3907 3908 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 3909 nvme_put64(nvme, NVME_REG_ASQ, asq); 3910 nvme_put64(nvme, NVME_REG_ACQ, acq); 3911 3912 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 3913 cc.b.cc_css = 0; /* use NVM command set */ 3914 cc.b.cc_mps = nvme->n_pageshift - 12; 3915 cc.b.cc_shn = 0; /* no shutdown in progress */ 3916 cc.b.cc_en = 1; /* enable controller */ 3917 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 3918 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 3919 3920 nvme_put32(nvme, NVME_REG_CC, cc.r); 3921 3922 /* 3923 * Wait for the controller to become ready. 3924 */ 3925 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3926 if (csts.b.csts_rdy == 0) { 3927 for (i = 0; i != nvme->n_timeout * 10; i++) { 3928 delay(drv_usectohz(50000)); 3929 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3930 3931 if (csts.b.csts_cfs == 1) { 3932 dev_err(nvme->n_dip, CE_WARN, 3933 "!controller fatal status at init"); 3934 ddi_fm_service_impact(nvme->n_dip, 3935 DDI_SERVICE_LOST); 3936 nvme->n_dead = B_TRUE; 3937 goto fail; 3938 } 3939 3940 if (csts.b.csts_rdy == 1) 3941 break; 3942 } 3943 } 3944 3945 if (csts.b.csts_rdy == 0) { 3946 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 3947 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 3948 nvme->n_dead = B_TRUE; 3949 goto fail; 3950 } 3951 3952 /* 3953 * Assume an abort command limit of 1. We'll destroy and re-init 3954 * that later when we know the true abort command limit. 3955 */ 3956 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 3957 3958 /* 3959 * Set up initial interrupt for admin queue. 
3960 */ 3961 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 3962 != DDI_SUCCESS) && 3963 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 3964 != DDI_SUCCESS) && 3965 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 3966 != DDI_SUCCESS)) { 3967 dev_err(nvme->n_dip, CE_WARN, 3968 "!failed to set up initial interrupt"); 3969 goto fail; 3970 } 3971 3972 /* 3973 * Post an asynchronous event command to catch errors. 3974 * We assume the asynchronous events are supported as required by 3975 * specification (Figure 40 in section 5 of NVMe 1.2). 3976 * However, since at least qemu does not follow the specification, 3977 * we need a mechanism to protect ourselves. 3978 */ 3979 nvme->n_async_event_supported = B_TRUE; 3980 nvme_async_event(nvme); 3981 3982 /* 3983 * Identify Controller 3984 */ 3985 if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL, 3986 (void **)&nvme->n_idctl)) { 3987 dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller"); 3988 goto fail; 3989 } 3990 3991 /* 3992 * Get the common namespace information if available. If not, we use the 3993 * information for nsid 1. 3994 */ 3995 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) && 3996 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) { 3997 nsid = NVME_NSID_BCAST; 3998 } else { 3999 nsid = 1; 4000 } 4001 4002 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID, 4003 (void **)&nvme->n_idcomns)) { 4004 dev_err(nvme->n_dip, CE_WARN, "!failed to identify common " 4005 "namespace information"); 4006 goto fail; 4007 } 4008 /* 4009 * Process nvme-config-list (if present) in nvme.conf. 4010 */ 4011 nvme_config_list(nvme); 4012 4013 /* 4014 * Get Vendor & Product ID 4015 */ 4016 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 4017 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 4018 sata_split_model(model, &vendor, &product); 4019 4020 if (vendor == NULL) 4021 nvme->n_vendor = strdup("NVMe"); 4022 else 4023 nvme->n_vendor = strdup(vendor); 4024 4025 nvme->n_product = strdup(product); 4026 4027 /* 4028 * Get controller limits. 4029 */ 4030 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 4031 MIN(nvme->n_admin_queue_len / 10, 4032 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 4033 4034 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4035 "async-event-limit", nvme->n_async_event_limit); 4036 4037 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 4038 4039 /* 4040 * Reinitialize the semaphore with the true abort command limit 4041 * supported by the hardware. It's not necessary to disable interrupts 4042 * as only command aborts use the semaphore, and no commands are 4043 * executed or aborted while we're here. 4044 */ 4045 sema_destroy(&nvme->n_abort_sema); 4046 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 4047 SEMA_DRIVER, NULL); 4048 4049 nvme->n_progress |= NVME_CTRL_LIMITS; 4050 4051 if (nvme->n_idctl->id_mdts == 0) 4052 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 4053 else 4054 nvme->n_max_data_transfer_size = 4055 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 4056 4057 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 4058 4059 /* 4060 * Limit n_max_data_transfer_size to what we can handle in one PRP. 4061 * Chained PRPs are currently unsupported. 4062 * 4063 * This is a no-op on hardware which doesn't support a transfer size 4064 * big enough to require chained PRPs. 
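 *
 * As a worked example, assuming the common 4k page size: one page of
 * PRP entries holds 4096 / 8 = 512 pointers, so the transfer size is
 * capped at 512 * 4k = 2MB.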
4065 */ 4066 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 4067 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 4068 4069 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 4070 4071 /* 4072 * Make sure the minimum/maximum queue entry sizes are not 4073 * larger/smaller than the default. 4074 */ 4075 4076 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 4077 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 4078 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 4079 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 4080 goto fail; 4081 4082 /* 4083 * Check for the presence of a Volatile Write Cache. If present, 4084 * enable or disable based on the value of the property 4085 * volatile-write-cache-enable (default is enabled). 4086 */ 4087 nvme->n_write_cache_present = 4088 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 4089 4090 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4091 "volatile-write-cache-present", 4092 nvme->n_write_cache_present ? 1 : 0); 4093 4094 if (!nvme->n_write_cache_present) { 4095 nvme->n_write_cache_enabled = B_FALSE; 4096 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 4097 != 0) { 4098 dev_err(nvme->n_dip, CE_WARN, 4099 "!failed to %sable volatile write cache", 4100 nvme->n_write_cache_enabled ? "en" : "dis"); 4101 /* 4102 * Assume the cache is (still) enabled. 4103 */ 4104 nvme->n_write_cache_enabled = B_TRUE; 4105 } 4106 4107 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 4108 "volatile-write-cache-enable", 4109 nvme->n_write_cache_enabled ? 1 : 0); 4110 4111 /* 4112 * Get number of supported namespaces and allocate namespace array. 4113 */ 4114 nvme->n_namespace_count = nvme->n_idctl->id_nn; 4115 4116 if (nvme->n_namespace_count == 0) { 4117 dev_err(nvme->n_dip, CE_WARN, 4118 "!controllers without namespaces are not supported"); 4119 goto fail; 4120 } 4121 4122 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 4123 dev_err(nvme->n_dip, CE_WARN, 4124 "!too many namespaces: %d, limiting to %d\n", 4125 nvme->n_namespace_count, NVME_MINOR_MAX); 4126 nvme->n_namespace_count = NVME_MINOR_MAX; 4127 } 4128 4129 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 4130 nvme->n_namespace_count, KM_SLEEP); 4131 4132 /* 4133 * Try to set up MSI/MSI-X interrupts. 4134 */ 4135 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 4136 != 0) { 4137 nvme_release_interrupts(nvme); 4138 4139 nqueues = MIN(UINT16_MAX, ncpus); 4140 4141 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 4142 nqueues) != DDI_SUCCESS) && 4143 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 4144 nqueues) != DDI_SUCCESS)) { 4145 dev_err(nvme->n_dip, CE_WARN, 4146 "!failed to set up MSI/MSI-X interrupts"); 4147 goto fail; 4148 } 4149 } 4150 4151 /* 4152 * Create I/O queue pairs. 4153 */ 4154 4155 if (nvme_set_nqueues(nvme) != 0) { 4156 dev_err(nvme->n_dip, CE_WARN, 4157 "!failed to set number of I/O queues to %d", 4158 nvme->n_intr_cnt); 4159 goto fail; 4160 } 4161 4162 /* 4163 * Reallocate I/O queue array 4164 */ 4165 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 4166 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 4167 (nvme->n_submission_queues + 1), KM_SLEEP); 4168 nvme->n_ioq[0] = nvme->n_adminq; 4169 4170 /* 4171 * There should always be at least as many submission queues 4172 * as completion queues. 
4173 */ 4174 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues); 4175 4176 nvme->n_ioq_count = nvme->n_submission_queues; 4177 4178 nvme->n_io_squeue_len = 4179 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries); 4180 4181 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len", 4182 nvme->n_io_squeue_len); 4183 4184 /* 4185 * Pre-allocate completion queues. 4186 * When there are the same number of submission and completion 4187 * queues there is no value in having a larger completion 4188 * queue length. 4189 */ 4190 if (nvme->n_submission_queues == nvme->n_completion_queues) 4191 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 4192 nvme->n_io_squeue_len); 4193 4194 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 4195 nvme->n_max_queue_entries); 4196 4197 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len", 4198 nvme->n_io_cqueue_len); 4199 4200 /* 4201 * Assign an equal number of taskq threads to each completion 4202 * queue, capping the total number of threads at the number 4203 * of CPUs. 4204 */ 4205 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues; 4206 4207 /* 4208 * In case the calculation above is zero, we need at least one 4209 * thread per completion queue. 4210 */ 4211 tq_threads = MAX(1, tq_threads); 4212 4213 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1, 4214 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) { 4215 dev_err(nvme->n_dip, CE_WARN, 4216 "!failed to pre-allocate completion queues"); 4217 goto fail; 4218 } 4219 4220 /* 4221 * If we use fewer completion queues than interrupt vectors, return 4222 * some of the interrupt vectors back to the system. 4223 */ 4224 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) { 4225 nvme_release_interrupts(nvme); 4226 4227 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, 4228 nvme->n_completion_queues + 1) != DDI_SUCCESS) { 4229 dev_err(nvme->n_dip, CE_WARN, 4230 "!failed to reduce number of interrupts"); 4231 goto fail; 4232 } 4233 } 4234 4235 /* 4236 * Alloc & register I/O queue pairs 4237 */ 4238 4239 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 4240 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len, 4241 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 4242 dev_err(nvme->n_dip, CE_WARN, 4243 "!unable to allocate I/O qpair %d", i); 4244 goto fail; 4245 } 4246 4247 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) { 4248 dev_err(nvme->n_dip, CE_WARN, 4249 "!unable to create I/O qpair %d", i); 4250 goto fail; 4251 } 4252 } 4253 4254 /* 4255 * Post more asynchronous event commands to reduce event reporting 4256 * latency as suggested by the spec. 4257 */ 4258 if (nvme->n_async_event_supported) { 4259 for (i = 1; i != nvme->n_async_event_limit; i++) 4260 nvme_async_event(nvme); 4261 } 4262 4263 return (DDI_SUCCESS); 4264 4265 fail: 4266 (void) nvme_reset(nvme, B_FALSE); 4267 return (DDI_FAILURE); 4268 } 4269 4270 static uint_t 4271 nvme_intr(caddr_t arg1, caddr_t arg2) 4272 { 4273 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 4274 nvme_t *nvme = (nvme_t *)arg1; 4275 int inum = (int)(uintptr_t)arg2; 4276 int ccnt = 0; 4277 int qnum; 4278 4279 if (inum >= nvme->n_intr_cnt) 4280 return (DDI_INTR_UNCLAIMED); 4281 4282 if (nvme->n_dead) 4283 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ? 4284 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED); 4285 4286 /* 4287 * The interrupt vector a queue uses is calculated as queue_idx % 4288 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 4289 * in steps of n_intr_cnt to process all queues using this vector. 
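 *
 * For example (illustrative numbers only): with 4 interrupt vectors
 * and 8 I/O completion queues plus the admin completion queue at
 * index 0, vector 0 services queues 0, 4 and 8, vector 1 services
 * queues 1 and 5, and so on.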
4290 */ 4291 for (qnum = inum; 4292 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL; 4293 qnum += nvme->n_intr_cnt) { 4294 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]); 4295 } 4296 4297 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 4298 } 4299 4300 static void 4301 nvme_release_interrupts(nvme_t *nvme) 4302 { 4303 int i; 4304 4305 for (i = 0; i < nvme->n_intr_cnt; i++) { 4306 if (nvme->n_inth[i] == NULL) 4307 break; 4308 4309 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 4310 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 4311 else 4312 (void) ddi_intr_disable(nvme->n_inth[i]); 4313 4314 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 4315 (void) ddi_intr_free(nvme->n_inth[i]); 4316 } 4317 4318 kmem_free(nvme->n_inth, nvme->n_inth_sz); 4319 nvme->n_inth = NULL; 4320 nvme->n_inth_sz = 0; 4321 4322 nvme->n_progress &= ~NVME_INTERRUPTS; 4323 } 4324 4325 static int 4326 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 4327 { 4328 int nintrs, navail, count; 4329 int ret; 4330 int i; 4331 4332 if (nvme->n_intr_types == 0) { 4333 ret = ddi_intr_get_supported_types(nvme->n_dip, 4334 &nvme->n_intr_types); 4335 if (ret != DDI_SUCCESS) { 4336 dev_err(nvme->n_dip, CE_WARN, 4337 "!%s: ddi_intr_get_supported types failed", 4338 __func__); 4339 return (ret); 4340 } 4341 #ifdef __x86 4342 if (get_hwenv() == HW_VMWARE) 4343 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 4344 #endif 4345 } 4346 4347 if ((nvme->n_intr_types & intr_type) == 0) 4348 return (DDI_FAILURE); 4349 4350 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 4351 if (ret != DDI_SUCCESS) { 4352 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 4353 __func__); 4354 return (ret); 4355 } 4356 4357 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 4358 if (ret != DDI_SUCCESS) { 4359 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 4360 __func__); 4361 return (ret); 4362 } 4363 4364 /* We want at most one interrupt per queue pair. 
*/ 4365 if (navail > nqpairs) 4366 navail = nqpairs; 4367 4368 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 4369 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 4370 4371 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 4372 &count, 0); 4373 if (ret != DDI_SUCCESS) { 4374 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 4375 __func__); 4376 goto fail; 4377 } 4378 4379 nvme->n_intr_cnt = count; 4380 4381 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 4382 if (ret != DDI_SUCCESS) { 4383 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 4384 __func__); 4385 goto fail; 4386 } 4387 4388 for (i = 0; i < count; i++) { 4389 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 4390 (void *)nvme, (void *)(uintptr_t)i); 4391 if (ret != DDI_SUCCESS) { 4392 dev_err(nvme->n_dip, CE_WARN, 4393 "!%s: ddi_intr_add_handler failed", __func__); 4394 goto fail; 4395 } 4396 } 4397 4398 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 4399 4400 for (i = 0; i < count; i++) { 4401 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 4402 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 4403 else 4404 ret = ddi_intr_enable(nvme->n_inth[i]); 4405 4406 if (ret != DDI_SUCCESS) { 4407 dev_err(nvme->n_dip, CE_WARN, 4408 "!%s: enabling interrupt %d failed", __func__, i); 4409 goto fail; 4410 } 4411 } 4412 4413 nvme->n_intr_type = intr_type; 4414 4415 nvme->n_progress |= NVME_INTERRUPTS; 4416 4417 return (DDI_SUCCESS); 4418 4419 fail: 4420 nvme_release_interrupts(nvme); 4421 4422 return (ret); 4423 } 4424 4425 static int 4426 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 4427 { 4428 _NOTE(ARGUNUSED(arg)); 4429 4430 pci_ereport_post(dip, fm_error, NULL); 4431 return (fm_error->fme_status); 4432 } 4433 4434 static void 4435 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a, 4436 void *b) 4437 { 4438 nvme_t *nvme = a; 4439 4440 nvme_ctrl_mark_dead(nvme, B_TRUE); 4441 4442 /* 4443 * Fail all outstanding commands, including those in the admin queue 4444 * (queue 0). 4445 */ 4446 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) { 4447 nvme_qpair_t *qp = nvme->n_ioq[i]; 4448 4449 mutex_enter(&qp->nq_mutex); 4450 for (size_t j = 0; j < qp->nq_nentry; j++) { 4451 nvme_cmd_t *cmd = qp->nq_cmd[j]; 4452 nvme_cmd_t *u_cmd; 4453 4454 if (cmd == NULL) { 4455 continue; 4456 } 4457 4458 /* 4459 * Since we have the queue lock held the entire time we 4460 * iterate over it, it's not possible for the queue to 4461 * change underneath us. Thus, we don't need to check 4462 * that the return value of nvme_unqueue_cmd matches the 4463 * requested cmd to unqueue. 
4464 */ 4465 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 4466 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, 4467 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 4468 4469 ASSERT3P(u_cmd, ==, cmd); 4470 } 4471 mutex_exit(&qp->nq_mutex); 4472 } 4473 } 4474 4475 /* 4476 * Open minor management 4477 */ 4478 static int 4479 nvme_minor_comparator(const void *l, const void *r) 4480 { 4481 const nvme_minor_t *lm = l; 4482 const nvme_minor_t *rm = r; 4483 4484 if (lm->nm_minor > rm->nm_minor) { 4485 return (1); 4486 } else if (lm->nm_minor < rm->nm_minor) { 4487 return (-1); 4488 } else { 4489 return (0); 4490 } 4491 } 4492 4493 static void 4494 nvme_minor_free(nvme_minor_t *minor) 4495 { 4496 if (minor->nm_minor > 0) { 4497 ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN); 4498 id_free(nvme_open_minors, minor->nm_minor); 4499 minor->nm_minor = 0; 4500 } 4501 VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node)); 4502 VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node)); 4503 cv_destroy(&minor->nm_cv); 4504 kmem_free(minor, sizeof (nvme_minor_t)); 4505 } 4506 4507 static nvme_minor_t * 4508 nvme_minor_find_by_dev(dev_t dev) 4509 { 4510 id_t id = (id_t)getminor(dev); 4511 nvme_minor_t search = { .nm_minor = id }; 4512 nvme_minor_t *ret; 4513 4514 mutex_enter(&nvme_open_minors_mutex); 4515 ret = avl_find(&nvme_open_minors_avl, &search, NULL); 4516 mutex_exit(&nvme_open_minors_mutex); 4517 4518 return (ret); 4519 } 4520 4521 static int 4522 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 4523 { 4524 nvme_t *nvme; 4525 int instance; 4526 int nregs; 4527 off_t regsize; 4528 char name[32]; 4529 boolean_t attached_ns; 4530 4531 if (cmd != DDI_ATTACH) 4532 return (DDI_FAILURE); 4533 4534 instance = ddi_get_instance(dip); 4535 4536 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 4537 return (DDI_FAILURE); 4538 4539 nvme = ddi_get_soft_state(nvme_state, instance); 4540 ddi_set_driver_private(dip, nvme); 4541 nvme->n_dip = dip; 4542 4543 /* 4544 * Map PCI config space 4545 */ 4546 if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) { 4547 dev_err(dip, CE_WARN, "!failed to map PCI config space"); 4548 goto fail; 4549 } 4550 nvme->n_progress |= NVME_PCI_CONFIG; 4551 4552 /* 4553 * Get the various PCI IDs from config space 4554 */ 4555 nvme->n_vendor_id = 4556 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID); 4557 nvme->n_device_id = 4558 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID); 4559 nvme->n_revision_id = 4560 pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID); 4561 nvme->n_subsystem_device_id = 4562 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID); 4563 nvme->n_subsystem_vendor_id = 4564 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID); 4565 4566 nvme_detect_quirks(nvme); 4567 4568 /* 4569 * Set up event handlers for hot removal. While npe(4D) supports the hot 4570 * removal event being injected for devices, the same is not true of all 4571 * of our possible parents (i.e. pci(4D) as of this writing). The most 4572 * common case this shows up is in some virtualization environments. We 4573 * should treat this as non-fatal so that way devices work but leave 4574 * this set up in such a way that if a nexus does grow support for this 4575 * we're good to go. 
4576 */ 4577 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT, 4578 &nvme->n_rm_cookie) == DDI_SUCCESS) { 4579 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie, 4580 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) != 4581 DDI_SUCCESS) { 4582 goto fail; 4583 } 4584 } else { 4585 nvme->n_ev_rm_cb_id = NULL; 4586 } 4587 4588 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL); 4589 nvme->n_progress |= NVME_MUTEX_INIT; 4590 4591 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4592 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 4593 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 4594 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 4595 B_TRUE : B_FALSE; 4596 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4597 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 4598 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4599 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 4600 /* 4601 * Double up the default for completion queues in case of 4602 * queue sharing. 4603 */ 4604 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4605 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 4606 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4607 DDI_PROP_DONTPASS, "async-event-limit", 4608 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 4609 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4610 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 4611 B_TRUE : B_FALSE; 4612 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4613 DDI_PROP_DONTPASS, "min-phys-block-size", 4614 NVME_DEFAULT_MIN_BLOCK_SIZE); 4615 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4616 DDI_PROP_DONTPASS, "max-submission-queues", -1); 4617 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4618 DDI_PROP_DONTPASS, "max-completion-queues", -1); 4619 4620 if (!ISP2(nvme->n_min_block_size) || 4621 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 4622 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 4623 "using default %d", ISP2(nvme->n_min_block_size) ? 4624 "too low" : "not a power of 2", 4625 NVME_DEFAULT_MIN_BLOCK_SIZE); 4626 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 4627 } 4628 4629 if (nvme->n_submission_queues != -1 && 4630 (nvme->n_submission_queues < 1 || 4631 nvme->n_submission_queues > UINT16_MAX)) { 4632 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not " 4633 "valid. Must be [1..%d]", nvme->n_submission_queues, 4634 UINT16_MAX); 4635 nvme->n_submission_queues = -1; 4636 } 4637 4638 if (nvme->n_completion_queues != -1 && 4639 (nvme->n_completion_queues < 1 || 4640 nvme->n_completion_queues > UINT16_MAX)) { 4641 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not " 4642 "valid. 
Must be [1..%d]", nvme->n_completion_queues, 4643 UINT16_MAX); 4644 nvme->n_completion_queues = -1; 4645 } 4646 4647 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 4648 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 4649 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 4650 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 4651 4652 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 4653 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 4654 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 4655 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 4656 4657 if (nvme->n_async_event_limit < 1) 4658 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 4659 4660 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 4661 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 4662 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 4663 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 4664 4665 /* 4666 * Set up FMA support. 4667 */ 4668 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 4669 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 4670 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 4671 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 4672 4673 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 4674 4675 if (nvme->n_fm_cap) { 4676 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 4677 nvme->n_reg_acc_attr.devacc_attr_access = 4678 DDI_FLAGERR_ACC; 4679 4680 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 4681 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 4682 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 4683 } 4684 4685 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 4686 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4687 pci_ereport_setup(dip); 4688 4689 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4690 ddi_fm_handler_register(dip, nvme_fm_errcb, 4691 (void *)nvme); 4692 } 4693 4694 nvme->n_progress |= NVME_FMA_INIT; 4695 4696 /* 4697 * The spec defines several register sets. Only the controller 4698 * registers (set 1) are currently used. 4699 */ 4700 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 4701 nregs < 2 || 4702 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 4703 goto fail; 4704 4705 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 4706 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 4707 dev_err(dip, CE_WARN, "!failed to map regset 1"); 4708 goto fail; 4709 } 4710 4711 nvme->n_progress |= NVME_REGS_MAPPED; 4712 4713 /* 4714 * Create PRP DMA cache 4715 */ 4716 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 4717 ddi_driver_name(dip), ddi_get_instance(dip)); 4718 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 4719 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 4720 NULL, (void *)nvme, NULL, 0); 4721 4722 if (nvme_init(nvme) != DDI_SUCCESS) 4723 goto fail; 4724 4725 /* 4726 * Initialize the driver with the UFM subsystem 4727 */ 4728 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 4729 &nvme->n_ufmh, nvme) != 0) { 4730 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 4731 goto fail; 4732 } 4733 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 4734 ddi_ufm_update(nvme->n_ufmh); 4735 nvme->n_progress |= NVME_UFM_INIT; 4736 4737 nvme_mgmt_lock_init(&nvme->n_mgmt); 4738 nvme_lock_init(&nvme->n_lock); 4739 nvme->n_progress |= NVME_MGMT_INIT; 4740 nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD; 4741 4742 4743 /* 4744 * Identify namespaces. 
4745 */ 4746 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 4747 4748 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 4749 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 4750 4751 nvme_lock_init(&ns->ns_lock); 4752 ns->ns_progress |= NVME_NS_LOCK; 4753 4754 /* 4755 * Namespaces start out ignored. When nvme_init_ns() checks 4756 * their properties and finds they can be used, it will set 4757 * ns_ignore to B_FALSE. It will also use this state change 4758 * to keep an accurate count of attachable namespaces. 4759 */ 4760 ns->ns_ignore = B_TRUE; 4761 if (nvme_init_ns(nvme, i) != 0) { 4762 nvme_mgmt_unlock(nvme); 4763 goto fail; 4764 } 4765 4766 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR, 4767 NVME_MINOR(ddi_get_instance(nvme->n_dip), i), 4768 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 4769 nvme_mgmt_unlock(nvme); 4770 dev_err(dip, CE_WARN, 4771 "!failed to create minor node for namespace %d", i); 4772 goto fail; 4773 } 4774 } 4775 4776 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 4777 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) != 4778 DDI_SUCCESS) { 4779 nvme_mgmt_unlock(nvme); 4780 dev_err(dip, CE_WARN, "nvme_attach: " 4781 "cannot create devctl minor node"); 4782 goto fail; 4783 } 4784 4785 attached_ns = B_FALSE; 4786 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 4787 nvme_ioctl_common_t com = { .nioc_nsid = i }; 4788 4789 if (nvme_attach_ns(nvme, &com)) { 4790 attached_ns = B_TRUE; 4791 } else if (com.nioc_drv_err != NVME_IOCTL_E_UNSUP_ATTACH_NS) { 4792 dev_err(nvme->n_dip, CE_WARN, "!failed to attach " 4793 "namespace %d due to blkdev error", i); 4794 /* 4795 * Once we have successfully attached a namespace we 4796 * can no longer fail the driver attach as there is now 4797 * a blkdev child node linked to this device, and 4798 * our node is not yet in the attached state. 4799 */ 4800 if (!attached_ns) { 4801 nvme_mgmt_unlock(nvme); 4802 goto fail; 4803 } 4804 } 4805 } 4806 4807 nvme_mgmt_unlock(nvme); 4808 4809 return (DDI_SUCCESS); 4810 4811 fail: 4812 /* attach successful anyway so that FMA can retire the device */ 4813 if (nvme->n_dead) 4814 return (DDI_SUCCESS); 4815 4816 (void) nvme_detach(dip, DDI_DETACH); 4817 4818 return (DDI_FAILURE); 4819 } 4820 4821 static int 4822 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4823 { 4824 int instance; 4825 nvme_t *nvme; 4826 4827 if (cmd != DDI_DETACH) 4828 return (DDI_FAILURE); 4829 4830 instance = ddi_get_instance(dip); 4831 4832 nvme = ddi_get_soft_state(nvme_state, instance); 4833 4834 if (nvme == NULL) 4835 return (DDI_FAILURE); 4836 4837 /* 4838 * Remove all minor nodes from the device regardless of the source in 4839 * one swoop. 4840 */ 4841 ddi_remove_minor_node(dip, NULL); 4842 4843 /* 4844 * We need to remove the event handler as one of the first things that 4845 * we do. If we proceed with other teardown without removing the event 4846 * handler, we could end up in a very unfortunate race with ourselves. 4847 * The DDI does not serialize these with detach (just like timeout(9F) 4848 * and others). 4849 */ 4850 if (nvme->n_ev_rm_cb_id != NULL) { 4851 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id); 4852 } 4853 nvme->n_ev_rm_cb_id = NULL; 4854 4855 /* 4856 * If the controller was marked dead, there is a slight chance that we 4857 * are asynchronously processing the removal taskq. Because we have 4858 * removed the callback handler above and all minor nodes and commands 4859 * are closed, there is no other way to get in here. 
As such, we wait on 4860 * the nvme_dead_taskq to complete so we can avoid tracking if it's 4861 * running or not. 4862 */ 4863 taskq_wait(nvme_dead_taskq); 4864 4865 if (nvme->n_ns) { 4866 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) { 4867 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i); 4868 4869 if (ns->ns_bd_hdl) { 4870 (void) bd_detach_handle(ns->ns_bd_hdl); 4871 bd_free_handle(ns->ns_bd_hdl); 4872 } 4873 4874 if (ns->ns_idns) 4875 kmem_free(ns->ns_idns, 4876 sizeof (nvme_identify_nsid_t)); 4877 if (ns->ns_devid) 4878 strfree(ns->ns_devid); 4879 4880 if ((ns->ns_progress & NVME_NS_LOCK) != 0) 4881 nvme_lock_fini(&ns->ns_lock); 4882 } 4883 4884 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 4885 nvme->n_namespace_count); 4886 } 4887 4888 if (nvme->n_progress & NVME_MGMT_INIT) { 4889 nvme_lock_fini(&nvme->n_lock); 4890 nvme_mgmt_lock_fini(&nvme->n_mgmt); 4891 } 4892 4893 if (nvme->n_progress & NVME_UFM_INIT) { 4894 ddi_ufm_fini(nvme->n_ufmh); 4895 mutex_destroy(&nvme->n_fwslot_mutex); 4896 } 4897 4898 if (nvme->n_progress & NVME_INTERRUPTS) 4899 nvme_release_interrupts(nvme); 4900 4901 for (uint_t i = 0; i < nvme->n_cq_count; i++) { 4902 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL) 4903 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq); 4904 } 4905 4906 if (nvme->n_progress & NVME_MUTEX_INIT) { 4907 mutex_destroy(&nvme->n_minor_mutex); 4908 } 4909 4910 if (nvme->n_ioq_count > 0) { 4911 for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) { 4912 if (nvme->n_ioq[i] != NULL) { 4913 /* TODO: send destroy queue commands */ 4914 nvme_free_qpair(nvme->n_ioq[i]); 4915 } 4916 } 4917 4918 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 4919 (nvme->n_ioq_count + 1)); 4920 } 4921 4922 if (nvme->n_prp_cache != NULL) { 4923 kmem_cache_destroy(nvme->n_prp_cache); 4924 } 4925 4926 if (nvme->n_progress & NVME_REGS_MAPPED) { 4927 nvme_shutdown(nvme, B_FALSE); 4928 (void) nvme_reset(nvme, B_FALSE); 4929 } 4930 4931 if (nvme->n_progress & NVME_CTRL_LIMITS) 4932 sema_destroy(&nvme->n_abort_sema); 4933 4934 if (nvme->n_progress & NVME_ADMIN_QUEUE) 4935 nvme_free_qpair(nvme->n_adminq); 4936 4937 if (nvme->n_cq_count > 0) { 4938 nvme_destroy_cq_array(nvme, 0); 4939 nvme->n_cq = NULL; 4940 nvme->n_cq_count = 0; 4941 } 4942 4943 if (nvme->n_idcomns) 4944 kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE); 4945 4946 if (nvme->n_idctl) 4947 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 4948 4949 if (nvme->n_progress & NVME_REGS_MAPPED) 4950 ddi_regs_map_free(&nvme->n_regh); 4951 4952 if (nvme->n_progress & NVME_FMA_INIT) { 4953 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4954 ddi_fm_handler_unregister(nvme->n_dip); 4955 4956 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 4957 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4958 pci_ereport_teardown(nvme->n_dip); 4959 4960 ddi_fm_fini(nvme->n_dip); 4961 } 4962 4963 if (nvme->n_progress & NVME_PCI_CONFIG) 4964 pci_config_teardown(&nvme->n_pcicfg_handle); 4965 4966 if (nvme->n_vendor != NULL) 4967 strfree(nvme->n_vendor); 4968 4969 if (nvme->n_product != NULL) 4970 strfree(nvme->n_product); 4971 4972 ddi_soft_state_free(nvme_state, instance); 4973 4974 return (DDI_SUCCESS); 4975 } 4976 4977 static int 4978 nvme_quiesce(dev_info_t *dip) 4979 { 4980 int instance; 4981 nvme_t *nvme; 4982 4983 instance = ddi_get_instance(dip); 4984 4985 nvme = ddi_get_soft_state(nvme_state, instance); 4986 4987 if (nvme == NULL) 4988 return (DDI_FAILURE); 4989 4990 nvme_shutdown(nvme, B_TRUE); 4991 4992 (void) nvme_reset(nvme, B_TRUE); 4993 4994 return (DDI_SUCCESS); 4995 } 4996 4997 static int 4998 
nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma) 4999 { 5000 nvme_t *nvme = cmd->nc_nvme; 5001 uint_t nprp_per_page, nprp; 5002 uint64_t *prp; 5003 const ddi_dma_cookie_t *cookie; 5004 uint_t idx; 5005 uint_t ncookies = ddi_dma_ncookies(dma); 5006 5007 if (ncookies == 0) 5008 return (DDI_FAILURE); 5009 5010 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL) 5011 return (DDI_FAILURE); 5012 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress; 5013 5014 if (ncookies == 1) { 5015 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 5016 return (DDI_SUCCESS); 5017 } else if (ncookies == 2) { 5018 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL) 5019 return (DDI_FAILURE); 5020 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress; 5021 return (DDI_SUCCESS); 5022 } 5023 5024 /* 5025 * At this point, we're always operating on cookies at 5026 * index >= 1 and writing the addresses of those cookies 5027 * into a new page. The address of that page is stored 5028 * as the second PRP entry. 5029 */ 5030 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t); 5031 ASSERT(nprp_per_page > 0); 5032 5033 /* 5034 * We currently don't support chained PRPs and set up our DMA 5035 * attributes to reflect that. If we still get an I/O request 5036 * that needs a chained PRP something is very wrong. Account 5037 * for the first cookie here, which we've placed in d_prp[0]. 5038 */ 5039 nprp = howmany(ncookies - 1, nprp_per_page); 5040 VERIFY(nprp == 1); 5041 5042 /* 5043 * Allocate a page of pointers, in which we'll write the 5044 * addresses of cookies 1 to `ncookies`. 5045 */ 5046 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 5047 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 5048 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress; 5049 5050 prp = (uint64_t *)cmd->nc_prp->nd_memp; 5051 for (idx = 1; idx < ncookies; idx++) { 5052 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL) 5053 return (DDI_FAILURE); 5054 *prp++ = cookie->dmac_laddress; 5055 } 5056 5057 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 5058 DDI_DMA_SYNC_FORDEV); 5059 return (DDI_SUCCESS); 5060 } 5061 5062 /* 5063 * The maximum number of requests supported for a deallocate request is 5064 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and 5065 * unchanged through at least 1.4a). The definition of nvme_range_t is also 5066 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for 5067 * a deallocate request will fit into the smallest supported namespace page 5068 * (4k). 5069 */ 5070 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096); 5071 5072 static int 5073 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize, 5074 int allocflag) 5075 { 5076 const dkioc_free_list_t *dfl = xfer->x_dfl; 5077 const dkioc_free_list_ext_t *exts = dfl->dfl_exts; 5078 nvme_t *nvme = cmd->nc_nvme; 5079 nvme_range_t *ranges = NULL; 5080 uint_t i; 5081 5082 /* 5083 * The number of ranges in the request is 0s based (that is 5084 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ..., 5085 * word10 == 255 -> 256 ranges). Therefore the allowed values are 5086 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request, 5087 * we either provided bad info in nvme_bd_driveinfo() or there is a bug 5088 * in blkdev. 
5089 */ 5090 VERIFY3U(dfl->dfl_num_exts, >, 0); 5091 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES); 5092 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff; 5093 5094 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE; 5095 5096 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag); 5097 if (cmd->nc_prp == NULL) 5098 return (DDI_FAILURE); 5099 5100 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 5101 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp; 5102 5103 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress; 5104 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 5105 5106 for (i = 0; i < dfl->dfl_num_exts; i++) { 5107 uint64_t lba, len; 5108 5109 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize; 5110 len = exts[i].dfle_length / blocksize; 5111 5112 VERIFY3U(len, <=, UINT32_MAX); 5113 5114 /* No context attributes for a deallocate request */ 5115 ranges[i].nr_ctxattr = 0; 5116 ranges[i].nr_len = len; 5117 ranges[i].nr_lba = lba; 5118 } 5119 5120 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 5121 DDI_DMA_SYNC_FORDEV); 5122 5123 return (DDI_SUCCESS); 5124 } 5125 5126 static nvme_cmd_t * 5127 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 5128 { 5129 nvme_t *nvme = ns->ns_nvme; 5130 nvme_cmd_t *cmd; 5131 int allocflag; 5132 5133 /* 5134 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 5135 */ 5136 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP; 5137 cmd = nvme_alloc_cmd(nvme, allocflag); 5138 5139 if (cmd == NULL) 5140 return (NULL); 5141 5142 cmd->nc_sqe.sqe_opc = opc; 5143 cmd->nc_callback = nvme_bd_xfer_done; 5144 cmd->nc_xfer = xfer; 5145 5146 switch (opc) { 5147 case NVME_OPC_NVM_WRITE: 5148 case NVME_OPC_NVM_READ: 5149 VERIFY(xfer->x_nblks <= 0x10000); 5150 5151 cmd->nc_sqe.sqe_nsid = ns->ns_id; 5152 5153 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 5154 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 5155 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 5156 5157 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS) 5158 goto fail; 5159 break; 5160 5161 case NVME_OPC_NVM_FLUSH: 5162 cmd->nc_sqe.sqe_nsid = ns->ns_id; 5163 break; 5164 5165 case NVME_OPC_NVM_DSET_MGMT: 5166 cmd->nc_sqe.sqe_nsid = ns->ns_id; 5167 5168 if (nvme_fill_ranges(cmd, xfer, 5169 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS) 5170 goto fail; 5171 break; 5172 5173 default: 5174 goto fail; 5175 } 5176 5177 return (cmd); 5178 5179 fail: 5180 nvme_free_cmd(cmd); 5181 return (NULL); 5182 } 5183 5184 static void 5185 nvme_bd_xfer_done(void *arg) 5186 { 5187 nvme_cmd_t *cmd = arg; 5188 bd_xfer_t *xfer = cmd->nc_xfer; 5189 int error = 0; 5190 5191 error = nvme_check_cmd_status(cmd); 5192 nvme_free_cmd(cmd); 5193 5194 bd_xfer_done(xfer, error); 5195 } 5196 5197 static void 5198 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 5199 { 5200 nvme_namespace_t *ns = arg; 5201 nvme_t *nvme = ns->ns_nvme; 5202 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable); 5203 5204 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO); 5205 5206 /* 5207 * Set the blkdev qcount to the number of submission queues. 5208 * It will then create one waitq/runq pair for each submission 5209 * queue and spread I/O requests across the queues. 5210 */ 5211 drive->d_qcount = nvme->n_ioq_count; 5212 5213 /* 5214 * I/O activity to individual namespaces is distributed across 5215 * each of the d_qcount blkdev queues (which has been set to 5216 * the number of nvme submission queues). 
d_qsize is the number 5217 * of submitted and not completed I/Os within each queue that blkdev 5218 * will allow before it starts holding them in the waitq. 5219 * 5220 * Each namespace will create a child blkdev instance, for each one 5221 * we try and set the d_qsize so that each namespace gets an 5222 * equal portion of the submission queue. 5223 * 5224 * If post instantiation of the nvme drive, n_namespaces_attachable 5225 * changes and a namespace is attached it could calculate a 5226 * different d_qsize. It may even be that the sum of the d_qsizes is 5227 * now beyond the submission queue size. Should that be the case 5228 * and the I/O rate is such that blkdev attempts to submit more 5229 * I/Os than the size of the submission queue, the excess I/Os 5230 * will be held behind the semaphore nq_sema. 5231 */ 5232 drive->d_qsize = nvme->n_io_squeue_len / ns_count; 5233 5234 /* 5235 * Don't let the queue size drop below the minimum, though. 5236 */ 5237 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN); 5238 5239 /* 5240 * d_maxxfer is not set, which means the value is taken from the DMA 5241 * attributes specified to bd_alloc_handle. 5242 */ 5243 5244 drive->d_removable = B_FALSE; 5245 drive->d_hotpluggable = B_FALSE; 5246 5247 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64)); 5248 drive->d_target = ns->ns_id; 5249 drive->d_lun = 0; 5250 5251 drive->d_model = nvme->n_idctl->id_model; 5252 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 5253 drive->d_vendor = nvme->n_vendor; 5254 drive->d_vendor_len = strlen(nvme->n_vendor); 5255 drive->d_product = nvme->n_product; 5256 drive->d_product_len = strlen(nvme->n_product); 5257 drive->d_serial = nvme->n_idctl->id_serial; 5258 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 5259 drive->d_revision = nvme->n_idctl->id_fwrev; 5260 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 5261 5262 /* 5263 * If we support the dataset management command, the only restrictions 5264 * on a discard request are the maximum number of ranges (segments) 5265 * per single request. 5266 */ 5267 if (nvme->n_idctl->id_oncs.on_dset_mgmt) 5268 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES; 5269 5270 nvme_mgmt_unlock(nvme); 5271 } 5272 5273 static int 5274 nvme_bd_mediainfo(void *arg, bd_media_t *media) 5275 { 5276 nvme_namespace_t *ns = arg; 5277 nvme_t *nvme = ns->ns_nvme; 5278 5279 if (nvme->n_dead) { 5280 return (EIO); 5281 } 5282 5283 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO); 5284 5285 media->m_nblks = ns->ns_block_count; 5286 media->m_blksize = ns->ns_block_size; 5287 media->m_readonly = B_FALSE; 5288 media->m_solidstate = B_TRUE; 5289 5290 media->m_pblksize = ns->ns_best_block_size; 5291 5292 nvme_mgmt_unlock(nvme); 5293 5294 return (0); 5295 } 5296 5297 static int 5298 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 5299 { 5300 nvme_t *nvme = ns->ns_nvme; 5301 nvme_cmd_t *cmd; 5302 nvme_qpair_t *ioq; 5303 boolean_t poll; 5304 int ret; 5305 5306 if (nvme->n_dead) { 5307 return (EIO); 5308 } 5309 5310 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 5311 if (cmd == NULL) 5312 return (ENOMEM); 5313 5314 cmd->nc_sqid = xfer->x_qnum + 1; 5315 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 5316 ioq = nvme->n_ioq[cmd->nc_sqid]; 5317 5318 /* 5319 * Get the polling flag before submitting the command. The command may 5320 * complete immediately after it was submitted, which means we must 5321 * treat both cmd and xfer as if they have been freed already. 
5322 */ 5323 poll = (xfer->x_flags & BD_XFER_POLL) != 0; 5324 5325 ret = nvme_submit_io_cmd(ioq, cmd); 5326 5327 if (ret != 0) 5328 return (ret); 5329 5330 if (!poll) 5331 return (0); 5332 5333 do { 5334 cmd = nvme_retrieve_cmd(nvme, ioq); 5335 if (cmd != NULL) 5336 cmd->nc_callback(cmd); 5337 else 5338 drv_usecwait(10); 5339 } while (ioq->nq_active_cmds != 0); 5340 5341 return (0); 5342 } 5343 5344 static int 5345 nvme_bd_read(void *arg, bd_xfer_t *xfer) 5346 { 5347 nvme_namespace_t *ns = arg; 5348 5349 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 5350 } 5351 5352 static int 5353 nvme_bd_write(void *arg, bd_xfer_t *xfer) 5354 { 5355 nvme_namespace_t *ns = arg; 5356 5357 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 5358 } 5359 5360 static int 5361 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 5362 { 5363 nvme_namespace_t *ns = arg; 5364 5365 if (ns->ns_nvme->n_dead) 5366 return (EIO); 5367 5368 /* 5369 * If the volatile write cache is not present or not enabled the FLUSH 5370 * command is a no-op, so we can take a shortcut here. 5371 */ 5372 if (!ns->ns_nvme->n_write_cache_present) { 5373 bd_xfer_done(xfer, ENOTSUP); 5374 return (0); 5375 } 5376 5377 if (!ns->ns_nvme->n_write_cache_enabled) { 5378 bd_xfer_done(xfer, 0); 5379 return (0); 5380 } 5381 5382 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 5383 } 5384 5385 static int 5386 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 5387 { 5388 nvme_namespace_t *ns = arg; 5389 nvme_t *nvme = ns->ns_nvme; 5390 5391 if (nvme->n_dead) { 5392 return (EIO); 5393 } 5394 5395 if (*(uint64_t *)ns->ns_nguid != 0 || 5396 *(uint64_t *)(ns->ns_nguid + 8) != 0) { 5397 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID, 5398 sizeof (ns->ns_nguid), ns->ns_nguid, devid)); 5399 } else if (*(uint64_t *)ns->ns_eui64 != 0) { 5400 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64, 5401 sizeof (ns->ns_eui64), ns->ns_eui64, devid)); 5402 } else { 5403 return (ddi_devid_init(devinfo, DEVID_NVME_NSID, 5404 strlen(ns->ns_devid), ns->ns_devid, devid)); 5405 } 5406 } 5407 5408 static int 5409 nvme_bd_free_space(void *arg, bd_xfer_t *xfer) 5410 { 5411 nvme_namespace_t *ns = arg; 5412 5413 if (xfer->x_dfl == NULL) 5414 return (EINVAL); 5415 5416 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt) 5417 return (ENOTSUP); 5418 5419 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT)); 5420 } 5421 5422 static int 5423 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 5424 { 5425 #ifndef __lock_lint 5426 _NOTE(ARGUNUSED(cred_p)); 5427 #endif 5428 nvme_t *nvme; 5429 nvme_minor_t *minor = NULL; 5430 uint32_t nsid; 5431 minor_t m = getminor(*devp); 5432 int rv = 0; 5433 5434 if (otyp != OTYP_CHR) 5435 return (EINVAL); 5436 5437 if (m >= NVME_OPEN_MINOR_MIN) 5438 return (ENXIO); 5439 5440 nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m)); 5441 nsid = NVME_MINOR_NSID(m); 5442 5443 if (nvme == NULL) 5444 return (ENXIO); 5445 5446 if (nsid > nvme->n_namespace_count) 5447 return (ENXIO); 5448 5449 if (nvme->n_dead) 5450 return (EIO); 5451 5452 /* 5453 * At this point, we're going to allow an open to proceed on this 5454 * device. We need to allocate a new instance for this (presuming one is 5455 * available). 
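 * The new nvme_minor_t draws its minor number from the nvme_open_minors
 * id_space below and is inserted into nvme_open_minors_avl so that it can
 * be found again by nvme_close() and nvme_ioctl().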
5456 */ 5457 minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY); 5458 if (minor == NULL) { 5459 return (ENOMEM); 5460 } 5461 5462 cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL); 5463 list_link_init(&minor->nm_ctrl_lock.nli_node); 5464 minor->nm_ctrl_lock.nli_nvme = nvme; 5465 minor->nm_ctrl_lock.nli_minor = minor; 5466 list_link_init(&minor->nm_ns_lock.nli_node); 5467 minor->nm_ns_lock.nli_nvme = nvme; 5468 minor->nm_ns_lock.nli_minor = minor; 5469 minor->nm_minor = id_alloc_nosleep(nvme_open_minors); 5470 if (minor->nm_minor == -1) { 5471 nvme_minor_free(minor); 5472 return (ENOSPC); 5473 } 5474 5475 minor->nm_ctrl = nvme; 5476 if (nsid != 0) { 5477 minor->nm_ns = nvme_nsid2ns(nvme, nsid); 5478 } 5479 5480 /* 5481 * Before we check for exclusive access and attempt a lock if requested, 5482 * ensure that this minor is persisted. 5483 */ 5484 mutex_enter(&nvme_open_minors_mutex); 5485 avl_add(&nvme_open_minors_avl, minor); 5486 mutex_exit(&nvme_open_minors_mutex); 5487 5488 /* 5489 * A request for opening this FEXCL, is translated into a non-blocking 5490 * write lock of the appropriate entity. This honors the original 5491 * semantics here. In the future, we should see if we can remove this 5492 * and turn a request for FEXCL at open into ENOTSUP. 5493 */ 5494 mutex_enter(&nvme->n_minor_mutex); 5495 if ((flag & FEXCL) != 0) { 5496 nvme_ioctl_lock_t lock = { 5497 .nil_level = NVME_LOCK_L_WRITE, 5498 .nil_flags = NVME_LOCK_F_DONT_BLOCK 5499 }; 5500 5501 if (minor->nm_ns != NULL) { 5502 lock.nil_ent = NVME_LOCK_E_NS; 5503 lock.nil_common.nioc_nsid = nsid; 5504 } else { 5505 lock.nil_ent = NVME_LOCK_E_CTRL; 5506 } 5507 nvme_rwlock(minor, &lock); 5508 if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) { 5509 mutex_exit(&nvme->n_minor_mutex); 5510 5511 mutex_enter(&nvme_open_minors_mutex); 5512 avl_remove(&nvme_open_minors_avl, minor); 5513 mutex_exit(&nvme_open_minors_mutex); 5514 5515 nvme_minor_free(minor); 5516 return (EBUSY); 5517 } 5518 } 5519 mutex_exit(&nvme->n_minor_mutex); 5520 5521 *devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor); 5522 return (rv); 5523 5524 } 5525 5526 static int 5527 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused) 5528 { 5529 nvme_minor_t *minor; 5530 nvme_t *nvme; 5531 5532 if (otyp != OTYP_CHR) { 5533 return (ENXIO); 5534 } 5535 5536 minor = nvme_minor_find_by_dev(dev); 5537 if (minor == NULL) { 5538 return (ENXIO); 5539 } 5540 5541 mutex_enter(&nvme_open_minors_mutex); 5542 avl_remove(&nvme_open_minors_avl, minor); 5543 mutex_exit(&nvme_open_minors_mutex); 5544 5545 /* 5546 * When this device is being closed, we must ensure that any locks held 5547 * by this are dealt with. 
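 * Any controller or namespace lock still held by this minor is released
 * below before the minor itself is freed.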
5548 */ 5549 nvme = minor->nm_ctrl; 5550 mutex_enter(&nvme->n_minor_mutex); 5551 ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED); 5552 ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED); 5553 5554 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) { 5555 VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL); 5556 nvme_rwunlock(&minor->nm_ctrl_lock, 5557 minor->nm_ctrl_lock.nli_lock); 5558 } 5559 5560 if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) { 5561 VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL); 5562 nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock); 5563 } 5564 mutex_exit(&nvme->n_minor_mutex); 5565 5566 nvme_minor_free(minor); 5567 5568 return (0); 5569 } 5570 5571 void 5572 nvme_ioctl_success(nvme_ioctl_common_t *ioc) 5573 { 5574 ioc->nioc_drv_err = NVME_IOCTL_E_OK; 5575 ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS; 5576 ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC; 5577 } 5578 5579 boolean_t 5580 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct, 5581 uint32_t sc) 5582 { 5583 ioc->nioc_drv_err = err; 5584 ioc->nioc_ctrl_sct = sct; 5585 ioc->nioc_ctrl_sc = sc; 5586 5587 return (B_FALSE); 5588 } 5589 5590 static int 5591 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode) 5592 { 5593 nvme_ioctl_common_t ioc; 5594 5595 ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR); 5596 bzero(&ioc, sizeof (ioc)); 5597 if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t), 5598 mode & FKIOCTL) != 0) { 5599 return (EFAULT); 5600 } 5601 return (0); 5602 } 5603 5604 5605 /* 5606 * The companion to the namespace checking. This occurs after any rewriting 5607 * occurs. This is the primary point that we attempt to enforce any operation's 5608 * exclusivity. Note, it is theoretically possible for an operation to be 5609 * ongoing and to have someone with an exclusive lock ask to unlock it for some 5610 * reason. This does not maintain the number of such events that are going on. 5611 * While perhaps this is leaving too much up to the user, by the same token we 5612 * don't try to stop them from issuing two different format NVM commands 5613 * targeting the whole device at the same time either, even though the 5614 * controller would really rather that didn't happen. 5615 */ 5616 static boolean_t 5617 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc, 5618 const nvme_ioctl_check_t *check) 5619 { 5620 nvme_t *const nvme = minor->nm_ctrl; 5621 nvme_namespace_t *ns; 5622 boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl; 5623 5624 /* 5625 * If the command doesn't require anything, then we're done. 5626 */ 5627 if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) { 5628 return (B_TRUE); 5629 } 5630 5631 if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) { 5632 ns = NULL; 5633 } else { 5634 ns = nvme_nsid2ns(nvme, ioc->nioc_nsid); 5635 } 5636 5637 mutex_enter(&nvme->n_minor_mutex); 5638 ctrl_is_excl = nvme->n_lock.nl_writer != NULL; 5639 have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock; 5640 if (ns != NULL) { 5641 /* 5642 * We explicitly test the namespace lock's writer versus asking 5643 * the minor because the minor's namespace lock may apply to a 5644 * different namespace. 
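 * For example, this minor may hold a write lock on namespace 1 while the
 * ioctl targets namespace 2; only the owner recorded in
 * ns->ns_lock.nl_writer tells us whether namespace 2 itself is
 * exclusively held.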
5645 */ 5646 ns_is_excl = ns->ns_lock.nl_writer != NULL; 5647 have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock; 5648 ASSERT0(have_ctrl && have_ns); 5649 #ifdef DEBUG 5650 if (have_ns) { 5651 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns); 5652 } 5653 #endif 5654 } else { 5655 ns_is_excl = B_FALSE; 5656 have_ns = B_FALSE; 5657 } 5658 ASSERT0(ctrl_is_excl && ns_is_excl); 5659 mutex_exit(&nvme->n_minor_mutex); 5660 5661 if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) { 5662 if (ns == NULL) { 5663 if (have_ctrl) { 5664 return (B_TRUE); 5665 } 5666 return (nvme_ioctl_error(ioc, 5667 NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0)); 5668 } else { 5669 if (have_ctrl || have_ns) { 5670 return (B_TRUE); 5671 } 5672 return (nvme_ioctl_error(ioc, 5673 NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0)); 5674 } 5675 } 5676 5677 /* 5678 * Now we have an operation that does not require exclusive access. We 5679 * can proceed as long as no one else has it or if someone does it is 5680 * us. Regardless of what we target, a controller lock will stop us. 5681 */ 5682 if (ctrl_is_excl && !have_ctrl) { 5683 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0)); 5684 } 5685 5686 /* 5687 * Only check namespace exclusivity if we are targeting one. 5688 */ 5689 if (ns != NULL && ns_is_excl && !have_ns) { 5690 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0)); 5691 } 5692 5693 return (B_TRUE); 5694 } 5695 5696 /* 5697 * Perform common checking as to whether or not an ioctl operation may proceed. 5698 * We check in this function various aspects of the namespace attributes that 5699 * it's calling on. Once the namespace attributes and any possible rewriting 5700 * have been performed, then we proceed to check whether or not the requisite 5701 * exclusive access is present in nvme_ioctl_excl_check(). 5702 */ 5703 static boolean_t 5704 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc, 5705 const nvme_ioctl_check_t *check) 5706 { 5707 /* 5708 * If the minor has a namespace pointer, then it is constrained to that 5709 * namespace. If a namespace is allowed, then there are only two valid 5710 * values that we can find. The first is matching the minor. The second 5711 * is our value zero, which will be transformed to the current 5712 * namespace. 5713 */ 5714 if (minor->nm_ns != NULL) { 5715 if (!check->nck_ns_ok || !check->nck_ns_minor_ok) { 5716 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0, 5717 0)); 5718 } 5719 5720 if (ioc->nioc_nsid == 0) { 5721 ioc->nioc_nsid = minor->nm_ns->ns_id; 5722 } else if (ioc->nioc_nsid != minor->nm_ns->ns_id) { 5723 return (nvme_ioctl_error(ioc, 5724 NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0)); 5725 } 5726 5727 return (nvme_ioctl_excl_check(minor, ioc, check)); 5728 } 5729 5730 /* 5731 * If we've been told to skip checking the controller, here's where we 5732 * do that. This should really only be for commands which use the 5733 * namespace ID for listing purposes and therefore can have 5734 * traditionally illegal values here. 5735 */ 5736 if (check->nck_skip_ctrl) { 5737 return (nvme_ioctl_excl_check(minor, ioc, check)); 5738 } 5739 5740 /* 5741 * At this point, we know that we're on the controller's node. We first 5742 * deal with the simple case, is a namespace allowed at all or not. If 5743 * it is not allowed, then the only acceptable value is zero. 
5744 */ 5745 if (!check->nck_ns_ok) { 5746 if (ioc->nioc_nsid != 0) { 5747 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0, 5748 0)); 5749 } 5750 5751 return (nvme_ioctl_excl_check(minor, ioc, check)); 5752 } 5753 5754 /* 5755 * At this point, we know that a controller is allowed to use a 5756 * namespace. If we haven't been given zero or the broadcast namespace, 5757 * check to see if it's actually a valid namespace ID. If is outside of 5758 * range, then it is an error. Next, if we have been requested to 5759 * rewrite 0 (the this controller indicator) as the broadcast namespace, 5760 * do so. 5761 * 5762 * While we validate that this namespace is within the valid range, we 5763 * do not check if it is active or inactive. That is left to our callers 5764 * to determine. 5765 */ 5766 if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count && 5767 ioc->nioc_nsid != NVME_NSID_BCAST) { 5768 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0)); 5769 } 5770 5771 if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) { 5772 ioc->nioc_nsid = NVME_NSID_BCAST; 5773 } 5774 5775 /* 5776 * Finally, see if we have ended up with a broadcast namespace ID 5777 * whether through specification or rewriting. If that is not allowed, 5778 * then that is an error. 5779 */ 5780 if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) { 5781 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0)); 5782 } 5783 5784 return (nvme_ioctl_excl_check(minor, ioc, check)); 5785 } 5786 5787 static int 5788 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode, 5789 cred_t *cred_p) 5790 { 5791 nvme_t *const nvme = minor->nm_ctrl; 5792 nvme_ioctl_ctrl_info_t *info; 5793 nvme_reg_cap_t cap = { 0 }; 5794 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL }; 5795 void *idbuf; 5796 5797 if ((mode & FREAD) == 0) 5798 return (EBADF); 5799 5800 info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY); 5801 if (info == NULL) { 5802 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg, 5803 mode)); 5804 } 5805 5806 if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t), 5807 mode & FKIOCTL) != 0) { 5808 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t)); 5809 return (EFAULT); 5810 } 5811 5812 if (!nvme_ioctl_check(minor, &info->nci_common, 5813 &nvme_check_ctrl_info)) { 5814 goto copyout; 5815 } 5816 5817 /* 5818 * We explicitly do not use the identify controller copy in the kernel 5819 * right now so that way we can get a snapshot of the controller's 5820 * current capacity and values. While it's tempting to try to use this 5821 * to refresh the kernel's version we don't just to simplify the rest of 5822 * the driver right now. 5823 */ 5824 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) { 5825 info->nci_common = id.nid_common; 5826 goto copyout; 5827 } 5828 bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t)); 5829 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE); 5830 5831 /* 5832 * Use the kernel's cached common namespace information for this. 5833 */ 5834 bcopy(nvme->n_idcomns, &info->nci_common_ns, 5835 sizeof (nvme_identify_nsid_t)); 5836 5837 info->nci_vers = nvme->n_version; 5838 5839 /* 5840 * The MPSMIN and MPSMAX fields in the CAP register use 0 to 5841 * specify the base page size of 4k (1<<12), so add 12 here to 5842 * get the real page size value. 
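 * For example, a CAP.MPSMIN of 0 yields 1 << (12 + 0) = 4096 bytes, and a
 * CAP.MPSMAX of 4 yields 1 << (12 + 4) = 65536 bytes.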
5843 */ 5844 cap.r = nvme_get64(nvme, NVME_REG_CAP); 5845 info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax); 5846 info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin); 5847 5848 info->nci_nintrs = (uint32_t)nvme->n_intr_cnt; 5849 5850 copyout: 5851 if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t), 5852 mode & FKIOCTL) != 0) { 5853 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t)); 5854 return (EFAULT); 5855 } 5856 5857 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t)); 5858 return (0); 5859 } 5860 5861 static int 5862 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 5863 { 5864 nvme_t *const nvme = minor->nm_ctrl; 5865 nvme_ioctl_ns_info_t *ns_info; 5866 nvme_namespace_t *ns; 5867 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID }; 5868 void *idbuf; 5869 5870 if ((mode & FREAD) == 0) 5871 return (EBADF); 5872 5873 ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY); 5874 if (ns_info == NULL) { 5875 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg, 5876 mode)); 5877 } 5878 5879 if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t), 5880 mode & FKIOCTL) != 0) { 5881 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t)); 5882 return (EFAULT); 5883 } 5884 5885 if (!nvme_ioctl_check(minor, &ns_info->nni_common, 5886 &nvme_check_ns_info)) { 5887 goto copyout; 5888 } 5889 5890 ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0); 5891 ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid); 5892 5893 /* 5894 * First fetch a fresh copy of the namespace information. Most callers 5895 * are using this because they will want a mostly accurate snapshot of 5896 * capacity and utilization. 5897 */ 5898 id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid; 5899 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) { 5900 ns_info->nni_common = id.nid_common; 5901 goto copyout; 5902 } 5903 bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t)); 5904 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE); 5905 5906 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 5907 if (ns->ns_allocated) 5908 ns_info->nni_state |= NVME_NS_STATE_ALLOCATED; 5909 5910 if (ns->ns_active) 5911 ns_info->nni_state |= NVME_NS_STATE_ACTIVE; 5912 5913 if (ns->ns_ignore) 5914 ns_info->nni_state |= NVME_NS_STATE_IGNORED; 5915 5916 if (ns->ns_attached) { 5917 const char *addr; 5918 5919 ns_info->nni_state |= NVME_NS_STATE_ATTACHED; 5920 addr = bd_address(ns->ns_bd_hdl); 5921 if (strlcpy(ns_info->nni_addr, addr, 5922 sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) { 5923 nvme_mgmt_unlock(nvme); 5924 (void) nvme_ioctl_error(&ns_info->nni_common, 5925 NVME_IOCTL_E_BD_ADDR_OVER, 0, 0); 5926 goto copyout; 5927 } 5928 } 5929 nvme_mgmt_unlock(nvme); 5930 5931 copyout: 5932 if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t), 5933 mode & FKIOCTL) != 0) { 5934 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t)); 5935 return (EFAULT); 5936 } 5937 5938 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t)); 5939 return (0); 5940 } 5941 5942 static int 5943 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 5944 { 5945 _NOTE(ARGUNUSED(cred_p)); 5946 nvme_t *const nvme = minor->nm_ctrl; 5947 void *idctl; 5948 uint_t model; 5949 nvme_ioctl_identify_t id; 5950 #ifdef _MULTI_DATAMODEL 5951 nvme_ioctl_identify32_t id32; 5952 #endif 5953 boolean_t ns_minor; 5954 5955 if ((mode & FREAD) == 0) 5956 return (EBADF); 5957 5958 model = ddi_model_convert_from(mode); 5959 switch (model) { 5960 #ifdef _MULTI_DATAMODEL 5961 case 
DDI_MODEL_ILP32: 5962 bzero(&id, sizeof (id)); 5963 if (ddi_copyin((void *)arg, &id32, sizeof (id32), 5964 mode & FKIOCTL) != 0) { 5965 return (EFAULT); 5966 } 5967 id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid; 5968 id.nid_cns = id32.nid_cns; 5969 id.nid_ctrlid = id32.nid_ctrlid; 5970 id.nid_data = id32.nid_data; 5971 break; 5972 #endif /* _MULTI_DATAMODEL */ 5973 case DDI_MODEL_NONE: 5974 if (ddi_copyin((void *)arg, &id, sizeof (id), 5975 mode & FKIOCTL) != 0) { 5976 return (EFAULT); 5977 } 5978 break; 5979 default: 5980 return (ENOTSUP); 5981 } 5982 5983 if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) { 5984 goto copyout; 5985 } 5986 5987 ns_minor = minor->nm_ns != NULL; 5988 if (!nvme_validate_identify(nvme, &id, ns_minor)) { 5989 goto copyout; 5990 } 5991 5992 if (nvme_identify(nvme, B_TRUE, &id, &idctl)) { 5993 int ret = ddi_copyout(idctl, (void *)id.nid_data, 5994 NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL); 5995 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE); 5996 if (ret != 0) { 5997 (void) nvme_ioctl_error(&id.nid_common, 5998 NVME_IOCTL_E_BAD_USER_DATA, 0, 0); 5999 goto copyout; 6000 } 6001 6002 nvme_ioctl_success(&id.nid_common); 6003 } 6004 6005 copyout: 6006 switch (model) { 6007 #ifdef _MULTI_DATAMODEL 6008 case DDI_MODEL_ILP32: 6009 id32.nid_common = id.nid_common; 6010 6011 if (ddi_copyout(&id32, (void *)arg, sizeof (id32), 6012 mode & FKIOCTL) != 0) { 6013 return (EFAULT); 6014 } 6015 break; 6016 #endif /* _MULTI_DATAMODEL */ 6017 case DDI_MODEL_NONE: 6018 if (ddi_copyout(&id, (void *)arg, sizeof (id), 6019 mode & FKIOCTL) != 0) { 6020 return (EFAULT); 6021 } 6022 break; 6023 default: 6024 return (ENOTSUP); 6025 } 6026 6027 return (0); 6028 } 6029 6030 /* 6031 * Execute commands on behalf of the various ioctls. 6032 * 6033 * If this returns true then the command completed successfully. Otherwise error 6034 * information is returned in the nvme_ioctl_common_t arguments. 6035 */ 6036 typedef struct { 6037 nvme_sqe_t *ica_sqe; 6038 void *ica_data; 6039 uint32_t ica_data_len; 6040 uint_t ica_dma_flags; 6041 int ica_copy_flags; 6042 uint32_t ica_timeout; 6043 uint32_t ica_cdw0; 6044 } nvme_ioc_cmd_args_t; 6045 6046 static boolean_t 6047 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args) 6048 { 6049 nvme_cmd_t *cmd; 6050 boolean_t ret = B_FALSE; 6051 6052 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 6053 cmd->nc_sqid = 0; 6054 6055 /* 6056 * This function is used to facilitate requests from 6057 * userspace, so don't panic if the command fails. This 6058 * is especially true for admin passthru commands, where 6059 * the actual command data structure is entirely defined 6060 * by userspace. 
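 * Instead of panicking, any controller error is captured by
 * nvme_check_cmd_status_ioctl() below and reported back to the caller
 * through the nvme_ioctl_common_t.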
6061 */ 6062 cmd->nc_dontpanic = B_TRUE; 6063 6064 cmd->nc_callback = nvme_wakeup_cmd; 6065 cmd->nc_sqe = *args->ica_sqe; 6066 6067 if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) { 6068 if (args->ica_data == NULL) { 6069 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM, 6070 0, 0); 6071 goto free_cmd; 6072 } 6073 6074 if (nvme_zalloc_dma(nvme, args->ica_data_len, 6075 args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) != 6076 DDI_SUCCESS) { 6077 dev_err(nvme->n_dip, CE_WARN, 6078 "!nvme_zalloc_dma failed for nvme_ioc_cmd()"); 6079 ret = nvme_ioctl_error(ioc, 6080 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 6081 goto free_cmd; 6082 } 6083 6084 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) { 6085 ret = nvme_ioctl_error(ioc, 6086 NVME_IOCTL_E_NO_DMA_MEM, 0, 0); 6087 goto free_cmd; 6088 } 6089 6090 if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 && 6091 ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp, 6092 args->ica_data_len, args->ica_copy_flags) != 0) { 6093 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 6094 0, 0); 6095 goto free_cmd; 6096 } 6097 } 6098 6099 nvme_admin_cmd(cmd, args->ica_timeout); 6100 6101 if (!nvme_check_cmd_status_ioctl(cmd, ioc)) { 6102 ret = B_FALSE; 6103 goto free_cmd; 6104 } 6105 6106 args->ica_cdw0 = cmd->nc_cqe.cqe_dw0; 6107 6108 if ((args->ica_dma_flags & DDI_DMA_READ) != 0 && 6109 ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data, 6110 args->ica_data_len, args->ica_copy_flags) != 0) { 6111 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0); 6112 goto free_cmd; 6113 } 6114 6115 ret = B_TRUE; 6116 nvme_ioctl_success(ioc); 6117 6118 free_cmd: 6119 nvme_free_cmd(cmd); 6120 6121 return (ret); 6122 } 6123 6124 static int 6125 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode, 6126 cred_t *cred_p) 6127 { 6128 nvme_t *const nvme = minor->nm_ctrl; 6129 void *buf; 6130 nvme_ioctl_get_logpage_t log; 6131 uint_t model; 6132 #ifdef _MULTI_DATAMODEL 6133 nvme_ioctl_get_logpage32_t log32; 6134 #endif 6135 6136 if ((mode & FREAD) == 0) { 6137 return (EBADF); 6138 } 6139 6140 model = ddi_model_convert_from(mode); 6141 switch (model) { 6142 #ifdef _MULTI_DATAMODEL 6143 case DDI_MODEL_ILP32: 6144 bzero(&log, sizeof (log)); 6145 if (ddi_copyin((void *)arg, &log32, sizeof (log32), 6146 mode & FKIOCTL) != 0) { 6147 return (EFAULT); 6148 } 6149 6150 log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid; 6151 log.nigl_csi = log32.nigl_csi; 6152 log.nigl_lid = log32.nigl_lid; 6153 log.nigl_lsp = log32.nigl_lsp; 6154 log.nigl_len = log32.nigl_len; 6155 log.nigl_offset = log32.nigl_offset; 6156 log.nigl_data = log32.nigl_data; 6157 break; 6158 #endif /* _MULTI_DATAMODEL */ 6159 case DDI_MODEL_NONE: 6160 if (ddi_copyin((void *)arg, &log, sizeof (log), 6161 mode & FKIOCTL) != 0) { 6162 return (EFAULT); 6163 } 6164 break; 6165 default: 6166 return (ENOTSUP); 6167 } 6168 6169 /* 6170 * Eventually we'd like to do a soft lock on the namespaces from 6171 * changing out from us during this operation in the future. But we 6172 * haven't implemented that yet. 
6173 */ 6174 if (!nvme_ioctl_check(minor, &log.nigl_common, 6175 &nvme_check_get_logpage)) { 6176 goto copyout; 6177 } 6178 6179 if (!nvme_validate_logpage(nvme, &log)) { 6180 goto copyout; 6181 } 6182 6183 if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) { 6184 int copy; 6185 6186 copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len, 6187 mode & FKIOCTL); 6188 kmem_free(buf, log.nigl_len); 6189 if (copy != 0) { 6190 (void) nvme_ioctl_error(&log.nigl_common, 6191 NVME_IOCTL_E_BAD_USER_DATA, 0, 0); 6192 goto copyout; 6193 } 6194 6195 nvme_ioctl_success(&log.nigl_common); 6196 } 6197 6198 copyout: 6199 switch (model) { 6200 #ifdef _MULTI_DATAMODEL 6201 case DDI_MODEL_ILP32: 6202 bzero(&log32, sizeof (log32)); 6203 6204 log32.nigl_common = log.nigl_common; 6205 log32.nigl_csi = log.nigl_csi; 6206 log32.nigl_lid = log.nigl_lid; 6207 log32.nigl_lsp = log.nigl_lsp; 6208 log32.nigl_len = log.nigl_len; 6209 log32.nigl_offset = log.nigl_offset; 6210 log32.nigl_data = log.nigl_data; 6211 if (ddi_copyout(&log32, (void *)arg, sizeof (log32), 6212 mode & FKIOCTL) != 0) { 6213 return (EFAULT); 6214 } 6215 break; 6216 #endif /* _MULTI_DATAMODEL */ 6217 case DDI_MODEL_NONE: 6218 if (ddi_copyout(&log, (void *)arg, sizeof (log), 6219 mode & FKIOCTL) != 0) { 6220 return (EFAULT); 6221 } 6222 break; 6223 default: 6224 return (ENOTSUP); 6225 } 6226 6227 return (0); 6228 } 6229 6230 static int 6231 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode, 6232 cred_t *cred_p) 6233 { 6234 nvme_t *const nvme = minor->nm_ctrl; 6235 nvme_ioctl_get_feature_t feat; 6236 uint_t model; 6237 #ifdef _MULTI_DATAMODEL 6238 nvme_ioctl_get_feature32_t feat32; 6239 #endif 6240 nvme_get_features_dw10_t gf_dw10 = { 0 }; 6241 nvme_ioc_cmd_args_t args = { NULL }; 6242 nvme_sqe_t sqe = { 6243 .sqe_opc = NVME_OPC_GET_FEATURES 6244 }; 6245 6246 if ((mode & FREAD) == 0) { 6247 return (EBADF); 6248 } 6249 6250 model = ddi_model_convert_from(mode); 6251 switch (model) { 6252 #ifdef _MULTI_DATAMODEL 6253 case DDI_MODEL_ILP32: 6254 bzero(&feat, sizeof (feat)); 6255 if (ddi_copyin((void *)arg, &feat32, sizeof (feat32), 6256 mode & FKIOCTL) != 0) { 6257 return (EFAULT); 6258 } 6259 6260 feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid; 6261 feat.nigf_fid = feat32.nigf_fid; 6262 feat.nigf_sel = feat32.nigf_sel; 6263 feat.nigf_cdw11 = feat32.nigf_cdw11; 6264 feat.nigf_data = feat32.nigf_data; 6265 feat.nigf_len = feat32.nigf_len; 6266 break; 6267 #endif /* _MULTI_DATAMODEL */ 6268 case DDI_MODEL_NONE: 6269 if (ddi_copyin((void *)arg, &feat, sizeof (feat), 6270 mode & FKIOCTL) != 0) { 6271 return (EFAULT); 6272 } 6273 break; 6274 default: 6275 return (ENOTSUP); 6276 } 6277 6278 if (!nvme_ioctl_check(minor, &feat.nigf_common, 6279 &nvme_check_get_feature)) { 6280 goto copyout; 6281 } 6282 6283 if (!nvme_validate_get_feature(nvme, &feat)) { 6284 goto copyout; 6285 } 6286 6287 gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0); 6288 gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0); 6289 sqe.sqe_cdw10 = gf_dw10.r; 6290 sqe.sqe_cdw11 = feat.nigf_cdw11; 6291 sqe.sqe_nsid = feat.nigf_common.nioc_nsid; 6292 6293 args.ica_sqe = &sqe; 6294 if (feat.nigf_len != 0) { 6295 args.ica_data = (void *)feat.nigf_data; 6296 args.ica_data_len = feat.nigf_len; 6297 args.ica_dma_flags = DDI_DMA_READ; 6298 } 6299 args.ica_copy_flags = mode; 6300 args.ica_timeout = nvme_admin_cmd_timeout; 6301 6302 if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) { 6303 goto copyout; 6304 } 6305 6306 feat.nigf_cdw0 = args.ica_cdw0; 6307 6308 copyout: 6309 switch 
(model) { 6310 #ifdef _MULTI_DATAMODEL 6311 case DDI_MODEL_ILP32: 6312 bzero(&feat32, sizeof (feat32)); 6313 6314 feat32.nigf_common = feat.nigf_common; 6315 feat32.nigf_fid = feat.nigf_fid; 6316 feat32.nigf_sel = feat.nigf_sel; 6317 feat32.nigf_cdw11 = feat.nigf_cdw11; 6318 feat32.nigf_data = feat.nigf_data; 6319 feat32.nigf_len = feat.nigf_len; 6320 feat32.nigf_cdw0 = feat.nigf_cdw0; 6321 if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32), 6322 mode & FKIOCTL) != 0) { 6323 return (EFAULT); 6324 } 6325 break; 6326 #endif /* _MULTI_DATAMODEL */ 6327 case DDI_MODEL_NONE: 6328 if (ddi_copyout(&feat, (void *)arg, sizeof (feat), 6329 mode & FKIOCTL) != 0) { 6330 return (EFAULT); 6331 } 6332 break; 6333 default: 6334 return (ENOTSUP); 6335 } 6336 6337 return (0); 6338 } 6339 6340 static int 6341 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6342 { 6343 nvme_t *const nvme = minor->nm_ctrl; 6344 nvme_ioctl_format_t ioc; 6345 6346 if ((mode & FWRITE) == 0) 6347 return (EBADF); 6348 6349 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6350 return (EPERM); 6351 6352 if (ddi_copyin((void *)(uintptr_t)arg, &ioc, 6353 sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0) 6354 return (EFAULT); 6355 6356 if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) { 6357 goto copyout; 6358 } 6359 6360 if (!nvme_validate_format(nvme, &ioc)) { 6361 goto copyout; 6362 } 6363 6364 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6365 if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) { 6366 nvme_mgmt_unlock(nvme); 6367 (void) nvme_ioctl_error(&ioc.nif_common, 6368 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0); 6369 goto copyout; 6370 } 6371 6372 if (nvme_format_nvm(nvme, &ioc)) { 6373 nvme_ioctl_success(&ioc.nif_common); 6374 nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid); 6375 } 6376 nvme_mgmt_unlock(nvme); 6377 6378 copyout: 6379 if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc), 6380 mode & FKIOCTL) != 0) { 6381 return (EFAULT); 6382 } 6383 6384 return (0); 6385 } 6386 6387 static int 6388 nvme_ioctl_detach(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6389 { 6390 nvme_t *const nvme = minor->nm_ctrl; 6391 nvme_ioctl_common_t com; 6392 6393 if ((mode & FWRITE) == 0) 6394 return (EBADF); 6395 6396 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6397 return (EPERM); 6398 6399 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com), 6400 mode & FKIOCTL) != 0) { 6401 return (EFAULT); 6402 } 6403 6404 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) { 6405 goto copyout; 6406 } 6407 6408 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6409 if (nvme_detach_ns(nvme, &com)) { 6410 nvme_ioctl_success(&com); 6411 } 6412 nvme_mgmt_unlock(nvme); 6413 6414 copyout: 6415 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com), 6416 mode & FKIOCTL) != 0) { 6417 return (EFAULT); 6418 } 6419 6420 return (0); 6421 } 6422 6423 static int 6424 nvme_ioctl_attach(nvme_minor_t *minor, intptr_t arg, int mode, 6425 cred_t *cred_p) 6426 { 6427 nvme_t *const nvme = minor->nm_ctrl; 6428 nvme_ioctl_common_t com; 6429 nvme_namespace_t *ns; 6430 6431 if ((mode & FWRITE) == 0) 6432 return (EBADF); 6433 6434 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6435 return (EPERM); 6436 6437 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com), 6438 mode & FKIOCTL) != 0) { 6439 return (EFAULT); 6440 } 6441 6442 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) { 6443 goto copyout; 6444 } 6445 6446 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6447 ns = 
nvme_nsid2ns(nvme, com.nioc_nsid); 6448 6449 /* 6450 * Strictly speaking we shouldn't need to call nvme_init_ns() here as 6451 * we should be properly refreshing the internal state when we are 6452 * issuing commands that change things. However, we opt to still do so 6453 * as a bit of a safety check lest we give the kernel something bad or a 6454 * vendor unique command somehow did something behind our backs. 6455 */ 6456 if (!ns->ns_attached) { 6457 (void) nvme_rescan_ns(nvme, com.nioc_nsid); 6458 if (nvme_attach_ns(nvme, &com)) { 6459 nvme_ioctl_success(&com); 6460 } 6461 } else { 6462 nvme_ioctl_success(&com); 6463 } 6464 nvme_mgmt_unlock(nvme); 6465 6466 copyout: 6467 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com), 6468 mode & FKIOCTL) != 0) { 6469 return (EFAULT); 6470 } 6471 6472 return (0); 6473 } 6474 6475 static void 6476 nvme_ufm_update(nvme_t *nvme) 6477 { 6478 mutex_enter(&nvme->n_fwslot_mutex); 6479 ddi_ufm_update(nvme->n_ufmh); 6480 if (nvme->n_fwslot != NULL) { 6481 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t)); 6482 nvme->n_fwslot = NULL; 6483 } 6484 mutex_exit(&nvme->n_fwslot_mutex); 6485 } 6486 6487 /* 6488 * Download new firmware to the device's internal staging area. We do not call 6489 * nvme_ufm_update() here because after a firmware download, there has been no 6490 * change to any of the actual persistent firmware data. That requires a 6491 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot 6492 * or to activate a slot. 6493 */ 6494 static int 6495 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode, 6496 cred_t *cred_p) 6497 { 6498 nvme_t *const nvme = minor->nm_ctrl; 6499 nvme_ioctl_fw_load_t fw; 6500 uint64_t len, maxcopy; 6501 offset_t offset; 6502 uint32_t gran; 6503 nvme_valid_ctrl_data_t data; 6504 uintptr_t buf; 6505 nvme_sqe_t sqe = { 6506 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD 6507 }; 6508 6509 if ((mode & FWRITE) == 0) 6510 return (EBADF); 6511 6512 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6513 return (EPERM); 6514 6515 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw), 6516 mode & FKIOCTL) != 0) { 6517 return (EFAULT); 6518 } 6519 6520 if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) { 6521 goto copyout; 6522 } 6523 6524 if (!nvme_validate_fw_load(nvme, &fw)) { 6525 goto copyout; 6526 } 6527 6528 len = fw.fwl_len; 6529 offset = fw.fwl_off; 6530 buf = fw.fwl_buf; 6531 6532 /* 6533 * We need to determine the minimum and maximum amount of data that we 6534 * will send to the device in a given go. Starting in NMVe 1.3 this must 6535 * be a multiple of the firmware update granularity (FWUG), but must not 6536 * exceed the maximum data transfer that we've set. Many devices don't 6537 * report something here, which means we'll end up getting our default 6538 * value. Our policy is a little simple, but it's basically if the 6539 * maximum data transfer is evenly divided by the granularity, then use 6540 * it. Otherwise we use the granularity itself. The granularity is 6541 * always in page sized units, so trying to find another optimum point 6542 * isn't worth it. If we encounter a contradiction, then we will have to 6543 * error out. 
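 * As a worked example (illustrative sizes only): with a 1 MiB maximum
 * transfer and a 4 KiB granularity, 1 MiB divides evenly and is used as
 * the chunk size; with a 768 KiB granularity it does not divide evenly,
 * so 768 KiB chunks are sent instead; a granularity larger than the
 * maximum transfer is the contradiction case and fails with
 * NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN.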
6544 */ 6545 data.vcd_vers = &nvme->n_version; 6546 data.vcd_id = nvme->n_idctl; 6547 gran = nvme_fw_load_granularity(&data); 6548 6549 if ((nvme->n_max_data_transfer_size % gran) == 0) { 6550 maxcopy = nvme->n_max_data_transfer_size; 6551 } else if (gran <= nvme->n_max_data_transfer_size) { 6552 maxcopy = gran; 6553 } else { 6554 (void) nvme_ioctl_error(&fw.fwl_common, 6555 NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0); 6556 goto copyout; 6557 } 6558 6559 while (len > 0) { 6560 nvme_ioc_cmd_args_t args = { NULL }; 6561 uint64_t copylen = MIN(maxcopy, len); 6562 6563 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1; 6564 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT); 6565 6566 args.ica_sqe = &sqe; 6567 args.ica_data = (void *)buf; 6568 args.ica_data_len = copylen; 6569 args.ica_dma_flags = DDI_DMA_WRITE; 6570 args.ica_copy_flags = mode; 6571 args.ica_timeout = nvme_admin_cmd_timeout; 6572 6573 if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) { 6574 break; 6575 } 6576 6577 buf += copylen; 6578 offset += copylen; 6579 len -= copylen; 6580 } 6581 6582 copyout: 6583 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw), 6584 mode & FKIOCTL) != 0) { 6585 return (EFAULT); 6586 } 6587 6588 return (0); 6589 } 6590 6591 static int 6592 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode, 6593 cred_t *cred_p) 6594 { 6595 nvme_t *const nvme = minor->nm_ctrl; 6596 nvme_ioctl_fw_commit_t fw; 6597 nvme_firmware_commit_dw10_t fc_dw10 = { 0 }; 6598 nvme_ioc_cmd_args_t args = { NULL }; 6599 nvme_sqe_t sqe = { 6600 .sqe_opc = NVME_OPC_FW_ACTIVATE 6601 }; 6602 6603 if ((mode & FWRITE) == 0) 6604 return (EBADF); 6605 6606 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6607 return (EPERM); 6608 6609 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw), 6610 mode & FKIOCTL) != 0) { 6611 return (EFAULT); 6612 } 6613 6614 if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) { 6615 goto copyout; 6616 } 6617 6618 if (!nvme_validate_fw_commit(nvme, &fw)) { 6619 goto copyout; 6620 } 6621 6622 fc_dw10.b.fc_slot = fw.fwc_slot; 6623 fc_dw10.b.fc_action = fw.fwc_action; 6624 sqe.sqe_cdw10 = fc_dw10.r; 6625 6626 args.ica_sqe = &sqe; 6627 args.ica_timeout = nvme_commit_save_cmd_timeout; 6628 6629 /* 6630 * There are no conditional actions to take based on this succeeding or 6631 * failing. A failure is recorded in the ioctl structure returned to the 6632 * user. 6633 */ 6634 (void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args); 6635 6636 /* 6637 * Let the DDI UFM subsystem know that the firmware information for 6638 * this device has changed. We perform this unconditionally as an 6639 * invalidation doesn't particularly hurt us. 6640 */ 6641 nvme_ufm_update(nvme); 6642 6643 copyout: 6644 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw), 6645 mode & FKIOCTL) != 0) { 6646 return (EFAULT); 6647 } 6648 6649 return (0); 6650 } 6651 6652 /* 6653 * Helper to copy in a passthru command from userspace, handling 6654 * different data models. 
6655 */ 6656 static int 6657 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode) 6658 { 6659 switch (ddi_model_convert_from(mode & FMODELS)) { 6660 #ifdef _MULTI_DATAMODEL 6661 case DDI_MODEL_ILP32: { 6662 nvme_ioctl_passthru32_t cmd32; 6663 6664 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0) 6665 return (EFAULT); 6666 6667 bzero(cmd, sizeof (nvme_ioctl_passthru_t)); 6668 6669 cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid; 6670 cmd->npc_opcode = cmd32.npc_opcode; 6671 cmd->npc_timeout = cmd32.npc_timeout; 6672 cmd->npc_flags = cmd32.npc_flags; 6673 cmd->npc_impact = cmd32.npc_impact; 6674 cmd->npc_cdw12 = cmd32.npc_cdw12; 6675 cmd->npc_cdw13 = cmd32.npc_cdw13; 6676 cmd->npc_cdw14 = cmd32.npc_cdw14; 6677 cmd->npc_cdw15 = cmd32.npc_cdw15; 6678 cmd->npc_buflen = cmd32.npc_buflen; 6679 cmd->npc_buf = cmd32.npc_buf; 6680 break; 6681 } 6682 #endif /* _MULTI_DATAMODEL */ 6683 case DDI_MODEL_NONE: 6684 if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t), 6685 mode) != 0) { 6686 return (EFAULT); 6687 } 6688 break; 6689 default: 6690 return (ENOTSUP); 6691 } 6692 6693 return (0); 6694 } 6695 6696 /* 6697 * Helper to copy out a passthru command result to userspace, handling 6698 * different data models. 6699 */ 6700 static int 6701 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode) 6702 { 6703 switch (ddi_model_convert_from(mode & FMODELS)) { 6704 #ifdef _MULTI_DATAMODEL 6705 case DDI_MODEL_ILP32: { 6706 nvme_ioctl_passthru32_t cmd32; 6707 6708 bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t)); 6709 6710 cmd32.npc_common = cmd->npc_common; 6711 cmd32.npc_opcode = cmd->npc_opcode; 6712 cmd32.npc_timeout = cmd->npc_timeout; 6713 cmd32.npc_flags = cmd->npc_flags; 6714 cmd32.npc_impact = cmd->npc_impact; 6715 cmd32.npc_cdw0 = cmd->npc_cdw0; 6716 cmd32.npc_cdw12 = cmd->npc_cdw12; 6717 cmd32.npc_cdw13 = cmd->npc_cdw13; 6718 cmd32.npc_cdw14 = cmd->npc_cdw14; 6719 cmd32.npc_cdw15 = cmd->npc_cdw15; 6720 cmd32.npc_buflen = (size32_t)cmd->npc_buflen; 6721 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf; 6722 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0) 6723 return (EFAULT); 6724 break; 6725 } 6726 #endif /* _MULTI_DATAMODEL */ 6727 case DDI_MODEL_NONE: 6728 if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t), 6729 mode) != 0) { 6730 return (EFAULT); 6731 } 6732 break; 6733 default: 6734 return (ENOTSUP); 6735 } 6736 return (0); 6737 } 6738 6739 /* 6740 * Run an arbitrary vendor-specific admin command on the device. 6741 */ 6742 static int 6743 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p) 6744 { 6745 nvme_t *const nvme = minor->nm_ctrl; 6746 int rv; 6747 nvme_ioctl_passthru_t pass; 6748 nvme_sqe_t sqe; 6749 nvme_ioc_cmd_args_t args = { NULL }; 6750 6751 /* 6752 * Basic checks: permissions, data model, argument size. 6753 */ 6754 if ((mode & FWRITE) == 0) 6755 return (EBADF); 6756 6757 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6758 return (EPERM); 6759 6760 if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass, 6761 mode)) != 0) { 6762 return (rv); 6763 } 6764 6765 if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) { 6766 goto copyout; 6767 } 6768 6769 if (!nvme_validate_vuc(nvme, &pass)) { 6770 goto copyout; 6771 } 6772 6773 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME); 6774 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) { 6775 /* 6776 * We've been told this has ns impact. 
Right now force that to 6777 * be every ns until we have more use cases and reason to trust 6778 * the nsid field. 6779 */ 6780 if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) { 6781 nvme_mgmt_unlock(nvme); 6782 (void) nvme_ioctl_error(&pass.npc_common, 6783 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0); 6784 goto copyout; 6785 } 6786 } 6787 6788 bzero(&sqe, sizeof (sqe)); 6789 6790 sqe.sqe_opc = pass.npc_opcode; 6791 sqe.sqe_nsid = pass.npc_common.nioc_nsid; 6792 sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT); 6793 sqe.sqe_cdw12 = pass.npc_cdw12; 6794 sqe.sqe_cdw13 = pass.npc_cdw13; 6795 sqe.sqe_cdw14 = pass.npc_cdw14; 6796 sqe.sqe_cdw15 = pass.npc_cdw15; 6797 6798 args.ica_sqe = &sqe; 6799 args.ica_data = (void *)pass.npc_buf; 6800 args.ica_data_len = pass.npc_buflen; 6801 args.ica_copy_flags = mode; 6802 args.ica_timeout = pass.npc_timeout; 6803 6804 if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0) 6805 args.ica_dma_flags |= DDI_DMA_READ; 6806 else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0) 6807 args.ica_dma_flags |= DDI_DMA_WRITE; 6808 6809 if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) { 6810 pass.npc_cdw0 = args.ica_cdw0; 6811 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) { 6812 nvme_rescan_ns(nvme, NVME_NSID_BCAST); 6813 } 6814 } 6815 nvme_mgmt_unlock(nvme); 6816 6817 copyout: 6818 rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg, 6819 mode); 6820 6821 return (rv); 6822 } 6823 6824 static int 6825 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode, 6826 cred_t *cred_p) 6827 { 6828 nvme_ioctl_lock_t lock; 6829 const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK; 6830 nvme_t *nvme = minor->nm_ctrl; 6831 6832 if ((mode & FWRITE) == 0) 6833 return (EBADF); 6834 6835 if (secpolicy_sys_config(cred_p, B_FALSE) != 0) 6836 return (EPERM); 6837 6838 if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock), 6839 mode & FKIOCTL) != 0) { 6840 return (EFAULT); 6841 } 6842 6843 if (lock.nil_ent != NVME_LOCK_E_CTRL && 6844 lock.nil_ent != NVME_LOCK_E_NS) { 6845 (void) nvme_ioctl_error(&lock.nil_common, 6846 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0); 6847 goto copyout; 6848 } 6849 6850 if (lock.nil_level != NVME_LOCK_L_READ && 6851 lock.nil_level != NVME_LOCK_L_WRITE) { 6852 (void) nvme_ioctl_error(&lock.nil_common, 6853 NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0); 6854 goto copyout; 6855 } 6856 6857 if ((lock.nil_flags & ~all_flags) != 0) { 6858 (void) nvme_ioctl_error(&lock.nil_common, 6859 NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0); 6860 goto copyout; 6861 } 6862 6863 if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) { 6864 goto copyout; 6865 } 6866 6867 /* 6868 * If we're on a namespace, confirm that we're not asking for the 6869 * controller. 6870 */ 6871 if (lock.nil_common.nioc_nsid != 0 && 6872 lock.nil_ent == NVME_LOCK_E_CTRL) { 6873 (void) nvme_ioctl_error(&lock.nil_common, 6874 NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0); 6875 goto copyout; 6876 } 6877 6878 /* 6879 * We've reached the point where we can no longer actually check things 6880 * without serializing state. First, we need to check to make sure that 6881 * none of our invariants are being broken for locking: 6882 * 6883 * 1) The caller isn't already blocking for a lock operation to 6884 * complete. 6885 * 6886 * 2) The caller is attempting to grab a lock that they already have. 6887 * While there are other rule violations that this might create, we opt 6888 * to check this ahead of it so we can have slightly better error 6889 * messages for our callers. 
6890 * 6891 * 3) The caller is trying to grab a controller lock, while holding a 6892 * namespace lock. 6893 * 6894 * 4) The caller has a controller write lock and is trying to get a 6895 * namespace lock. For now, we disallow this case. Holding a controller 6896 * read lock is allowed, but the write lock allows you to operate on all 6897 * namespaces anyways. In addition, this simplifies the locking logic; 6898 * however, this constraint may be loosened in the future. 6899 * 6900 * 5) The caller is trying to acquire a second namespace lock when they 6901 * already have one. 6902 */ 6903 mutex_enter(&nvme->n_minor_mutex); 6904 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED || 6905 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) { 6906 (void) nvme_ioctl_error(&lock.nil_common, 6907 NVME_IOCTL_E_LOCK_PENDING, 0, 0); 6908 mutex_exit(&nvme->n_minor_mutex); 6909 goto copyout; 6910 } 6911 6912 if ((lock.nil_ent == NVME_LOCK_E_CTRL && 6913 minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) || 6914 (lock.nil_ent == NVME_LOCK_E_NS && 6915 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED && 6916 minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) { 6917 (void) nvme_ioctl_error(&lock.nil_common, 6918 NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0); 6919 mutex_exit(&nvme->n_minor_mutex); 6920 goto copyout; 6921 } 6922 6923 if (lock.nil_ent == NVME_LOCK_E_CTRL && 6924 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) { 6925 (void) nvme_ioctl_error(&lock.nil_common, 6926 NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0); 6927 mutex_exit(&nvme->n_minor_mutex); 6928 goto copyout; 6929 } 6930 6931 if (lock.nil_ent == NVME_LOCK_E_NS && 6932 (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED && 6933 minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) { 6934 (void) nvme_ioctl_error(&lock.nil_common, 6935 NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0); 6936 mutex_exit(&nvme->n_minor_mutex); 6937 goto copyout; 6938 } 6939 6940 if (lock.nil_ent == NVME_LOCK_E_NS && 6941 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) { 6942 (void) nvme_ioctl_error(&lock.nil_common, 6943 NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0); 6944 mutex_exit(&nvme->n_minor_mutex); 6945 goto copyout; 6946 } 6947 6948 6949 #ifdef DEBUG 6950 /* 6951 * This is a big block of sanity checks to make sure that we haven't 6952 * allowed anything bad to happen. 
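 * In particular, after passing the checks above, the lock info that is
 * about to be used must still be completely idle: nli_lock is NULL, the
 * state is NVME_LOCK_STATE_UNLOCKED, and it is not linked on any list.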
6953 */ 6954 if (lock.nil_ent == NVME_LOCK_E_NS) { 6955 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL); 6956 ASSERT3U(minor->nm_ns_lock.nli_state, ==, 6957 NVME_LOCK_STATE_UNLOCKED); 6958 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0); 6959 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL); 6960 6961 if (minor->nm_ns != NULL) { 6962 ASSERT3U(minor->nm_ns->ns_id, ==, 6963 lock.nil_common.nioc_nsid); 6964 } 6965 6966 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node)); 6967 } else { 6968 ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL); 6969 ASSERT3U(minor->nm_ctrl_lock.nli_state, ==, 6970 NVME_LOCK_STATE_UNLOCKED); 6971 ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0); 6972 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL); 6973 ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node)); 6974 6975 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL); 6976 ASSERT3U(minor->nm_ns_lock.nli_state, ==, 6977 NVME_LOCK_STATE_UNLOCKED); 6978 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0); 6979 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL); 6980 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node)); 6981 } 6982 #endif /* DEBUG */ 6983 6984 /* 6985 * At this point we should actually attempt a locking operation. 6986 */ 6987 nvme_rwlock(minor, &lock); 6988 mutex_exit(&nvme->n_minor_mutex); 6989 6990 copyout: 6991 if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock), 6992 mode & FKIOCTL) != 0) { 6993 return (EFAULT); 6994 } 6995 6996 return (0); 6997 } 6998 6999 static int 7000 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode, 7001 cred_t *cred_p) 7002 { 7003 nvme_ioctl_unlock_t unlock; 7004 nvme_t *const nvme = minor->nm_ctrl; 7005 boolean_t is_ctrl; 7006 nvme_lock_t *lock; 7007 nvme_minor_lock_info_t *info; 7008 7009 /* 7010 * Note, we explicitly don't check for privileges for unlock. The idea 7011 * being that if you have the lock, that's what matters. If you don't 7012 * have the lock, it doesn't matter what privileges that you have at 7013 * all. 7014 */ 7015 if ((mode & FWRITE) == 0) 7016 return (EBADF); 7017 7018 if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock), 7019 mode & FKIOCTL) != 0) { 7020 return (EFAULT); 7021 } 7022 7023 if (unlock.niu_ent != NVME_LOCK_E_CTRL && 7024 unlock.niu_ent != NVME_LOCK_E_NS) { 7025 (void) nvme_ioctl_error(&unlock.niu_common, 7026 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0); 7027 goto copyout; 7028 } 7029 7030 if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) { 7031 goto copyout; 7032 } 7033 7034 /* 7035 * If we're on a namespace, confirm that we're not asking for the 7036 * controller. 7037 */ 7038 if (unlock.niu_common.nioc_nsid != 0 && 7039 unlock.niu_ent == NVME_LOCK_E_CTRL) { 7040 (void) nvme_ioctl_error(&unlock.niu_common, 7041 NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0); 7042 goto copyout; 7043 } 7044 7045 mutex_enter(&nvme->n_minor_mutex); 7046 if (unlock.niu_ent == NVME_LOCK_E_CTRL) { 7047 if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) { 7048 mutex_exit(&nvme->n_minor_mutex); 7049 (void) nvme_ioctl_error(&unlock.niu_common, 7050 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0); 7051 goto copyout; 7052 } 7053 } else { 7054 if (minor->nm_ns_lock.nli_ns == NULL) { 7055 mutex_exit(&nvme->n_minor_mutex); 7056 (void) nvme_ioctl_error(&unlock.niu_common, 7057 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0); 7058 goto copyout; 7059 } 7060 7061 /* 7062 * Check that our unlock request corresponds to the namespace ID 7063 * that is currently locked. 
This could happen if we're using 7064 * the controller node and it specified a valid, but not locked, 7065 * namespace ID. 7066 */ 7067 if (minor->nm_ns_lock.nli_ns->ns_id != 7068 unlock.niu_common.nioc_nsid) { 7069 mutex_exit(&nvme->n_minor_mutex); 7070 ASSERT3P(minor->nm_ns, ==, NULL); 7071 (void) nvme_ioctl_error(&unlock.niu_common, 7072 NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0); 7073 goto copyout; 7074 } 7075 7076 if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) { 7077 mutex_exit(&nvme->n_minor_mutex); 7078 (void) nvme_ioctl_error(&unlock.niu_common, 7079 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0); 7080 goto copyout; 7081 } 7082 } 7083 7084 /* 7085 * Finally, perform the unlock. 7086 */ 7087 is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL; 7088 if (is_ctrl) { 7089 lock = &nvme->n_lock; 7090 info = &minor->nm_ctrl_lock; 7091 } else { 7092 nvme_namespace_t *ns; 7093 const uint32_t nsid = unlock.niu_common.nioc_nsid; 7094 7095 ns = nvme_nsid2ns(nvme, nsid); 7096 lock = &ns->ns_lock; 7097 info = &minor->nm_ns_lock; 7098 VERIFY3P(ns, ==, info->nli_ns); 7099 } 7100 nvme_rwunlock(info, lock); 7101 mutex_exit(&nvme->n_minor_mutex); 7102 nvme_ioctl_success(&unlock.niu_common); 7103 7104 copyout: 7105 if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock), 7106 mode & FKIOCTL) != 0) { 7107 return (EFAULT); 7108 } 7109 7110 return (0); 7111 } 7112 7113 static int 7114 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p, 7115 int *rval_p) 7116 { 7117 #ifndef __lock_lint 7118 _NOTE(ARGUNUSED(rval_p)); 7119 #endif 7120 nvme_minor_t *minor; 7121 nvme_t *nvme; 7122 7123 minor = nvme_minor_find_by_dev(dev); 7124 if (minor == NULL) { 7125 return (ENXIO); 7126 } 7127 7128 nvme = minor->nm_ctrl; 7129 if (nvme == NULL) 7130 return (ENXIO); 7131 7132 if (IS_DEVCTL(cmd)) 7133 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0)); 7134 7135 if (nvme->n_dead && (cmd != NVME_IOC_DETACH && cmd != 7136 NVME_IOC_UNLOCK)) { 7137 if (IS_NVME_IOC(cmd) == 0) { 7138 return (EIO); 7139 } 7140 7141 return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg, 7142 mode)); 7143 } 7144 7145 /* 7146 * ioctls that are no longer using the original ioctl structure. 
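 * These handlers take the nvme_minor_t directly and return detailed
 * status through the embedded nvme_ioctl_common_t in addition to the
 * errno value.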
7147 */ 7148 switch (cmd) { 7149 case NVME_IOC_CTRL_INFO: 7150 return (nvme_ioctl_ctrl_info(minor, arg, mode, cred_p)); 7151 case NVME_IOC_IDENTIFY: 7152 return (nvme_ioctl_identify(minor, arg, mode, cred_p)); 7153 case NVME_IOC_GET_LOGPAGE: 7154 return (nvme_ioctl_get_logpage(minor, arg, mode, cred_p)); 7155 case NVME_IOC_GET_FEATURE: 7156 return (nvme_ioctl_get_feature(minor, arg, mode, cred_p)); 7157 case NVME_IOC_DETACH: 7158 return (nvme_ioctl_detach(minor, arg, mode, cred_p)); 7159 case NVME_IOC_ATTACH: 7160 return (nvme_ioctl_attach(minor, arg, mode, cred_p)); 7161 case NVME_IOC_FORMAT: 7162 return (nvme_ioctl_format(minor, arg, mode, cred_p)); 7163 case NVME_IOC_FIRMWARE_DOWNLOAD: 7164 return (nvme_ioctl_firmware_download(minor, arg, mode, 7165 cred_p)); 7166 case NVME_IOC_FIRMWARE_COMMIT: 7167 return (nvme_ioctl_firmware_commit(minor, arg, mode, 7168 cred_p)); 7169 case NVME_IOC_NS_INFO: 7170 return (nvme_ioctl_ns_info(minor, arg, mode, cred_p)); 7171 case NVME_IOC_PASSTHRU: 7172 return (nvme_ioctl_passthru(minor, arg, mode, cred_p)); 7173 case NVME_IOC_LOCK: 7174 return (nvme_ioctl_lock(minor, arg, mode, cred_p)); 7175 case NVME_IOC_UNLOCK: 7176 return (nvme_ioctl_unlock(minor, arg, mode, cred_p)); 7177 default: 7178 return (ENOTTY); 7179 } 7180 } 7181 7182 /* 7183 * DDI UFM Callbacks 7184 */ 7185 static int 7186 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 7187 ddi_ufm_image_t *img) 7188 { 7189 nvme_t *nvme = arg; 7190 7191 if (imgno != 0) 7192 return (EINVAL); 7193 7194 ddi_ufm_image_set_desc(img, "Firmware"); 7195 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot); 7196 7197 return (0); 7198 } 7199 7200 /* 7201 * Fill out firmware slot information for the requested slot. The firmware 7202 * slot information is gathered by requesting the Firmware Slot Information log 7203 * page. The format of the page is described in section 5.10.1.3. 7204 * 7205 * We lazily cache the log page on the first call and then invalidate the cache 7206 * data after a successful firmware download or firmware commit command. 7207 * The cached data is protected by a mutex as the state can change 7208 * asynchronous to this callback. 
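 * The invalidation itself is performed by nvme_ufm_update(); see the
 * firmware commit path above.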
7209 */ 7210 static int 7211 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 7212 uint_t slotno, ddi_ufm_slot_t *slot) 7213 { 7214 nvme_t *nvme = arg; 7215 void *log = NULL; 7216 size_t bufsize; 7217 ddi_ufm_attr_t attr = 0; 7218 char fw_ver[NVME_FWVER_SZ + 1]; 7219 7220 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1)) 7221 return (EINVAL); 7222 7223 mutex_enter(&nvme->n_fwslot_mutex); 7224 if (nvme->n_fwslot == NULL) { 7225 if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize, 7226 NVME_LOGPAGE_FWSLOT) || 7227 bufsize != sizeof (nvme_fwslot_log_t)) { 7228 if (log != NULL) 7229 kmem_free(log, bufsize); 7230 mutex_exit(&nvme->n_fwslot_mutex); 7231 return (EIO); 7232 } 7233 nvme->n_fwslot = (nvme_fwslot_log_t *)log; 7234 } 7235 7236 /* 7237 * NVMe numbers firmware slots starting at 1 7238 */ 7239 if (slotno == (nvme->n_fwslot->fw_afi - 1)) 7240 attr |= DDI_UFM_ATTR_ACTIVE; 7241 7242 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0) 7243 attr |= DDI_UFM_ATTR_WRITEABLE; 7244 7245 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') { 7246 attr |= DDI_UFM_ATTR_EMPTY; 7247 } else { 7248 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno], 7249 NVME_FWVER_SZ); 7250 fw_ver[NVME_FWVER_SZ] = '\0'; 7251 ddi_ufm_slot_set_version(slot, fw_ver); 7252 } 7253 mutex_exit(&nvme->n_fwslot_mutex); 7254 7255 ddi_ufm_slot_set_attrs(slot, attr); 7256 7257 return (0); 7258 } 7259 7260 static int 7261 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 7262 { 7263 *caps = DDI_UFM_CAP_REPORT; 7264 return (0); 7265 } 7266 7267 boolean_t 7268 nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min) 7269 { 7270 return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE); 7271 } 7272
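/*
 * Illustrative sketch only: how a caller within the driver might gate a
 * version-dependent feature on nvme_ctrl_atleast().  The nvme_vers_1v3
 * constant is assumed here for illustration to be one of the
 * nvme_version_t values provided by the shared NVMe version code;
 * substitute whichever minimum version the feature actually requires.
 *
 *	if (nvme_ctrl_atleast(nvme, &nvme_vers_1v3)) {
 *		/- safe to rely on NVMe 1.3-only behaviour here -/
 *	}
 */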