/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2019 Unix Software Ltd.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2020 Racktop Systems.
 * Copyright 2022 Oxide Computer Company.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver targets and is designed to support all NVMe 1.x devices.
 * Features are added to the driver as we encounter devices that require them
 * and as our needs dictate, so some commands or log pages may not take
 * advantage of newer features that devices support at this time. When you
 * encounter such a case, it is generally fine to add that support to the
 * driver as long as you take care to ensure that the requisite device version
 * is met before using it.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to use
 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
 * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array by steps of n_intr_cnt. Usually only
 * the admin queue will share an interrupt with one I/O queue. The interrupt
 * handler will retrieve completed commands from all queues sharing an
 * interrupt vector and will post them to a taskq for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array. The
 * array index is used as command identifier (CID) in the submission queue
 * entry.
 * Some commands may take a very long time to complete, and if the queue wraps
 * around in that time a submission may find the next array slot still
 * occupied by a long-running command. In this case the array is sequentially
 * searched for the next free slot. The length of the command array is the
 * same as the configured queue length. Queue overrun is prevented by the
 * semaphore, so a command submission may block if the queue is full.
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts
 * are turned off while dumping, the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support protection information. This driver does not support any of
 * these attributes and ignores namespaces that have them.
 *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
 * if present to generate the devid, and passes the EUI64 to blkdev to use it
 * in the device node names.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces using a
 * 32bit minor node number.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller
 * and one minor node for each namespace. The only operations supported by
 * those minor nodes are open(9E), close(9E), and ioctl(9E). This serves as
 * the interface for the nvmeadm(8) utility.
 *
 * Exclusive opens are required for certain ioctl(9E) operations that alter
 * controller and/or namespace state. While different namespaces may be opened
 * exclusively in parallel, an exclusive open of the controller minor node
 * requires that no namespaces are currently open (exclusive or otherwise).
 * Opening any namespace minor node (exclusive or otherwise) will fail while
 * the controller minor node is opened exclusively by any other thread. Thus it
 * is possible for one thread at a time to open the controller minor node
 * exclusively, and keep it open while opening any namespace minor node of the
 * same controller, exclusively or otherwise.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with presenting
 * a disk device to the system. As a result, the processing of I/O requests is
 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
 * setup, and splitting of transfers into manageable chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid.
 * The driver reports the best block size as determined by the namespace
 * format back to blkdev as the physical block size to support partition and
 * block alignment. The devid is either based on the namespace NGUID or EUI64,
 * if present, or composed using the device vendor ID, model number, serial
 * number, and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests. Before
 * this limit is known the driver assumes it is at least 1 and posts a single
 * asynchronous request. Later, when the limit is known, more asynchronous
 * event requests are posted to allow quicker reception of error information.
 * When an asynchronous event is posted by the hardware the driver will parse
 * the error status fields and log information or fault the device, depending
 * on the severity of the asynchronous event. The asynchronous event request
 * is then reused and posted to the admin queue again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware appears
 * to be healthy the driver attempts to abort the command. The original command
 * timeout is also applied to the abort command. If the abort times out as
 * well, the driver assumes the device to be dead, fences it off, and calls FMA
 * to retire it. In all other cases the aborted command should return
 * immediately with a status indicating it was aborted, and the driver will
 * wait indefinitely for that to happen. No timeout handling of normal I/O
 * commands is presently done.
 *
 * Any command that times out due to the controller dropping dead will be put
 * on the nvme_lost_cmds list if it references DMA memory. This prevents the
 * DMA memory from being reused by the system and later being written to by a
 * "dead" NVMe controller.
 *
 *
 * Locking:
 *
 * Each queue pair has an nq_mutex and an ncq_mutex. The nq_mutex must be held
 * when accessing shared state and submission queue registers; ncq_mutex is
 * held when accessing completion queue state and registers.
 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
 * mutexes themselves.
 *
 * Each command also has its own nc_mutex, which is associated with the
 * condition variable nc_cv. It is only used on admin commands which are run
 * synchronously. In that case it must be held across calls to
 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
 * nvme_admin_cmd(). It must also be held whenever the completion state of the
 * command is changed or while an admin command timeout is handled.
 *
 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
 * More than one nc_mutex may only be held when aborting commands.
 * In this case,
 * the nc_mutex of the command to be aborted must be held across the call to
 * nvme_abort_cmd() to prevent the command from completing while the abort is
 * in progress.
 *
 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
 * acquired first. More than one nq_mutex is never held by a single thread.
 * The ncq_mutex is only held by nvme_retrieve_cmd() and
 * nvme_process_iocq(). nvme_process_iocq() is only called from the
 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
 * mutex is non-contentious but is required for implementation completeness
 * and safety.
 *
 * There is one mutex, n_minor_mutex, which protects all open flags nm_open
 * and exclusive-open thread pointers nm_oexcl of each minor node associated
 * with a controller and its namespaces.
 *
 * In addition, there is one mutex, n_mgmt_mutex, which must be held whenever
 * the driver state for any namespace is changed, especially across calls to
 * nvme_init_ns(), nvme_attach_ns() and nvme_detach_ns(). Except when detaching
 * nvme, it should also be held across calls that modify the blkdev handle of a
 * namespace. Command and queue mutexes may be acquired and released while
 * n_mgmt_mutex is held; n_minor_mutex should not be.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry point
 * is still provided which is used to send a shutdown notification to the
 * device.
 *
 *
 * NVMe Hotplug:
 *
 * The driver supports hot removal. The driver uses the NDI event framework
 * to register a callback, nvme_remove_callback, to clean up when a disk is
 * removed. In particular, the driver will unqueue outstanding I/O commands and
 * set n_dead on the softstate to true so that other operations, such as ioctls
 * and command submissions, fail as well.
 *
 * While the callback registration relies on the NDI event framework, the
 * removal event itself is kicked off in the PCIe hotplug framework, when the
 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
 * device was removed from the slot.
 *
 * The NVMe driver instance itself will remain until the final close of the
 * device.
 *
 *
 * DDI UFM Support:
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
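 *
 * As a rough sketch of how a driver hooks into the UFM framework (the actual
 * registration happens during attach; the ops vector nvme_ufm_ops is defined
 * further down in this file, while the n_ufmh handle field is assumed here
 * for illustration only):
 *
 *	if (ddi_ufm_init(nvme->n_dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
 *	    &nvme->n_ufmh, nvme) != DDI_SUCCESS)
 *		goto fail;
 *	ddi_ufm_update(nvme->n_ufmh);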
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to be
 *   posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 *   cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues.
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared.
 *
 * In addition to the above properties, some device-specific tunables can be
 * configured using the nvme-config-list global property. The value of this
 * property is a list of triplets. The formal syntax is:
 *
 *	nvme-config-list	::= <triplet> [, <triplet>]* ;
 *	<triplet>		::= "<model>" , "<rev-list>" , "<tuple-list>"
 *	<rev-list>		::= [ <fwrev> [, <fwrev>]*]
 *	<tuple-list>		::= <tunable> [, <tunable>]*
 *	<tunable>		::= <name> : <value>
 *
 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
 * contains one or more tunables to apply to all controllers that match the
 * specified model number and optionally firmware revision. Each <tunable> is a
 * <name> : <value> pair.
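 *
 * For example, a purely illustrative nvme.conf entry (the model and firmware
 * revision strings below are hypothetical) might look like this:
 *
 *	nvme-config-list =
 *	    "SAMPLE MODEL A", "", "min-phys-block-size:4096",
 *	    "SAMPLE MODEL B", "FW01, FW02", "volatile-write-cache:off";
 *
 * This would apply min-phys-block-size to all "SAMPLE MODEL A" controllers
 * regardless of firmware revision, and turn off the volatile write cache on
 * "SAMPLE MODEL B" controllers running firmware revision FW01 or FW02.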
 * Supported tunables are:
 *
 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 *
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 *
 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
 *   volatile write cache, if present
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>
#include <sys/dkio.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"

/*
 * Assertions to make sure that we've properly captured various aspects of the
 * packed structures and haven't broken them during updates.
 */
CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);

CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);

CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);

CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);

CTASSERT(sizeof (nvme_nschange_list_t) == 4096);


/* NVMe spec version supported */
static const int nvme_version_major = 1;

/* tunable for admin command timeout in seconds, default is 1s */
int nvme_admin_cmd_timeout = 1;

/* tunable for FORMAT NVM command timeout in seconds, default is 600s */
int nvme_format_cmd_timeout = 600;

/* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
int nvme_commit_save_cmd_timeout = 15;

/*
 * tunable for the size of arbitrary vendor specific admin commands,
 * default is 16MiB.
 */
uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;

/*
 * tunable for the max timeout of arbitrary vendor specific admin commands,
 * default is 60s.
 */
uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
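
/*
 * The tunables above can be adjusted without rebuilding the driver, e.g. by
 * setting them in /etc/system (a sketch; the values shown are illustrative
 * only):
 *
 *	set nvme:nvme_admin_cmd_timeout = 10
 *	set nvme:nvme_format_cmd_timeout = 1200
 */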

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static void nvme_admin_cmd(nvme_cmd_t *, int);
static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
static void nvme_async_event(nvme_t *);
static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t,
    uint8_t, boolean_t, uint8_t);
static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t,
    ...);
static int nvme_identify(nvme_t *, boolean_t, uint32_t, uint8_t, void **);
static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t,
    uint32_t *, void **, size_t *);
static int nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
static int nvme_bd_free_space(void *, bd_xfer_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

/* DDI UFM callbacks */
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

static int nvme_init_ns(nvme_t *, int);
static int nvme_attach_ns(nvme_t *, int);
static int nvme_detach_ns(nvme_t *, int);

#define	NVME_NSID2NS(nvme, nsid)	(&((nvme)->n_ns[(nsid) - 1]))

static ddi_ufm_ops_t nvme_ufm_ops = {
	NULL,
	nvme_ufm_fill_image,
	nvme_ufm_fill_slot,
	nvme_ufm_getcaps
};

#define	NVME_MINOR_INST_SHIFT	9
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)
#define	NVME_IS_VENDOR_SPECIFIC_CMD(x)	(((x) >= 0xC0) && ((x) <= 0xFF))
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MIN	0xC0
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MAX	0xFF
#define	NVME_IS_VENDOR_SPECIFIC_LOGPAGE(x)	\
	(((x) >= NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) && \
	((x) <= NVME_VENDOR_SPECIFIC_LOGPAGE_MAX))

/*
 * NVMe versions 1.3 and later actually support log pages up to UINT32_MAX
 * DWords in size. However, revision 1.3 also modified the layout of the Get
 * Log Page command significantly relative to version 1.2, including changing
 * reserved bits, adding new bitfields, and requiring the use of command DWord
 * 11 to fully specify the size of the log page (the lower and upper 16 bits of
 * the number of DWords in the page are split between DWord 10 and DWord 11,
 * respectively).
 *
 * All of these impose significantly different layout requirements on the
 * `nvme_getlogpage_t` type. This could be solved with two different types, or
 * a complicated/nested union with the two versions as the overlying members.
 * Both of these are reasonable, if a bit convoluted. However, there is no
 * current need for such large pages, or a way to test them, as most log pages
 * actually fit within the current size limit. So for simplicity, we retain the
 * size cap from version 1.2.
 *
 * Note that the number of DWords is zero-based, so we add 1. It is subtracted
 * to form a zero-based value in `nvme_get_logpage`.
 */
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE	\
	(((1 << 12) + 1) * sizeof (uint32_t))

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= 1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It
 * uses a 64bit base address aligned to this page size. There is no limitation
 * on chaining PRPs together for arbitrarily large DMA transfers.
 */
static ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xfff,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= 0x1000,
	.dma_attr_seg		= 0xfff,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * An SGL entry describes a chunk of DMA memory using a 64bit base address and
 * a 32bit length field. SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes.
 */
static ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xffffffffUL,
	.dma_attr_align		= 1,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x10,
	.dma_attr_maxxfer	= 0xfffffffffULL,
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 0x10,
	.dma_attr_flags		= 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
};

static struct cb_ops nvme_cb_ops = {
	.cb_open	= nvme_open,
	.cb_close	= nvme_close,
	.cb_strategy	= nodev,
	.cb_print	= nodev,
	.cb_dump	= nodev,
	.cb_read	= nodev,
	.cb_write	= nodev,
	.cb_ioctl	= nvme_ioctl,
	.cb_devmap	= nodev,
	.cb_mmap	= nodev,
	.cb_segmap	= nodev,
	.cb_chpoll	= nochpoll,
	.cb_prop_op	= ddi_prop_op,
	.cb_str		= 0,
	.cb_flag	= D_NEW | D_MP,
	.cb_rev		= CB_REV,
	.cb_aread	= nodev,
	.cb_awrite	= nodev
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev	= DEVO_REV,
	.devo_refcnt	= 0,
	.devo_getinfo	= ddi_no_info,
	.devo_identify	= nulldev,
	.devo_probe	= nulldev,
	.devo_attach	= nvme_attach,
	.devo_detach	= nvme_detach,
	.devo_reset	= nodev,
	.devo_cb_ops	= &nvme_cb_ops,
	.devo_bus_ops	= NULL,
	.devo_power	= NULL,
	.devo_quiesce	= nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops	= &mod_driverops,
	.drv_linkinfo	= "NVMe v1.1b",
	.drv_dev_ops	= &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev		= MODREV_1,
	.ml_linkage	= { &nvme_modldrv, NULL }
};

static bd_ops_t nvme_bd_ops = {
	.o_version	= BD_OPS_CURRENT_VERSION,
	.o_drive_info	= nvme_bd_driveinfo,
	.o_media_info	= nvme_bd_mediainfo,
	.o_devid_init	= nvme_bd_devid,
	.o_sync_cache	= nvme_bd_sync,
	.o_read		= nvme_bd_read,
	.o_write	= nvme_bd_write,
	.o_free_space	= nvme_bd_free_space,
};

/*
 * This list will hold commands that have timed out and couldn't be aborted.
 * As we don't know what the hardware may still do with the DMA memory we can't
 * free them, so we'll keep them forever on this list where we can easily look
 * at them with mdb.
 */
static struct list nvme_lost_cmds;
static kmutex_t nvme_lc_mutex;

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
	    offsetof(nvme_cmd_t, nc_list));

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		/* Undo all of the initialization done above. */
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	if (!list_is_empty(&nvme_lost_cmds))
		return (DDI_FAILURE);

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}

static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static void
nvme_free_dma_common(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	nvme_free_dma_common(dma);
	kmem_free(dma, sizeof (*dma));
}

/* ARGSUSED */
static void
nvme_prp_dma_destructor(void *buf, void *private)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;

	nvme_free_dma_common(dma);
}

static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
    size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR, which
		 * indicates a driver bug that should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		atomic_inc_32(&nvme->n_dma_bind_err);
		nvme_free_dma_common(dma);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
	    DDI_SUCCESS) {
		*ret = NULL;
		kmem_free(dma, sizeof (nvme_dma_t));
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
nvme_prp_dma_constructor(void *buf, void *private, int flags)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;
	nvme_t *nvme = (nvme_t *)private;

	dma->nd_dmah = NULL;
	dma->nd_acch = NULL;

	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
		return (-1);
	}

	ASSERT(dma->nd_ncookie == 1);

	dma->nd_cached = B_TRUE;

	return (0);
}

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}

static void
nvme_free_cq(nvme_cq_t *cq)
{
	mutex_destroy(&cq->ncq_mutex);

	if (cq->ncq_cmd_taskq != NULL)
		taskq_destroy(cq->ncq_cmd_taskq);

	if (cq->ncq_dma != NULL)
		nvme_free_dma(cq->ncq_dma);

	kmem_free(cq, sizeof (*cq));
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);
	sema_destroy(&qp->nq_sema);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

/*
 * Destroy the pre-allocated cq array, but only free individual completion
 * queues from the given starting index.
 */
static void
nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
{
	uint_t i;

	for (i = start; i < nvme->n_cq_count; i++)
		if (nvme->n_cq[i] != NULL)
			nvme_free_cq(nvme->n_cq[i]);

	kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
}

static int
nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
    uint_t nthr)
{
	nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
	char name[64];		/* large enough for the taskq name */

	mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
	    DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
		goto fail;

	cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
	cq->ncq_nentry = nentry;
	cq->ncq_id = idx;
	cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);

	/*
	 * Each completion queue has its own command taskq.
	 */
	(void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
	    ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);

	cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
	    TASKQ_PREPOPULATE);

	if (cq->ncq_cmd_taskq == NULL) {
		dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
		    "taskq for cq %u", idx);
		goto fail;
	}

	*cqp = cq;
	return (DDI_SUCCESS);

fail:
	nvme_free_cq(cq);
	*cqp = NULL;

	return (DDI_FAILURE);
}

/*
 * Create the n_cq array big enough to hold "ncq" completion queues.
 * If the array already exists it will be re-sized (but only larger).
 * The admin queue is included in this array, which boosts the
 * max number of entries to UINT16_MAX + 1.
 */
static int
nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
{
	nvme_cq_t **cq;
	uint_t i, cq_count;

	ASSERT3U(ncq, >, nvme->n_cq_count);

	cq = nvme->n_cq;
	cq_count = nvme->n_cq_count;

	nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
	nvme->n_cq_count = ncq;

	for (i = 0; i < cq_count; i++)
		nvme->n_cq[i] = cq[i];

	for (; i < nvme->n_cq_count; i++)
		if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
		    DDI_SUCCESS)
			goto fail;

	if (cq != NULL)
		kmem_free(cq, sizeof (*cq) * cq_count);

	return (DDI_SUCCESS);

fail:
	nvme_destroy_cq_array(nvme, cq_count);
	/*
	 * Restore the original array
	 */
	nvme->n_cq_count = cq_count;
	nvme->n_cq = cq;

	return (DDI_FAILURE);
}

static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
    uint_t idx)
{
	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
	uint_t cq_idx;

	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	/*
	 * The NVMe spec defines that a full queue has one empty (unused) slot;
	 * initialize the semaphore accordingly.
	 */
	sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
		goto fail;

	/*
	 * idx == 0 is adminq, those above 0 are shared io completion queues.
	 */
	cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
	qp->nq_cq = nvme->n_cq[cq_idx];
	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
	qp->nq_nentry = nentry;

	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);

	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
	qp->nq_next_cmd = 0;

	*nqp = qp;
	return (DDI_SUCCESS);

fail:
	nvme_free_qpair(qp);
	*nqp = NULL;

	return (DDI_FAILURE);
}

static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);

	if (cmd == NULL)
		return (cmd);

	bzero(cmd, sizeof (nvme_cmd_t));

	cmd->nc_nvme = nvme;

	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));
	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);

	return (cmd);
}

static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
	/* Don't free commands on the lost commands list. */
	if (list_link_active(&cmd->nc_list))
		return;

	if (cmd->nc_dma) {
		nvme_free_dma(cmd->nc_dma);
		cmd->nc_dma = NULL;
	}

	if (cmd->nc_prp) {
		kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
		cmd->nc_prp = NULL;
	}

	cv_destroy(&cmd->nc_cv);
	mutex_destroy(&cmd->nc_mutex);

	kmem_cache_free(nvme_cmd_cache, cmd);
}

static void
nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	sema_p(&qp->nq_sema);
	nvme_submit_cmd_common(qp, cmd);
}

static int
nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	if (cmd->nc_nvme->n_dead) {
		return (EIO);
	}

	if (sema_tryp(&qp->nq_sema) == 0)
		return (EAGAIN);

	nvme_submit_cmd_common(qp, cmd);
	return (0);
}

static void
nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	nvme_reg_sqtdbl_t tail = { 0 };

	mutex_enter(&qp->nq_mutex);
	cmd->nc_completed = B_FALSE;

	/*
	 * Now that we hold the queue pair lock, we must check whether or not
	 * the controller has been listed as dead (e.g. was removed due to
	 * hotplug). This is necessary as otherwise we could race with
	 * nvme_remove_callback(). Because this has not been enqueued, we don't
	 * call nvme_unqueue_cmd(), which is why we must manually decrement the
	 * semaphore.
	 */
	if (cmd->nc_nvme->n_dead) {
		taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
		    cmd, TQ_NOSLEEP, &cmd->nc_tqent);
		sema_v(&qp->nq_sema);
		mutex_exit(&qp->nq_mutex);
		return;
	}

	/*
	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
	 * slot. If the slot is already occupied advance to the next slot and
	 * try again. This can happen for long running commands like async
	 * event requests.
	 */
	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
	qp->nq_cmd[qp->nq_next_cmd] = cmd;

	qp->nq_active_cmds++;

	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;

	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);

	mutex_exit(&qp->nq_mutex);
}

static nvme_cmd_t *
nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
{
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&qp->nq_mutex));
	ASSERT3S(cid, <, qp->nq_nentry);

	cmd = qp->nq_cmd[cid];
	qp->nq_cmd[cid] = NULL;
	ASSERT3U(qp->nq_active_cmds, >, 0);
	qp->nq_active_cmds--;
	sema_v(&qp->nq_sema);

	ASSERT3P(cmd, !=, NULL);
	ASSERT3P(cmd->nc_nvme, ==, nvme);
	ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);

	return (cmd);
}

/*
 * Get the command tied to the next completed cqe and bump along completion
 * queue head counter.
 */
static nvme_cmd_t *
nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_qpair_t *qp;
	nvme_cqe_t *cqe;
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&cq->ncq_mutex));

	cqe = &cq->ncq_cq[cq->ncq_head];

	/* Check phase tag of CQE. Hardware inverts it for new entries. */
	if (cqe->cqe_sf.sf_p == cq->ncq_phase)
		return (NULL);

	qp = nvme->n_ioq[cqe->cqe_sqid];

	mutex_enter(&qp->nq_mutex);
	cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
	mutex_exit(&qp->nq_mutex);

	ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));

	qp->nq_sqhead = cqe->cqe_sqhd;

	cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;

	/* Toggle phase on wrap-around. */
	if (cq->ncq_head == 0)
		cq->ncq_phase = cq->ncq_phase ? 0 : 1;

	return (cmd);
}

/*
 * Process all completed commands on the io completion queue.
 */
static uint_t
nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;
	uint_t completed = 0;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
		    TQ_NOSLEEP, &cmd->nc_tqent);

		completed++;
	}

	if (completed > 0) {
		/*
		 * Update the completion queue head doorbell.
		 */
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (completed);
}

static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
	nvme_cq_t *cq = qp->nq_cq;
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (cmd);
}

static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (cmd->nc_xfer != NULL)
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid opcode in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid field in cmd %p",
			    (void *)cmd);
		return (EIO);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid NS/format in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error. The spec (v1.0, section 4.5.1.2) says
		 * detailed error information is returned as async event,
		 * so we pretty much ignore the error here and handle it
		 * in the async event handler.
		 */
		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_REQUEST:
		/*
		 * Command Abort Requested. This normally happens only when a
		 * command times out.
		 */
		/* TODO: post ereport or change blkdev to handle this? */
		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
		return (ECANCELED);

	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
		/* Command Aborted due to Power Loss Notification */
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
		cmd->nc_nvme->n_dead = B_TRUE;
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
		/* Command Aborted due to SQ Deletion */
		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
		/* Capacity Exceeded */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
		/* Namespace Not Ready */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_SPC_INV_CQ:
		/* Completion Queue Invalid */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_QID:
		/* Invalid Queue Identifier */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
		/* Max Queue Size Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
		return (EINVAL);

	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
		/* Abort Command Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "abort command limit exceeded in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
		/* Async Event Request Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "async event request limit exceeded in cmd %p",
		    (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_INV_INT_VECT:
		/* Invalid Interrupt Vector */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
		/* Invalid Log Page */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FORMAT:
		/* Invalid Format */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_Q_DEL:
		/* Invalid Queue Deletion */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
		/* Conflicting Attributes */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_INV_PROT:
		/* Invalid Protection Information */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_READONLY:
		/* Write to Read Only Range */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_readonly);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EROFS);

	case NVME_CQE_SC_SPC_INV_FW_SLOT:
		/* Invalid Firmware Slot */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FW_IMG:
		/* Invalid Firmware Image */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_RESET:
		/* Conventional Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NSSR:
		/* NVMe Subsystem Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NEXT_RESET:
		/* Activation Requires Reset */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_MTFA:
		/* Activation Requires Maximum Time Violation */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EAGAIN);

	case NVME_CQE_SC_SPC_FW_PROHIBITED:
		/* Activation Prohibited */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_OVERLAP:
		/* Overlapping Firmware Ranges */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD);
		return (EINVAL);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	/*
	 * Take a shortcut if the controller is dead, or if
	 * command status indicates no error.
1694 */ 1695 if (cmd->nc_nvme->n_dead) 1696 return (EIO); 1697 1698 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1699 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 1700 return (0); 1701 1702 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 1703 return (nvme_check_generic_cmd_status(cmd)); 1704 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 1705 return (nvme_check_specific_cmd_status(cmd)); 1706 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 1707 return (nvme_check_integrity_cmd_status(cmd)); 1708 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 1709 return (nvme_check_vendor_cmd_status(cmd)); 1710 1711 return (nvme_check_unknown_cmd_status(cmd)); 1712 } 1713 1714 static int 1715 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec) 1716 { 1717 nvme_t *nvme = abort_cmd->nc_nvme; 1718 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1719 nvme_abort_cmd_t ac = { 0 }; 1720 int ret = 0; 1721 1722 sema_p(&nvme->n_abort_sema); 1723 1724 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid; 1725 ac.b.ac_sqid = abort_cmd->nc_sqid; 1726 1727 cmd->nc_sqid = 0; 1728 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 1729 cmd->nc_callback = nvme_wakeup_cmd; 1730 cmd->nc_sqe.sqe_cdw10 = ac.r; 1731 1732 /* 1733 * Send the ABORT to the hardware. The ABORT command will return _after_ 1734 * the aborted command has completed (aborted or otherwise), but since 1735 * we still hold the aborted command's mutex its callback hasn't been 1736 * processed yet. 1737 */ 1738 nvme_admin_cmd(cmd, sec); 1739 sema_v(&nvme->n_abort_sema); 1740 1741 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 1742 dev_err(nvme->n_dip, CE_WARN, 1743 "!ABORT failed with sct = %x, sc = %x", 1744 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1745 atomic_inc_32(&nvme->n_abort_failed); 1746 } else { 1747 dev_err(nvme->n_dip, CE_WARN, 1748 "!ABORT of command %d/%d %ssuccessful", 1749 abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid, 1750 cmd->nc_cqe.cqe_dw0 & 1 ? "un" : ""); 1751 if ((cmd->nc_cqe.cqe_dw0 & 1) == 0) 1752 atomic_inc_32(&nvme->n_cmd_aborted); 1753 } 1754 1755 nvme_free_cmd(cmd); 1756 return (ret); 1757 } 1758 1759 /* 1760 * nvme_wait_cmd -- wait for command completion or timeout 1761 * 1762 * In case of a serious error or a timeout of the abort command the hardware 1763 * will be declared dead and FMA will be notified. 1764 */ 1765 static void 1766 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec) 1767 { 1768 clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC); 1769 nvme_t *nvme = cmd->nc_nvme; 1770 nvme_reg_csts_t csts; 1771 nvme_qpair_t *qp; 1772 1773 ASSERT(mutex_owned(&cmd->nc_mutex)); 1774 1775 while (!cmd->nc_completed) { 1776 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) 1777 break; 1778 } 1779 1780 if (cmd->nc_completed) 1781 return; 1782 1783 /* 1784 * The command timed out. 1785 * 1786 * Check controller for fatal status, any errors associated with the 1787 * register or DMA handle, or for a double timeout (abort command timed 1788 * out). If necessary log a warning and call FMA. 
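 *
 * A double timeout can only happen if the command being waited on is
 * itself an ABORT; in that case no further abort is attempted and the
 * controller is declared dead below.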
1789 */ 1790 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1791 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, " 1792 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 1793 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 1794 atomic_inc_32(&nvme->n_cmd_timeout); 1795 1796 if (csts.b.csts_cfs || 1797 nvme_check_regs_hdl(nvme) || 1798 nvme_check_dma_hdl(cmd->nc_dma) || 1799 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 1800 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1801 nvme->n_dead = B_TRUE; 1802 } else if (nvme_abort_cmd(cmd, sec) == 0) { 1803 /* 1804 * If the abort succeeded the command should complete 1805 * immediately with an appropriate status. 1806 */ 1807 while (!cmd->nc_completed) 1808 cv_wait(&cmd->nc_cv, &cmd->nc_mutex); 1809 1810 return; 1811 } 1812 1813 qp = nvme->n_ioq[cmd->nc_sqid]; 1814 1815 mutex_enter(&qp->nq_mutex); 1816 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 1817 mutex_exit(&qp->nq_mutex); 1818 1819 /* 1820 * As we don't know what the presumed dead hardware might still do with 1821 * the DMA memory, we'll put the command on the lost commands list if it 1822 * has any DMA memory. 1823 */ 1824 if (cmd->nc_dma != NULL) { 1825 mutex_enter(&nvme_lc_mutex); 1826 list_insert_head(&nvme_lost_cmds, cmd); 1827 mutex_exit(&nvme_lc_mutex); 1828 } 1829 } 1830 1831 static void 1832 nvme_wakeup_cmd(void *arg) 1833 { 1834 nvme_cmd_t *cmd = arg; 1835 1836 mutex_enter(&cmd->nc_mutex); 1837 cmd->nc_completed = B_TRUE; 1838 cv_signal(&cmd->nc_cv); 1839 mutex_exit(&cmd->nc_mutex); 1840 } 1841 1842 static void 1843 nvme_async_event_task(void *arg) 1844 { 1845 nvme_cmd_t *cmd = arg; 1846 nvme_t *nvme = cmd->nc_nvme; 1847 nvme_error_log_entry_t *error_log = NULL; 1848 nvme_health_log_t *health_log = NULL; 1849 nvme_nschange_list_t *nslist = NULL; 1850 size_t logsize = 0; 1851 nvme_async_event_t event; 1852 1853 /* 1854 * Check for errors associated with the async request itself. The only 1855 * command-specific error is "async event limit exceeded", which 1856 * indicates a programming error in the driver and causes a panic in 1857 * nvme_check_cmd_status(). 1858 * 1859 * Other possible errors are various scenarios where the async request 1860 * was aborted, or internal errors in the device. Internal errors are 1861 * reported to FMA, the command aborts need no special handling here. 1862 * 1863 * And finally, at least qemu nvme does not support async events, 1864 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we 1865 * will avoid posting async events. 1866 */ 1867 1868 if (nvme_check_cmd_status(cmd) != 0) { 1869 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1870 "!async event request returned failure, sct = %x, " 1871 "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 1872 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 1873 cmd->nc_cqe.cqe_sf.sf_m); 1874 1875 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1876 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 1877 cmd->nc_nvme->n_dead = B_TRUE; 1878 ddi_fm_service_impact(cmd->nc_nvme->n_dip, 1879 DDI_SERVICE_LOST); 1880 } 1881 1882 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1883 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC && 1884 cmd->nc_cqe.cqe_sf.sf_dnr == 1) { 1885 nvme->n_async_event_supported = B_FALSE; 1886 } 1887 1888 nvme_free_cmd(cmd); 1889 return; 1890 } 1891 1892 event.r = cmd->nc_cqe.cqe_dw0; 1893 1894 /* Clear CQE and re-submit the async request. 
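 * The interesting part of the completion, dword 0, was already saved in
 * 'event' above, so the command's CQE can be cleared and the same
 * nvme_cmd_t reused; re-submitting it keeps the number of outstanding
 * async event requests constant.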
*/ 1895 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1896 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1897 1898 switch (event.b.ae_type) { 1899 case NVME_ASYNC_TYPE_ERROR: 1900 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1901 (void) nvme_get_logpage(nvme, B_FALSE, 1902 (void **)&error_log, &logsize, event.b.ae_logpage); 1903 } else { 1904 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1905 "async event reply: %d", event.b.ae_logpage); 1906 atomic_inc_32(&nvme->n_wrong_logpage); 1907 } 1908 1909 switch (event.b.ae_info) { 1910 case NVME_ASYNC_ERROR_INV_SQ: 1911 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1912 "invalid submission queue"); 1913 return; 1914 1915 case NVME_ASYNC_ERROR_INV_DBL: 1916 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1917 "invalid doorbell write value"); 1918 return; 1919 1920 case NVME_ASYNC_ERROR_DIAGFAIL: 1921 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1922 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1923 nvme->n_dead = B_TRUE; 1924 atomic_inc_32(&nvme->n_diagfail_event); 1925 break; 1926 1927 case NVME_ASYNC_ERROR_PERSISTENT: 1928 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1929 "device error"); 1930 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1931 nvme->n_dead = B_TRUE; 1932 atomic_inc_32(&nvme->n_persistent_event); 1933 break; 1934 1935 case NVME_ASYNC_ERROR_TRANSIENT: 1936 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1937 "device error"); 1938 /* TODO: send ereport */ 1939 atomic_inc_32(&nvme->n_transient_event); 1940 break; 1941 1942 case NVME_ASYNC_ERROR_FW_LOAD: 1943 dev_err(nvme->n_dip, CE_WARN, 1944 "!firmware image load error"); 1945 atomic_inc_32(&nvme->n_fw_load_event); 1946 break; 1947 } 1948 break; 1949 1950 case NVME_ASYNC_TYPE_HEALTH: 1951 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1952 (void) nvme_get_logpage(nvme, B_FALSE, 1953 (void **)&health_log, &logsize, event.b.ae_logpage, 1954 -1); 1955 } else { 1956 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1957 "async event reply: %d", event.b.ae_logpage); 1958 atomic_inc_32(&nvme->n_wrong_logpage); 1959 } 1960 1961 switch (event.b.ae_info) { 1962 case NVME_ASYNC_HEALTH_RELIABILITY: 1963 dev_err(nvme->n_dip, CE_WARN, 1964 "!device reliability compromised"); 1965 /* TODO: send ereport */ 1966 atomic_inc_32(&nvme->n_reliability_event); 1967 break; 1968 1969 case NVME_ASYNC_HEALTH_TEMPERATURE: 1970 dev_err(nvme->n_dip, CE_WARN, 1971 "!temperature above threshold"); 1972 /* TODO: send ereport */ 1973 atomic_inc_32(&nvme->n_temperature_event); 1974 break; 1975 1976 case NVME_ASYNC_HEALTH_SPARE: 1977 dev_err(nvme->n_dip, CE_WARN, 1978 "!spare space below threshold"); 1979 /* TODO: send ereport */ 1980 atomic_inc_32(&nvme->n_spare_event); 1981 break; 1982 } 1983 break; 1984 1985 case NVME_ASYNC_TYPE_NOTICE: 1986 switch (event.b.ae_info) { 1987 case NVME_ASYNC_NOTICE_NS_CHANGE: 1988 dev_err(nvme->n_dip, CE_NOTE, 1989 "namespace attribute change event, " 1990 "logpage = %x", event.b.ae_logpage); 1991 atomic_inc_32(&nvme->n_notice_event); 1992 1993 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) 1994 break; 1995 1996 if (nvme_get_logpage(nvme, B_FALSE, (void **)&nslist, 1997 &logsize, event.b.ae_logpage, -1) != 0) { 1998 break; 1999 } 2000 2001 if (nslist->nscl_ns[0] == UINT32_MAX) { 2002 dev_err(nvme->n_dip, CE_CONT, 2003 "more than %u namespaces have changed.\n", 2004 NVME_NSCHANGE_LIST_SIZE); 2005 break; 2006 } 2007 2008 mutex_enter(&nvme->n_mgmt_mutex); 2009 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) { 2010 uint32_t 
nsid = nslist->nscl_ns[i]; 2011 2012 if (nsid == 0) /* end of list */ 2013 break; 2014 2015 dev_err(nvme->n_dip, CE_NOTE, 2016 "!namespace %u (%s) has changed.", nsid, 2017 NVME_NSID2NS(nvme, nsid)->ns_name); 2018 2019 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) 2020 continue; 2021 2022 bd_state_change( 2023 NVME_NSID2NS(nvme, nsid)->ns_bd_hdl); 2024 } 2025 mutex_exit(&nvme->n_mgmt_mutex); 2026 2027 break; 2028 2029 case NVME_ASYNC_NOTICE_FW_ACTIVATE: 2030 dev_err(nvme->n_dip, CE_NOTE, 2031 "firmware activation starting, " 2032 "logpage = %x", event.b.ae_logpage); 2033 atomic_inc_32(&nvme->n_notice_event); 2034 break; 2035 2036 case NVME_ASYNC_NOTICE_TELEMETRY: 2037 dev_err(nvme->n_dip, CE_NOTE, 2038 "telemetry log changed, " 2039 "logpage = %x", event.b.ae_logpage); 2040 atomic_inc_32(&nvme->n_notice_event); 2041 break; 2042 2043 case NVME_ASYNC_NOTICE_NS_ASYMM: 2044 dev_err(nvme->n_dip, CE_NOTE, 2045 "asymmetric namespace access change, " 2046 "logpage = %x", event.b.ae_logpage); 2047 atomic_inc_32(&nvme->n_notice_event); 2048 break; 2049 2050 case NVME_ASYNC_NOTICE_LATENCYLOG: 2051 dev_err(nvme->n_dip, CE_NOTE, 2052 "predictable latency event aggregate log change, " 2053 "logpage = %x", event.b.ae_logpage); 2054 atomic_inc_32(&nvme->n_notice_event); 2055 break; 2056 2057 case NVME_ASYNC_NOTICE_LBASTATUS: 2058 dev_err(nvme->n_dip, CE_NOTE, 2059 "LBA status information alert, " 2060 "logpage = %x", event.b.ae_logpage); 2061 atomic_inc_32(&nvme->n_notice_event); 2062 break; 2063 2064 case NVME_ASYNC_NOTICE_ENDURANCELOG: 2065 dev_err(nvme->n_dip, CE_NOTE, 2066 "endurance group event aggregate log page change, " 2067 "logpage = %x", event.b.ae_logpage); 2068 atomic_inc_32(&nvme->n_notice_event); 2069 break; 2070 2071 default: 2072 dev_err(nvme->n_dip, CE_WARN, 2073 "!unknown notice async event received, " 2074 "info = %x, logpage = %x", event.b.ae_info, 2075 event.b.ae_logpage); 2076 atomic_inc_32(&nvme->n_unknown_event); 2077 break; 2078 } 2079 break; 2080 2081 case NVME_ASYNC_TYPE_VENDOR: 2082 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 2083 "received, info = %x, logpage = %x", event.b.ae_info, 2084 event.b.ae_logpage); 2085 atomic_inc_32(&nvme->n_vendor_event); 2086 break; 2087 2088 default: 2089 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 2090 "type = %x, info = %x, logpage = %x", event.b.ae_type, 2091 event.b.ae_info, event.b.ae_logpage); 2092 atomic_inc_32(&nvme->n_unknown_event); 2093 break; 2094 } 2095 2096 if (error_log != NULL) 2097 kmem_free(error_log, logsize); 2098 2099 if (health_log != NULL) 2100 kmem_free(health_log, logsize); 2101 2102 if (nslist != NULL) 2103 kmem_free(nslist, logsize); 2104 } 2105 2106 static void 2107 nvme_admin_cmd(nvme_cmd_t *cmd, int sec) 2108 { 2109 mutex_enter(&cmd->nc_mutex); 2110 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd); 2111 nvme_wait_cmd(cmd, sec); 2112 mutex_exit(&cmd->nc_mutex); 2113 } 2114 2115 static void 2116 nvme_async_event(nvme_t *nvme) 2117 { 2118 nvme_cmd_t *cmd; 2119 2120 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2121 cmd->nc_sqid = 0; 2122 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 2123 cmd->nc_callback = nvme_async_event_task; 2124 cmd->nc_dontpanic = B_TRUE; 2125 2126 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 2127 } 2128 2129 static int 2130 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf, 2131 boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses) 2132 { 2133 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2134 nvme_format_nvm_t format_nvm = { 0 }; 2135 int 
ret; 2136 2137 format_nvm.b.fm_lbaf = lbaf & 0xf; 2138 format_nvm.b.fm_ms = ms ? 1 : 0; 2139 format_nvm.b.fm_pi = pi & 0x7; 2140 format_nvm.b.fm_pil = pil ? 1 : 0; 2141 format_nvm.b.fm_ses = ses & 0x7; 2142 2143 cmd->nc_sqid = 0; 2144 cmd->nc_callback = nvme_wakeup_cmd; 2145 cmd->nc_sqe.sqe_nsid = nsid; 2146 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 2147 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 2148 2149 /* 2150 * Some devices like Samsung SM951 don't allow formatting of all 2151 * namespaces in one command. Handle that gracefully. 2152 */ 2153 if (nsid == (uint32_t)-1) 2154 cmd->nc_dontpanic = B_TRUE; 2155 /* 2156 * If this format request was initiated by the user, then don't allow a 2157 * programmer error to panic the system. 2158 */ 2159 if (user) 2160 cmd->nc_dontpanic = B_TRUE; 2161 2162 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 2163 2164 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2165 dev_err(nvme->n_dip, CE_WARN, 2166 "!FORMAT failed with sct = %x, sc = %x", 2167 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2168 } 2169 2170 nvme_free_cmd(cmd); 2171 return (ret); 2172 } 2173 2174 /* 2175 * The `bufsize` parameter is usually an output parameter, set by this routine 2176 * when filling in the supported types of logpages from the device. However, for 2177 * vendor-specific pages, it is an input parameter, and must be set 2178 * appropriately by callers. 2179 */ 2180 static int 2181 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 2182 uint8_t logpage, ...) 2183 { 2184 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2185 nvme_getlogpage_t getlogpage = { 0 }; 2186 va_list ap; 2187 int ret; 2188 2189 va_start(ap, logpage); 2190 2191 cmd->nc_sqid = 0; 2192 cmd->nc_callback = nvme_wakeup_cmd; 2193 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 2194 2195 if (user) 2196 cmd->nc_dontpanic = B_TRUE; 2197 2198 getlogpage.b.lp_lid = logpage; 2199 2200 switch (logpage) { 2201 case NVME_LOGPAGE_ERROR: 2202 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2203 *bufsize = MIN(NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE, 2204 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t)); 2205 break; 2206 2207 case NVME_LOGPAGE_HEALTH: 2208 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 2209 *bufsize = sizeof (nvme_health_log_t); 2210 break; 2211 2212 case NVME_LOGPAGE_FWSLOT: 2213 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2214 *bufsize = sizeof (nvme_fwslot_log_t); 2215 break; 2216 2217 case NVME_LOGPAGE_NSCHANGE: 2218 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2219 *bufsize = sizeof (nvme_nschange_list_t); 2220 break; 2221 2222 default: 2223 /* 2224 * This intentionally only checks against the minimum valid 2225 * log page ID. `logpage` is a uint8_t, and `0xFF` is a valid 2226 * page ID, so this one-sided check avoids a compiler error 2227 * about a check that's always true. 
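 *
 * For example, a request for a (hypothetical) vendor-specific log
 * page 0xC0 takes this path: the caller must supply *bufsize as an
 * input, and the namespace ID is taken from the variable arguments.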
2228 */ 2229 if (logpage < NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) { 2230 dev_err(nvme->n_dip, CE_WARN, 2231 "!unknown log page requested: %d", logpage); 2232 atomic_inc_32(&nvme->n_unknown_logpage); 2233 ret = EINVAL; 2234 goto fail; 2235 } 2236 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 2237 } 2238 2239 va_end(ap); 2240 2241 getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1; 2242 2243 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 2244 2245 if (nvme_zalloc_dma(nvme, *bufsize, 2246 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2247 dev_err(nvme->n_dip, CE_WARN, 2248 "!nvme_zalloc_dma failed for GET LOG PAGE"); 2249 ret = ENOMEM; 2250 goto fail; 2251 } 2252 2253 if ((ret = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0) 2254 goto fail; 2255 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2256 2257 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2258 dev_err(nvme->n_dip, CE_WARN, 2259 "!GET LOG PAGE failed with sct = %x, sc = %x", 2260 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2261 goto fail; 2262 } 2263 2264 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2265 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2266 2267 fail: 2268 nvme_free_cmd(cmd); 2269 2270 return (ret); 2271 } 2272 2273 static int 2274 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t cns, 2275 void **buf) 2276 { 2277 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2278 int ret; 2279 2280 if (buf == NULL) 2281 return (EINVAL); 2282 2283 cmd->nc_sqid = 0; 2284 cmd->nc_callback = nvme_wakeup_cmd; 2285 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 2286 cmd->nc_sqe.sqe_nsid = nsid; 2287 cmd->nc_sqe.sqe_cdw10 = cns; 2288 2289 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 2290 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2291 dev_err(nvme->n_dip, CE_WARN, 2292 "!nvme_zalloc_dma failed for IDENTIFY"); 2293 ret = ENOMEM; 2294 goto fail; 2295 } 2296 2297 if (cmd->nc_dma->nd_ncookie > 2) { 2298 dev_err(nvme->n_dip, CE_WARN, 2299 "!too many DMA cookies for IDENTIFY"); 2300 atomic_inc_32(&nvme->n_too_many_cookies); 2301 ret = ENOMEM; 2302 goto fail; 2303 } 2304 2305 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 2306 if (cmd->nc_dma->nd_ncookie > 1) { 2307 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2308 &cmd->nc_dma->nd_cookie); 2309 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2310 cmd->nc_dma->nd_cookie.dmac_laddress; 2311 } 2312 2313 if (user) 2314 cmd->nc_dontpanic = B_TRUE; 2315 2316 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2317 2318 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2319 dev_err(nvme->n_dip, CE_WARN, 2320 "!IDENTIFY failed with sct = %x, sc = %x", 2321 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2322 goto fail; 2323 } 2324 2325 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 2326 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE); 2327 2328 fail: 2329 nvme_free_cmd(cmd); 2330 2331 return (ret); 2332 } 2333 2334 static int 2335 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2336 uint32_t val, uint32_t *res) 2337 { 2338 _NOTE(ARGUNUSED(nsid)); 2339 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2340 int ret = EINVAL; 2341 2342 ASSERT(res != NULL); 2343 2344 cmd->nc_sqid = 0; 2345 cmd->nc_callback = nvme_wakeup_cmd; 2346 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 2347 cmd->nc_sqe.sqe_cdw10 = feature; 2348 cmd->nc_sqe.sqe_cdw11 = val; 2349 2350 if (user) 2351 cmd->nc_dontpanic = B_TRUE; 2352 2353 switch (feature) { 2354 case NVME_FEAT_WRITE_CACHE: 2355 if (!nvme->n_write_cache_present) 2356 goto 
fail;
2357 break;
2358
2359 case NVME_FEAT_NQUEUES:
2360 break;
2361
2362 default:
2363 goto fail;
2364 }
2365
2366 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2367
2368 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2369 dev_err(nvme->n_dip, CE_WARN,
2370 "!SET FEATURES %d failed with sct = %x, sc = %x",
2371 feature, cmd->nc_cqe.cqe_sf.sf_sct,
2372 cmd->nc_cqe.cqe_sf.sf_sc);
2373 goto fail;
2374 }
2375
2376 *res = cmd->nc_cqe.cqe_dw0;
2377
2378 fail:
2379 nvme_free_cmd(cmd);
2380 return (ret);
2381 }
2382
2383 static int
2384 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2385 uint32_t *res, void **buf, size_t *bufsize)
2386 {
2387 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2388 int ret = EINVAL;
2389
2390 ASSERT(res != NULL);
2391
2392 if (bufsize != NULL)
2393 *bufsize = 0;
2394
2395 cmd->nc_sqid = 0;
2396 cmd->nc_callback = nvme_wakeup_cmd;
2397 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES;
2398 cmd->nc_sqe.sqe_cdw10 = feature;
2399 cmd->nc_sqe.sqe_cdw11 = *res;
2400
2401 /*
2402 * For some of the optional features there doesn't seem to be a method
2403 * of detecting whether it is supported other than using it. This will
2404 * cause an "Invalid Field in Command" error, which is normally considered
2405 * a programming error. Set the nc_dontpanic flag to override the panic
2406 * in nvme_check_generic_cmd_status().
2407 */
2408 switch (feature) {
2409 case NVME_FEAT_ARBITRATION:
2410 case NVME_FEAT_POWER_MGMT:
2411 case NVME_FEAT_TEMPERATURE:
2412 case NVME_FEAT_ERROR:
2413 case NVME_FEAT_NQUEUES:
2414 case NVME_FEAT_INTR_COAL:
2415 case NVME_FEAT_INTR_VECT:
2416 case NVME_FEAT_WRITE_ATOM:
2417 case NVME_FEAT_ASYNC_EVENT:
2418 break;
2419
2420 case NVME_FEAT_WRITE_CACHE:
2421 if (!nvme->n_write_cache_present)
2422 goto fail;
2423 break;
2424
2425 case NVME_FEAT_LBA_RANGE:
2426 if (!nvme->n_lba_range_supported)
2427 goto fail;
2428
2429 cmd->nc_dontpanic = B_TRUE;
2430 cmd->nc_sqe.sqe_nsid = nsid;
2431 ASSERT(bufsize != NULL);
2432 *bufsize = NVME_LBA_RANGE_BUFSIZE;
2433 break;
2434
2435 case NVME_FEAT_AUTO_PST:
2436 if (!nvme->n_auto_pst_supported)
2437 goto fail;
2438
2439 ASSERT(bufsize != NULL);
2440 *bufsize = NVME_AUTO_PST_BUFSIZE;
2441 break;
2442
2443 case NVME_FEAT_PROGRESS:
2444 if (!nvme->n_progress_supported)
2445 goto fail;
2446
2447 cmd->nc_dontpanic = B_TRUE;
2448 break;
2449
2450 default:
2451 goto fail;
2452 }
2453
2454 if (user)
2455 cmd->nc_dontpanic = B_TRUE;
2456
2457 if (bufsize != NULL && *bufsize != 0) {
2458 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ,
2459 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2460 dev_err(nvme->n_dip, CE_WARN,
2461 "!nvme_zalloc_dma failed for GET FEATURES");
2462 ret = ENOMEM;
2463 goto fail;
2464 }
2465
2466 if (cmd->nc_dma->nd_ncookie > 2) {
2467 dev_err(nvme->n_dip, CE_WARN,
2468 "!too many DMA cookies for GET FEATURES");
2469 atomic_inc_32(&nvme->n_too_many_cookies);
2470 ret = ENOMEM;
2471 goto fail;
2472 }
2473
2474 cmd->nc_sqe.sqe_dptr.d_prp[0] =
2475 cmd->nc_dma->nd_cookie.dmac_laddress;
2476 if (cmd->nc_dma->nd_ncookie > 1) {
2477 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2478 &cmd->nc_dma->nd_cookie);
2479 cmd->nc_sqe.sqe_dptr.d_prp[1] =
2480 cmd->nc_dma->nd_cookie.dmac_laddress;
2481 }
2482 }
2483
2484 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2485
2486 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2487 boolean_t known = B_TRUE;
2488
2489 /* Check if this is an unsupported optional feature */
2490 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2491 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) {
2492 switch (feature) {
2493 case NVME_FEAT_LBA_RANGE:
2494 nvme->n_lba_range_supported = B_FALSE;
2495 break;
2496 case NVME_FEAT_PROGRESS:
2497 nvme->n_progress_supported = B_FALSE;
2498 break;
2499 default:
2500 known = B_FALSE;
2501 break;
2502 }
2503 } else {
2504 known = B_FALSE;
2505 }
2506
2507 /* Report the error otherwise */
2508 if (!known) {
2509 dev_err(nvme->n_dip, CE_WARN,
2510 "!GET FEATURES %d failed with sct = %x, sc = %x",
2511 feature, cmd->nc_cqe.cqe_sf.sf_sct,
2512 cmd->nc_cqe.cqe_sf.sf_sc);
2513 }
2514
2515 goto fail;
2516 }
2517
2518 if (bufsize != NULL && *bufsize != 0) {
2519 ASSERT(buf != NULL);
2520 *buf = kmem_alloc(*bufsize, KM_SLEEP);
2521 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
2522 }
2523
2524 *res = cmd->nc_cqe.cqe_dw0;
2525
2526 fail:
2527 nvme_free_cmd(cmd);
2528 return (ret);
2529 }
2530
2531 static int
2532 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
2533 {
2534 nvme_write_cache_t nwc = { 0 };
2535
2536 if (enable)
2537 nwc.b.wc_wce = 1;
2538
2539 return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE,
2540 nwc.r, &nwc.r));
2541 }
2542
2543 static int
2544 nvme_set_nqueues(nvme_t *nvme)
2545 {
2546 nvme_nqueues_t nq = { 0 };
2547 int ret;
2548
2549 /*
2550 * The default is to allocate one completion queue per vector.
2551 */
2552 if (nvme->n_completion_queues == -1)
2553 nvme->n_completion_queues = nvme->n_intr_cnt;
2554
2555 /*
2556 * There is no point in having more completion queues than
2557 * interrupt vectors.
2558 */
2559 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2560 nvme->n_intr_cnt);
2561
2562 /*
2563 * The default is to use one submission queue per completion queue.
2564 */
2565 if (nvme->n_submission_queues == -1)
2566 nvme->n_submission_queues = nvme->n_completion_queues;
2567
2568 /*
2569 * There is no point in having more completion queues than
2570 * submission queues.
2571 */
2572 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2573 nvme->n_submission_queues);
2574
2575 ASSERT(nvme->n_submission_queues > 0);
2576 ASSERT(nvme->n_completion_queues > 0);
2577
2578 nq.b.nq_nsq = nvme->n_submission_queues - 1;
2579 nq.b.nq_ncq = nvme->n_completion_queues - 1;
2580
2581 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
2582 &nq.r);
2583
2584 if (ret == 0) {
2585 /*
2586 * Never use more than the requested number of queues.
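 *
 * The controller reports the number of queues actually allocated in
 * dword 0 of the completion, as zero-based counts. For example, if we
 * requested 8/8 queues but the controller only supports 4, then
 * nq.b.nq_ncq + 1 is 4 and both counts are clamped below.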
2587 */ 2588 nvme->n_submission_queues = MIN(nvme->n_submission_queues, 2589 nq.b.nq_nsq + 1); 2590 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2591 nq.b.nq_ncq + 1); 2592 } 2593 2594 return (ret); 2595 } 2596 2597 static int 2598 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq) 2599 { 2600 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2601 nvme_create_queue_dw10_t dw10 = { 0 }; 2602 nvme_create_cq_dw11_t c_dw11 = { 0 }; 2603 int ret; 2604 2605 dw10.b.q_qid = cq->ncq_id; 2606 dw10.b.q_qsize = cq->ncq_nentry - 1; 2607 2608 c_dw11.b.cq_pc = 1; 2609 c_dw11.b.cq_ien = 1; 2610 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt; 2611 2612 cmd->nc_sqid = 0; 2613 cmd->nc_callback = nvme_wakeup_cmd; 2614 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 2615 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2616 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 2617 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress; 2618 2619 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2620 2621 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2622 dev_err(nvme->n_dip, CE_WARN, 2623 "!CREATE CQUEUE failed with sct = %x, sc = %x", 2624 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2625 } 2626 2627 nvme_free_cmd(cmd); 2628 2629 return (ret); 2630 } 2631 2632 static int 2633 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 2634 { 2635 nvme_cq_t *cq = qp->nq_cq; 2636 nvme_cmd_t *cmd; 2637 nvme_create_queue_dw10_t dw10 = { 0 }; 2638 nvme_create_sq_dw11_t s_dw11 = { 0 }; 2639 int ret; 2640 2641 /* 2642 * It is possible to have more qpairs than completion queues, 2643 * and when the idx > ncq_id, that completion queue is shared 2644 * and has already been created. 2645 */ 2646 if (idx <= cq->ncq_id && 2647 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 2648 return (DDI_FAILURE); 2649 2650 dw10.b.q_qid = idx; 2651 dw10.b.q_qsize = qp->nq_nentry - 1; 2652 2653 s_dw11.b.sq_pc = 1; 2654 s_dw11.b.sq_cqid = cq->ncq_id; 2655 2656 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2657 cmd->nc_sqid = 0; 2658 cmd->nc_callback = nvme_wakeup_cmd; 2659 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 2660 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2661 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 2662 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 2663 2664 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2665 2666 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2667 dev_err(nvme->n_dip, CE_WARN, 2668 "!CREATE SQUEUE failed with sct = %x, sc = %x", 2669 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2670 } 2671 2672 nvme_free_cmd(cmd); 2673 2674 return (ret); 2675 } 2676 2677 static boolean_t 2678 nvme_reset(nvme_t *nvme, boolean_t quiesce) 2679 { 2680 nvme_reg_csts_t csts; 2681 int i; 2682 2683 nvme_put32(nvme, NVME_REG_CC, 0); 2684 2685 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2686 if (csts.b.csts_rdy == 1) { 2687 nvme_put32(nvme, NVME_REG_CC, 0); 2688 for (i = 0; i != nvme->n_timeout * 10; i++) { 2689 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2690 if (csts.b.csts_rdy == 0) 2691 break; 2692 2693 if (quiesce) 2694 drv_usecwait(50000); 2695 else 2696 delay(drv_usectohz(50000)); 2697 } 2698 } 2699 2700 nvme_put32(nvme, NVME_REG_AQA, 0); 2701 nvme_put32(nvme, NVME_REG_ASQ, 0); 2702 nvme_put32(nvme, NVME_REG_ACQ, 0); 2703 2704 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2705 return (csts.b.csts_rdy == 0 ? 
B_TRUE : B_FALSE);
2706 }
2707
2708 static void
2709 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
2710 {
2711 nvme_reg_cc_t cc;
2712 nvme_reg_csts_t csts;
2713 int i;
2714
2715 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);
2716
2717 cc.r = nvme_get32(nvme, NVME_REG_CC);
2718 cc.b.cc_shn = mode & 0x3;
2719 nvme_put32(nvme, NVME_REG_CC, cc.r);
2720
2721 for (i = 0; i != 10; i++) {
2722 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2723 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
2724 break;
2725
2726 if (quiesce)
2727 drv_usecwait(100000);
2728 else
2729 delay(drv_usectohz(100000));
2730 }
2731 }
2732
2733 /*
2734 * Return length of string without trailing spaces.
2735 */
2736 static int
2737 nvme_strlen(const char *str, int len)
2738 {
2739 if (len <= 0)
2740 return (0);
2741
2742 while (len > 0 && str[len - 1] == ' ')
2743 len--;
2744
2745 return (len);
2746 }
2747
2748 static void
2749 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
2750 {
2751 ulong_t bsize = 0;
2752 char *msg = "";
2753
2754 if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
2755 goto err;
2756
2757 if (!ISP2(bsize)) {
2758 msg = ": not a power of 2";
2759 goto err;
2760 }
2761
2762 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
2763 msg = ": too low";
2764 goto err;
2765 }
2766
2767 nvme->n_min_block_size = bsize;
2768 return;
2769
2770 err:
2771 dev_err(nvme->n_dip, CE_WARN,
2772 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
2773 "for model '%s'%s", val, model, msg);
2774
2775 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
2776 }
2777
2778 static void
2779 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
2780 boolean_t *b)
2781 {
2782 if (strcmp(val, "on") == 0 ||
2783 strcmp(val, "true") == 0)
2784 *b = B_TRUE;
2785 else if (strcmp(val, "off") == 0 ||
2786 strcmp(val, "false") == 0)
2787 *b = B_FALSE;
2788 else
2789 dev_err(nvme->n_dip, CE_WARN,
2790 "!nvme-config-list: invalid value for %s '%s'"
2791 " for model '%s', ignoring", name, val, model);
2792 }
2793
2794 static void
2795 nvme_config_list(nvme_t *nvme)
2796 {
2797 char **config_list;
2798 uint_t nelem;
2799 int rv, i;
2800
2801 /*
2802 * We're following the pattern of 'sd-config-list' here, but extending it.
2803 * Instead of two we have three separate strings for "model", "fwrev",
2804 * and "name-value-list".
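 *
 * A hypothetical nvme.conf entry could look like this:
 *
 *	nvme-config-list =
 *	    "SAMPLE-MODEL-123", "1.0,1.1", "min-phys-block-size:4096",
 *	    "SAMPLE-MODEL-456", "", "volatile-write-cache:off";
 *
 * An empty "fwrev" string matches any firmware revision, and the
 * name-value list is a comma-separated list of "name:value" pairs.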
2805 */ 2806 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip, 2807 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem); 2808 2809 if (rv != DDI_PROP_SUCCESS) { 2810 if (rv == DDI_PROP_CANNOT_DECODE) { 2811 dev_err(nvme->n_dip, CE_WARN, 2812 "!nvme-config-list: cannot be decoded"); 2813 } 2814 2815 return; 2816 } 2817 2818 if ((nelem % 3) != 0) { 2819 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be " 2820 "triplets of <model>/<fwrev>/<name-value-list> strings "); 2821 goto out; 2822 } 2823 2824 for (i = 0; i < nelem; i += 3) { 2825 char *model = config_list[i]; 2826 char *fwrev = config_list[i + 1]; 2827 char *nvp, *save_nv; 2828 int id_model_len, id_fwrev_len; 2829 2830 id_model_len = nvme_strlen(nvme->n_idctl->id_model, 2831 sizeof (nvme->n_idctl->id_model)); 2832 2833 if (strlen(model) != id_model_len) 2834 continue; 2835 2836 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0) 2837 continue; 2838 2839 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev, 2840 sizeof (nvme->n_idctl->id_fwrev)); 2841 2842 if (strlen(fwrev) != 0) { 2843 boolean_t match = B_FALSE; 2844 char *fwr, *last_fw; 2845 2846 for (fwr = strtok_r(fwrev, ",", &last_fw); 2847 fwr != NULL; 2848 fwr = strtok_r(NULL, ",", &last_fw)) { 2849 if (strlen(fwr) != id_fwrev_len) 2850 continue; 2851 2852 if (strncmp(fwr, nvme->n_idctl->id_fwrev, 2853 id_fwrev_len) == 0) 2854 match = B_TRUE; 2855 } 2856 2857 if (!match) 2858 continue; 2859 } 2860 2861 /* 2862 * We should now have a comma-separated list of name:value 2863 * pairs. 2864 */ 2865 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv); 2866 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) { 2867 char *name = nvp; 2868 char *val = strchr(nvp, ':'); 2869 2870 if (val == NULL || name == val) { 2871 dev_err(nvme->n_dip, CE_WARN, 2872 "!nvme-config-list: <name-value-list> " 2873 "for model '%s' is malformed", model); 2874 goto out; 2875 } 2876 2877 /* 2878 * Null-terminate 'name', move 'val' past ':' sep. 2879 */ 2880 *val++ = '\0'; 2881 2882 /* 2883 * Process the name:val pairs that we know about. 2884 */ 2885 if (strcmp(name, "ignore-unknown-vendor-status") == 0) { 2886 nvme_config_boolean(nvme, model, name, val, 2887 &nvme->n_ignore_unknown_vendor_status); 2888 } else if (strcmp(name, "min-phys-block-size") == 0) { 2889 nvme_config_min_block_size(nvme, model, val); 2890 } else if (strcmp(name, "volatile-write-cache") == 0) { 2891 nvme_config_boolean(nvme, model, name, val, 2892 &nvme->n_write_cache_enabled); 2893 } else { 2894 /* 2895 * Unknown 'name'. 2896 */ 2897 dev_err(nvme->n_dip, CE_WARN, 2898 "!nvme-config-list: unknown config '%s' " 2899 "for model '%s', ignoring", name, model); 2900 } 2901 } 2902 } 2903 2904 out: 2905 ddi_prop_free(config_list); 2906 } 2907 2908 static void 2909 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 2910 { 2911 /* 2912 * Section 7.7 of the spec describes how to get a unique ID for 2913 * the controller: the vendor ID, the model name and the serial 2914 * number shall be unique when combined. 2915 * 2916 * If a namespace has no EUI64 we use the above and add the hex 2917 * namespace ID to get a unique ID for the namespace. 
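 *
 * For example, a (hypothetical) controller with vendor ID 0x8086,
 * model "FOO MODEL" and serial "SN1234" yields the devid
 * "8086-FOO MODEL-SN1234-1" for namespace 1, per the format string
 * used below.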
2918 */ 2919 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2920 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 2921 2922 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2923 bcopy(nvme->n_idctl->id_serial, serial, 2924 sizeof (nvme->n_idctl->id_serial)); 2925 2926 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2927 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 2928 2929 NVME_NSID2NS(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X", 2930 nvme->n_idctl->id_vid, model, serial, nsid); 2931 } 2932 2933 static boolean_t 2934 nvme_allocated_ns(nvme_namespace_t *ns) 2935 { 2936 nvme_t *nvme = ns->ns_nvme; 2937 2938 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 2939 2940 /* 2941 * Since we don't know any better, we assume all namespaces to be 2942 * allocated. 2943 */ 2944 return (B_TRUE); 2945 } 2946 2947 static boolean_t 2948 nvme_active_ns(nvme_namespace_t *ns) 2949 { 2950 nvme_t *nvme = ns->ns_nvme; 2951 boolean_t ret = B_FALSE; 2952 uint64_t *ptr; 2953 2954 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 2955 2956 /* 2957 * Check whether the IDENTIFY NAMESPACE data is zero-filled. 2958 */ 2959 for (ptr = (uint64_t *)ns->ns_idns; 2960 ptr != (uint64_t *)(ns->ns_idns + 1); 2961 ptr++) { 2962 if (*ptr != 0) { 2963 ret = B_TRUE; 2964 break; 2965 } 2966 } 2967 2968 return (ret); 2969 } 2970 2971 static int 2972 nvme_init_ns(nvme_t *nvme, int nsid) 2973 { 2974 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 2975 nvme_identify_nsid_t *idns; 2976 boolean_t was_ignored; 2977 int last_rp; 2978 2979 ns->ns_nvme = nvme; 2980 2981 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 2982 2983 if (nvme_identify(nvme, B_FALSE, nsid, NVME_IDENTIFY_NSID, 2984 (void **)&idns) != 0) { 2985 dev_err(nvme->n_dip, CE_WARN, 2986 "!failed to identify namespace %d", nsid); 2987 return (DDI_FAILURE); 2988 } 2989 2990 if (ns->ns_idns != NULL) 2991 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t)); 2992 2993 ns->ns_idns = idns; 2994 ns->ns_id = nsid; 2995 2996 was_ignored = ns->ns_ignore; 2997 2998 ns->ns_allocated = nvme_allocated_ns(ns); 2999 ns->ns_active = nvme_active_ns(ns); 3000 3001 ns->ns_block_count = idns->id_nsize; 3002 ns->ns_block_size = 3003 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 3004 ns->ns_best_block_size = ns->ns_block_size; 3005 3006 /* 3007 * Get the EUI64 if present. 3008 */ 3009 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 3010 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 3011 3012 /* 3013 * Get the NGUID if present. 3014 */ 3015 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) 3016 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid)); 3017 3018 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 3019 if (*(uint64_t *)ns->ns_eui64 != 0) { 3020 uint8_t *eui64 = ns->ns_eui64; 3021 3022 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), 3023 "%02x%02x%02x%02x%02x%02x%02x%02x", 3024 eui64[0], eui64[1], eui64[2], eui64[3], 3025 eui64[4], eui64[5], eui64[6], eui64[7]); 3026 } else { 3027 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d", 3028 ns->ns_id); 3029 3030 nvme_prepare_devid(nvme, ns->ns_id); 3031 } 3032 3033 /* 3034 * Find the LBA format with no metadata and the best relative 3035 * performance. A value of 3 means "degraded", 0 is best. 
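 *
 * For example, given two metadata-free formats with lbaf_lbads 9
 * (rp = 2) and 12 (rp = 0), the 4096 byte block size wins as the
 * better performing choice.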
3036 */ 3037 last_rp = 3; 3038 for (int j = 0; j <= idns->id_nlbaf; j++) { 3039 if (idns->id_lbaf[j].lbaf_lbads == 0) 3040 break; 3041 if (idns->id_lbaf[j].lbaf_ms != 0) 3042 continue; 3043 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 3044 continue; 3045 last_rp = idns->id_lbaf[j].lbaf_rp; 3046 ns->ns_best_block_size = 3047 1 << idns->id_lbaf[j].lbaf_lbads; 3048 } 3049 3050 if (ns->ns_best_block_size < nvme->n_min_block_size) 3051 ns->ns_best_block_size = nvme->n_min_block_size; 3052 3053 was_ignored = ns->ns_ignore; 3054 3055 /* 3056 * We currently don't support namespaces that use either: 3057 * - protection information 3058 * - illegal block size (< 512) 3059 */ 3060 if (idns->id_dps.dp_pinfo) { 3061 dev_err(nvme->n_dip, CE_WARN, 3062 "!ignoring namespace %d, unsupported feature: " 3063 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 3064 ns->ns_ignore = B_TRUE; 3065 } else if (ns->ns_block_size < 512) { 3066 dev_err(nvme->n_dip, CE_WARN, 3067 "!ignoring namespace %d, unsupported block size %"PRIu64, 3068 nsid, (uint64_t)ns->ns_block_size); 3069 ns->ns_ignore = B_TRUE; 3070 } else { 3071 ns->ns_ignore = B_FALSE; 3072 } 3073 3074 /* 3075 * Keep a count of namespaces which are attachable. 3076 * See comments in nvme_bd_driveinfo() to understand its effect. 3077 */ 3078 if (was_ignored) { 3079 /* 3080 * Previously ignored, but now not. Count it. 3081 */ 3082 if (!ns->ns_ignore) 3083 nvme->n_namespaces_attachable++; 3084 } else { 3085 /* 3086 * Wasn't ignored previously, but now needs to be. 3087 * Discount it. 3088 */ 3089 if (ns->ns_ignore) 3090 nvme->n_namespaces_attachable--; 3091 } 3092 3093 return (DDI_SUCCESS); 3094 } 3095 3096 static int 3097 nvme_attach_ns(nvme_t *nvme, int nsid) 3098 { 3099 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 3100 3101 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 3102 3103 if (ns->ns_ignore) 3104 return (ENOTSUP); 3105 3106 if (ns->ns_bd_hdl == NULL) { 3107 bd_ops_t ops = nvme_bd_ops; 3108 3109 if (!nvme->n_idctl->id_oncs.on_dset_mgmt) 3110 ops.o_free_space = NULL; 3111 3112 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr, 3113 KM_SLEEP); 3114 3115 if (ns->ns_bd_hdl == NULL) { 3116 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev " 3117 "handle for namespace id %d", nsid); 3118 return (EINVAL); 3119 } 3120 } 3121 3122 if (bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl) != DDI_SUCCESS) 3123 return (EBUSY); 3124 3125 ns->ns_attached = B_TRUE; 3126 3127 return (0); 3128 } 3129 3130 static int 3131 nvme_detach_ns(nvme_t *nvme, int nsid) 3132 { 3133 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 3134 int rv; 3135 3136 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 3137 3138 if (ns->ns_ignore || !ns->ns_attached) 3139 return (0); 3140 3141 ASSERT(ns->ns_bd_hdl != NULL); 3142 rv = bd_detach_handle(ns->ns_bd_hdl); 3143 if (rv != DDI_SUCCESS) 3144 return (EBUSY); 3145 else 3146 ns->ns_attached = B_FALSE; 3147 3148 return (0); 3149 } 3150 3151 static int 3152 nvme_init(nvme_t *nvme) 3153 { 3154 nvme_reg_cc_t cc = { 0 }; 3155 nvme_reg_aqa_t aqa = { 0 }; 3156 nvme_reg_asq_t asq = { 0 }; 3157 nvme_reg_acq_t acq = { 0 }; 3158 nvme_reg_cap_t cap; 3159 nvme_reg_vs_t vs; 3160 nvme_reg_csts_t csts; 3161 int i = 0; 3162 uint16_t nqueues; 3163 uint_t tq_threads; 3164 char model[sizeof (nvme->n_idctl->id_model) + 1]; 3165 char *vendor, *product; 3166 3167 /* Check controller version */ 3168 vs.r = nvme_get32(nvme, NVME_REG_VS); 3169 nvme->n_version.v_major = vs.b.vs_mjr; 3170 nvme->n_version.v_minor = vs.b.vs_mnr; 3171 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version 
%d.%d", 3172 nvme->n_version.v_major, nvme->n_version.v_minor); 3173 3174 if (nvme->n_version.v_major > nvme_version_major) { 3175 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 3176 nvme_version_major); 3177 if (nvme->n_strict_version) 3178 goto fail; 3179 } 3180 3181 /* retrieve controller configuration */ 3182 cap.r = nvme_get64(nvme, NVME_REG_CAP); 3183 3184 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 3185 dev_err(nvme->n_dip, CE_WARN, 3186 "!NVM command set not supported by hardware"); 3187 goto fail; 3188 } 3189 3190 nvme->n_nssr_supported = cap.b.cap_nssrs; 3191 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 3192 nvme->n_timeout = cap.b.cap_to; 3193 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 3194 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 3195 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 3196 3197 /* 3198 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 3199 * the base page size of 4k (1<<12), so add 12 here to get the real 3200 * page size value. 3201 */ 3202 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 3203 cap.b.cap_mpsmax + 12); 3204 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 3205 3206 /* 3207 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 3208 */ 3209 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 3210 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 3211 3212 /* 3213 * Set up PRP DMA to transfer 1 page-aligned page at a time. 3214 * Maxxfer may be increased after we identified the controller limits. 3215 */ 3216 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 3217 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 3218 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 3219 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 3220 3221 /* 3222 * Reset controller if it's still in ready state. 3223 */ 3224 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 3225 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 3226 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 3227 nvme->n_dead = B_TRUE; 3228 goto fail; 3229 } 3230 3231 /* 3232 * Create the cq array with one completion queue to be assigned 3233 * to the admin queue pair and a limited number of taskqs (4). 3234 */ 3235 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) != 3236 DDI_SUCCESS) { 3237 dev_err(nvme->n_dip, CE_WARN, 3238 "!failed to pre-allocate admin completion queue"); 3239 goto fail; 3240 } 3241 /* 3242 * Create the admin queue pair. 
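 * Unlike the I/O queues created later, the admin queue pair is not
 * created with a command; its size and the physical base addresses
 * of both queues are programmed directly into the AQA, ASQ and ACQ
 * registers below, which is why the ASSERTs check page alignment.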
3243 */ 3244 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 3245 != DDI_SUCCESS) { 3246 dev_err(nvme->n_dip, CE_WARN, 3247 "!unable to allocate admin qpair"); 3248 goto fail; 3249 } 3250 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 3251 nvme->n_ioq[0] = nvme->n_adminq; 3252 3253 nvme->n_progress |= NVME_ADMIN_QUEUE; 3254 3255 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3256 "admin-queue-len", nvme->n_admin_queue_len); 3257 3258 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 3259 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 3260 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 3261 3262 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 3263 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 3264 3265 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 3266 nvme_put64(nvme, NVME_REG_ASQ, asq); 3267 nvme_put64(nvme, NVME_REG_ACQ, acq); 3268 3269 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 3270 cc.b.cc_css = 0; /* use NVM command set */ 3271 cc.b.cc_mps = nvme->n_pageshift - 12; 3272 cc.b.cc_shn = 0; /* no shutdown in progress */ 3273 cc.b.cc_en = 1; /* enable controller */ 3274 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 3275 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 3276 3277 nvme_put32(nvme, NVME_REG_CC, cc.r); 3278 3279 /* 3280 * Wait for the controller to become ready. 3281 */ 3282 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3283 if (csts.b.csts_rdy == 0) { 3284 for (i = 0; i != nvme->n_timeout * 10; i++) { 3285 delay(drv_usectohz(50000)); 3286 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3287 3288 if (csts.b.csts_cfs == 1) { 3289 dev_err(nvme->n_dip, CE_WARN, 3290 "!controller fatal status at init"); 3291 ddi_fm_service_impact(nvme->n_dip, 3292 DDI_SERVICE_LOST); 3293 nvme->n_dead = B_TRUE; 3294 goto fail; 3295 } 3296 3297 if (csts.b.csts_rdy == 1) 3298 break; 3299 } 3300 } 3301 3302 if (csts.b.csts_rdy == 0) { 3303 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 3304 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 3305 nvme->n_dead = B_TRUE; 3306 goto fail; 3307 } 3308 3309 /* 3310 * Assume an abort command limit of 1. We'll destroy and re-init 3311 * that later when we know the true abort command limit. 3312 */ 3313 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 3314 3315 /* 3316 * Set up initial interrupt for admin queue. 3317 */ 3318 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 3319 != DDI_SUCCESS) && 3320 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 3321 != DDI_SUCCESS) && 3322 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 3323 != DDI_SUCCESS)) { 3324 dev_err(nvme->n_dip, CE_WARN, 3325 "!failed to setup initial interrupt"); 3326 goto fail; 3327 } 3328 3329 /* 3330 * Post an asynchronous event command to catch errors. 3331 * We assume the asynchronous events are supported as required by 3332 * specification (Figure 40 in section 5 of NVMe 1.2). 3333 * However, since at least qemu does not follow the specification, 3334 * we need a mechanism to protect ourselves. 3335 */ 3336 nvme->n_async_event_supported = B_TRUE; 3337 nvme_async_event(nvme); 3338 3339 /* 3340 * Identify Controller 3341 */ 3342 if (nvme_identify(nvme, B_FALSE, 0, NVME_IDENTIFY_CTRL, 3343 (void **)&nvme->n_idctl) != 0) { 3344 dev_err(nvme->n_dip, CE_WARN, 3345 "!failed to identify controller"); 3346 goto fail; 3347 } 3348 3349 /* 3350 * Process nvme-config-list (if present) in nvme.conf. 
3351 */ 3352 nvme_config_list(nvme); 3353 3354 /* 3355 * Get Vendor & Product ID 3356 */ 3357 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 3358 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 3359 sata_split_model(model, &vendor, &product); 3360 3361 if (vendor == NULL) 3362 nvme->n_vendor = strdup("NVMe"); 3363 else 3364 nvme->n_vendor = strdup(vendor); 3365 3366 nvme->n_product = strdup(product); 3367 3368 /* 3369 * Get controller limits. 3370 */ 3371 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 3372 MIN(nvme->n_admin_queue_len / 10, 3373 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 3374 3375 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3376 "async-event-limit", nvme->n_async_event_limit); 3377 3378 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 3379 3380 /* 3381 * Reinitialize the semaphore with the true abort command limit 3382 * supported by the hardware. It's not necessary to disable interrupts 3383 * as only command aborts use the semaphore, and no commands are 3384 * executed or aborted while we're here. 3385 */ 3386 sema_destroy(&nvme->n_abort_sema); 3387 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 3388 SEMA_DRIVER, NULL); 3389 3390 nvme->n_progress |= NVME_CTRL_LIMITS; 3391 3392 if (nvme->n_idctl->id_mdts == 0) 3393 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 3394 else 3395 nvme->n_max_data_transfer_size = 3396 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 3397 3398 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 3399 3400 /* 3401 * Limit n_max_data_transfer_size to what we can handle in one PRP. 3402 * Chained PRPs are currently unsupported. 3403 * 3404 * This is a no-op on hardware which doesn't support a transfer size 3405 * big enough to require chained PRPs. 3406 */ 3407 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 3408 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 3409 3410 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 3411 3412 /* 3413 * Make sure the minimum/maximum queue entry sizes are not 3414 * larger/smaller than the default. 3415 */ 3416 3417 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 3418 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 3419 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 3420 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 3421 goto fail; 3422 3423 /* 3424 * Check for the presence of a Volatile Write Cache. If present, 3425 * enable or disable based on the value of the property 3426 * volatile-write-cache-enable (default is enabled). 3427 */ 3428 nvme->n_write_cache_present = 3429 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 3430 3431 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3432 "volatile-write-cache-present", 3433 nvme->n_write_cache_present ? 1 : 0); 3434 3435 if (!nvme->n_write_cache_present) { 3436 nvme->n_write_cache_enabled = B_FALSE; 3437 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 3438 != 0) { 3439 dev_err(nvme->n_dip, CE_WARN, 3440 "!failed to %sable volatile write cache", 3441 nvme->n_write_cache_enabled ? "en" : "dis"); 3442 /* 3443 * Assume the cache is (still) enabled. 3444 */ 3445 nvme->n_write_cache_enabled = B_TRUE; 3446 } 3447 3448 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3449 "volatile-write-cache-enable", 3450 nvme->n_write_cache_enabled ? 
1 : 0);
3451
3452 /*
3453 * Assume LBA Range Type feature is supported. If it isn't this
3454 * will be set to B_FALSE by nvme_get_features().
3455 */
3456 nvme->n_lba_range_supported = B_TRUE;
3457
3458 /*
3459 * Check support for Autonomous Power State Transition.
3460 */
3461 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
3462 nvme->n_auto_pst_supported =
3463 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE;
3464
3465 /*
3466 * Assume Software Progress Marker feature is supported. If it isn't
3467 * this will be set to B_FALSE by nvme_get_features().
3468 */
3469 nvme->n_progress_supported = B_TRUE;
3470
3471 /*
3472 * Get number of supported namespaces and allocate namespace array.
3473 */
3474 nvme->n_namespace_count = nvme->n_idctl->id_nn;
3475
3476 if (nvme->n_namespace_count == 0) {
3477 dev_err(nvme->n_dip, CE_WARN,
3478 "!controllers without namespaces are not supported");
3479 goto fail;
3480 }
3481
3482 if (nvme->n_namespace_count > NVME_MINOR_MAX) {
3483 dev_err(nvme->n_dip, CE_WARN,
3484 "!too many namespaces: %d, limiting to %d\n",
3485 nvme->n_namespace_count, NVME_MINOR_MAX);
3486 nvme->n_namespace_count = NVME_MINOR_MAX;
3487 }
3488
3489 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
3490 nvme->n_namespace_count, KM_SLEEP);
3491
3492 /*
3493 * Try to set up MSI/MSI-X interrupts.
3494 */
3495 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
3496 != 0) {
3497 nvme_release_interrupts(nvme);
3498
3499 nqueues = MIN(UINT16_MAX, ncpus);
3500
3501 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
3502 nqueues) != DDI_SUCCESS) &&
3503 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
3504 nqueues) != DDI_SUCCESS)) {
3505 dev_err(nvme->n_dip, CE_WARN,
3506 "!failed to setup MSI/MSI-X interrupts");
3507 goto fail;
3508 }
3509 }
3510
3511 /*
3512 * Create I/O queue pairs.
3513 */
3514
3515 if (nvme_set_nqueues(nvme) != 0) {
3516 dev_err(nvme->n_dip, CE_WARN,
3517 "!failed to set number of I/O queues to %d",
3518 nvme->n_intr_cnt);
3519 goto fail;
3520 }
3521
3522 /*
3523 * Reallocate I/O queue array
3524 */
3525 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
3526 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
3527 (nvme->n_submission_queues + 1), KM_SLEEP);
3528 nvme->n_ioq[0] = nvme->n_adminq;
3529
3530 /*
3531 * There should always be at least as many submission queues
3532 * as completion queues.
3533 */
3534 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
3535
3536 nvme->n_ioq_count = nvme->n_submission_queues;
3537
3538 nvme->n_io_squeue_len =
3539 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
3540
3541 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
3542 nvme->n_io_squeue_len);
3543
3544 /*
3545 * Pre-allocate completion queues.
3546 * When there are the same number of submission and completion
3547 * queues there is no value in having a larger completion
3548 * queue length.
3549 */
3550 if (nvme->n_submission_queues == nvme->n_completion_queues)
3551 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3552 nvme->n_io_squeue_len);
3553
3554 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3555 nvme->n_max_queue_entries);
3556
3557 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
3558 nvme->n_io_cqueue_len);
3559
3560 /*
3561 * Assign an equal number of taskq threads to each completion
3562 * queue, capping the total number of threads to the number
3563 * of CPUs.
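 *
 * For example, with 8 CPUs and 4 completion queues each queue gets
 * 2 taskq threads; with 16 completion queues on the same system the
 * division yields 0, and the fallback below raises it to 1 thread
 * per queue.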
3564 */
3565 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
3566
3567 /*
3568 * In case the calculation above is zero, we need at least one
3569 * thread per completion queue.
3570 */
3571 tq_threads = MAX(1, tq_threads);
3572
3573 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
3574 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
3575 dev_err(nvme->n_dip, CE_WARN,
3576 "!failed to pre-allocate completion queues");
3577 goto fail;
3578 }
3579
3580 /*
3581 * If we use fewer completion queues than interrupt vectors, return
3582 * some of the interrupt vectors back to the system.
3583 */
3584 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
3585 nvme_release_interrupts(nvme);
3586
3587 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
3588 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
3589 dev_err(nvme->n_dip, CE_WARN,
3590 "!failed to reduce number of interrupts");
3591 goto fail;
3592 }
3593 }
3594
3595 /*
3596 * Alloc & register I/O queue pairs
3597 */
3598
3599 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
3600 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
3601 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
3602 dev_err(nvme->n_dip, CE_WARN,
3603 "!unable to allocate I/O qpair %d", i);
3604 goto fail;
3605 }
3606
3607 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
3608 dev_err(nvme->n_dip, CE_WARN,
3609 "!unable to create I/O qpair %d", i);
3610 goto fail;
3611 }
3612 }
3613
3614 /*
3615 * Post more asynchronous event commands to reduce event reporting
3616 * latency as suggested by the spec.
3617 */
3618 if (nvme->n_async_event_supported) {
3619 for (i = 1; i != nvme->n_async_event_limit; i++)
3620 nvme_async_event(nvme);
3621 }
3622
3623 return (DDI_SUCCESS);
3624
3625 fail:
3626 (void) nvme_reset(nvme, B_FALSE);
3627 return (DDI_FAILURE);
3628 }
3629
3630 static uint_t
3631 nvme_intr(caddr_t arg1, caddr_t arg2)
3632 {
3633 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3634 nvme_t *nvme = (nvme_t *)arg1;
3635 int inum = (int)(uintptr_t)arg2;
3636 int ccnt = 0;
3637 int qnum;
3638
3639 if (inum >= nvme->n_intr_cnt)
3640 return (DDI_INTR_UNCLAIMED);
3641
3642 if (nvme->n_dead)
3643 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
3644 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
3645
3646 /*
3647 * The interrupt vector a queue uses is calculated as queue_idx %
3648 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
3649 * in steps of n_intr_cnt to process all queues using this vector.
3650 */
3651 for (qnum = inum;
3652 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
3653 qnum += nvme->n_intr_cnt) {
3654 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
3655 }
3656
3657 return (ccnt > 0 ?
DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 3658 } 3659 3660 static void 3661 nvme_release_interrupts(nvme_t *nvme) 3662 { 3663 int i; 3664 3665 for (i = 0; i < nvme->n_intr_cnt; i++) { 3666 if (nvme->n_inth[i] == NULL) 3667 break; 3668 3669 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3670 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 3671 else 3672 (void) ddi_intr_disable(nvme->n_inth[i]); 3673 3674 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 3675 (void) ddi_intr_free(nvme->n_inth[i]); 3676 } 3677 3678 kmem_free(nvme->n_inth, nvme->n_inth_sz); 3679 nvme->n_inth = NULL; 3680 nvme->n_inth_sz = 0; 3681 3682 nvme->n_progress &= ~NVME_INTERRUPTS; 3683 } 3684 3685 static int 3686 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 3687 { 3688 int nintrs, navail, count; 3689 int ret; 3690 int i; 3691 3692 if (nvme->n_intr_types == 0) { 3693 ret = ddi_intr_get_supported_types(nvme->n_dip, 3694 &nvme->n_intr_types); 3695 if (ret != DDI_SUCCESS) { 3696 dev_err(nvme->n_dip, CE_WARN, 3697 "!%s: ddi_intr_get_supported_types failed", 3698 __func__); 3699 return (ret); 3700 } 3701 #ifdef __x86 3702 if (get_hwenv() == HW_VMWARE) 3703 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 3704 #endif 3705 } 3706 3707 if ((nvme->n_intr_types & intr_type) == 0) 3708 return (DDI_FAILURE); 3709 3710 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 3711 if (ret != DDI_SUCCESS) { 3712 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 3713 __func__); 3714 return (ret); 3715 } 3716 3717 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 3718 if (ret != DDI_SUCCESS) { 3719 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 3720 __func__); 3721 return (ret); 3722 } 3723 3724 /* We want at most one interrupt per queue pair. 
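 * For example (numbers illustrative only): if ddi_intr_get_navail()
 * reported 24 available vectors but only 8 queue pairs were requested,
 * navail is clamped to 8 below and the surplus vectors are never
 * allocated.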
*/ 3725 if (navail > nqpairs) 3726 navail = nqpairs; 3727 3728 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 3729 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 3730 3731 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 3732 &count, 0); 3733 if (ret != DDI_SUCCESS) { 3734 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 3735 __func__); 3736 goto fail; 3737 } 3738 3739 nvme->n_intr_cnt = count; 3740 3741 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 3742 if (ret != DDI_SUCCESS) { 3743 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 3744 __func__); 3745 goto fail; 3746 } 3747 3748 for (i = 0; i < count; i++) { 3749 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 3750 (void *)nvme, (void *)(uintptr_t)i); 3751 if (ret != DDI_SUCCESS) { 3752 dev_err(nvme->n_dip, CE_WARN, 3753 "!%s: ddi_intr_add_handler failed", __func__); 3754 goto fail; 3755 } 3756 } 3757 3758 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 3759 3760 for (i = 0; i < count; i++) { 3761 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3762 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 3763 else 3764 ret = ddi_intr_enable(nvme->n_inth[i]); 3765 3766 if (ret != DDI_SUCCESS) { 3767 dev_err(nvme->n_dip, CE_WARN, 3768 "!%s: enabling interrupt %d failed", __func__, i); 3769 goto fail; 3770 } 3771 } 3772 3773 nvme->n_intr_type = intr_type; 3774 3775 nvme->n_progress |= NVME_INTERRUPTS; 3776 3777 return (DDI_SUCCESS); 3778 3779 fail: 3780 nvme_release_interrupts(nvme); 3781 3782 return (ret); 3783 } 3784 3785 static int 3786 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 3787 { 3788 _NOTE(ARGUNUSED(arg)); 3789 3790 pci_ereport_post(dip, fm_error, NULL); 3791 return (fm_error->fme_status); 3792 } 3793 3794 static void 3795 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a, 3796 void *b) 3797 { 3798 nvme_t *nvme = a; 3799 3800 nvme->n_dead = B_TRUE; 3801 3802 /* 3803 * Fail all outstanding commands, including those in the admin queue 3804 * (queue 0). 3805 */ 3806 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) { 3807 nvme_qpair_t *qp = nvme->n_ioq[i]; 3808 3809 mutex_enter(&qp->nq_mutex); 3810 for (size_t j = 0; j < qp->nq_nentry; j++) { 3811 nvme_cmd_t *cmd = qp->nq_cmd[j]; 3812 nvme_cmd_t *u_cmd; 3813 3814 if (cmd == NULL) { 3815 continue; 3816 } 3817 3818 /* 3819 * Since we have the queue lock held the entire time we 3820 * iterate over it, it's not possible for the queue to 3821 * change underneath us. Thus, we don't need to check 3822 * that the return value of nvme_unqueue_cmd matches the 3823 * requested cmd to unqueue. 3824 */ 3825 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 3826 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, 3827 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 3828 3829 ASSERT3P(u_cmd, ==, cmd); 3830 } 3831 mutex_exit(&qp->nq_mutex); 3832 } 3833 } 3834 3835 static int 3836 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3837 { 3838 nvme_t *nvme; 3839 int instance; 3840 int nregs; 3841 off_t regsize; 3842 int i; 3843 char name[32]; 3844 boolean_t attached_ns; 3845 3846 if (cmd != DDI_ATTACH) 3847 return (DDI_FAILURE); 3848 3849 instance = ddi_get_instance(dip); 3850 3851 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 3852 return (DDI_FAILURE); 3853 3854 nvme = ddi_get_soft_state(nvme_state, instance); 3855 ddi_set_driver_private(dip, nvme); 3856 nvme->n_dip = dip; 3857 3858 /* Set up event handlers for hot removal. 
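 * If the cookie and handler are obtained, a surprise removal will invoke
 * nvme_remove_callback() (above), which marks the controller dead and
 * fails all outstanding commands by dispatching their callbacks.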
*/ 3859 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT, 3860 &nvme->n_rm_cookie) != DDI_SUCCESS) { 3861 goto fail; 3862 } 3863 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie, 3864 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) != 3865 DDI_SUCCESS) { 3866 goto fail; 3867 } 3868 3869 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL); 3870 nvme->n_progress |= NVME_MUTEX_INIT; 3871 3872 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3873 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 3874 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 3875 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 3876 B_TRUE : B_FALSE; 3877 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3878 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 3879 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3880 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 3881 /* 3882 * Double up the default for completion queues in case of 3883 * queue sharing. 3884 */ 3885 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3886 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 3887 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3888 DDI_PROP_DONTPASS, "async-event-limit", 3889 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 3890 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3891 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 3892 B_TRUE : B_FALSE; 3893 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3894 DDI_PROP_DONTPASS, "min-phys-block-size", 3895 NVME_DEFAULT_MIN_BLOCK_SIZE); 3896 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3897 DDI_PROP_DONTPASS, "max-submission-queues", -1); 3898 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3899 DDI_PROP_DONTPASS, "max-completion-queues", -1); 3900 3901 if (!ISP2(nvme->n_min_block_size) || 3902 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 3903 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 3904 "using default %d", ISP2(nvme->n_min_block_size) ? 3905 "too low" : "not a power of 2", 3906 NVME_DEFAULT_MIN_BLOCK_SIZE); 3907 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 3908 } 3909 3910 if (nvme->n_submission_queues != -1 && 3911 (nvme->n_submission_queues < 1 || 3912 nvme->n_submission_queues > UINT16_MAX)) { 3913 dev_err(dip, CE_WARN, "!\"max-submission-queues\"=%d is not " 3914 "valid. Must be [1..%d]", nvme->n_submission_queues, 3915 UINT16_MAX); 3916 nvme->n_submission_queues = -1; 3917 } 3918 3919 if (nvme->n_completion_queues != -1 && 3920 (nvme->n_completion_queues < 1 || 3921 nvme->n_completion_queues > UINT16_MAX)) { 3922 dev_err(dip, CE_WARN, "!\"max-completion-queues\"=%d is not " 3923 "valid. 
Must be [1..%d]", nvme->n_completion_queues, 3924 UINT16_MAX); 3925 nvme->n_completion_queues = -1; 3926 } 3927 3928 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 3929 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 3930 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 3931 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 3932 3933 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 3934 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 3935 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 3936 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 3937 3938 if (nvme->n_async_event_limit < 1) 3939 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 3940 3941 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 3942 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 3943 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 3944 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 3945 3946 /* 3947 * Set up FMA support. 3948 */ 3949 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 3950 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 3951 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3952 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3953 3954 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 3955 3956 if (nvme->n_fm_cap) { 3957 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 3958 nvme->n_reg_acc_attr.devacc_attr_access = 3959 DDI_FLAGERR_ACC; 3960 3961 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 3962 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3963 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3964 } 3965 3966 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3967 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3968 pci_ereport_setup(dip); 3969 3970 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3971 ddi_fm_handler_register(dip, nvme_fm_errcb, 3972 (void *)nvme); 3973 } 3974 3975 nvme->n_progress |= NVME_FMA_INIT; 3976 3977 /* 3978 * The spec defines several register sets. Only the controller 3979 * registers (set 1) are currently used. 3980 */ 3981 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 3982 nregs < 2 || 3983 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 3984 goto fail; 3985 3986 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 3987 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 3988 dev_err(dip, CE_WARN, "!failed to map regset 1"); 3989 goto fail; 3990 } 3991 3992 nvme->n_progress |= NVME_REGS_MAPPED; 3993 3994 /* 3995 * Create PRP DMA cache 3996 */ 3997 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 3998 ddi_driver_name(dip), ddi_get_instance(dip)); 3999 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 4000 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 4001 NULL, (void *)nvme, NULL, 0); 4002 4003 if (nvme_init(nvme) != DDI_SUCCESS) 4004 goto fail; 4005 4006 /* 4007 * Initialize the driver with the UFM subsystem 4008 */ 4009 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 4010 &nvme->n_ufmh, nvme) != 0) { 4011 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 4012 goto fail; 4013 } 4014 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 4015 ddi_ufm_update(nvme->n_ufmh); 4016 nvme->n_progress |= NVME_UFM_INIT; 4017 4018 mutex_init(&nvme->n_mgmt_mutex, NULL, MUTEX_DRIVER, NULL); 4019 nvme->n_progress |= NVME_MGMT_INIT; 4020 4021 /* 4022 * Identify namespaces. 4023 */ 4024 mutex_enter(&nvme->n_mgmt_mutex); 4025 4026 for (i = 1; i <= nvme->n_namespace_count; i++) { 4027 nvme_namespace_t *ns = NVME_NSID2NS(nvme, i); 4028 4029 /* 4030 * Namespaces start out ignored. 
When nvme_init_ns() checks 4031 * their properties and finds they can be used, it will set 4032 * ns_ignore to B_FALSE. It will also use this state change 4033 * to keep an accurate count of attachable namespaces. 4034 */ 4035 ns->ns_ignore = B_TRUE; 4036 if (nvme_init_ns(nvme, i) != 0) { 4037 mutex_exit(&nvme->n_mgmt_mutex); 4038 goto fail; 4039 } 4040 4041 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR, 4042 NVME_MINOR(ddi_get_instance(nvme->n_dip), i), 4043 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 4044 mutex_exit(&nvme->n_mgmt_mutex); 4045 dev_err(dip, CE_WARN, 4046 "!failed to create minor node for namespace %d", i); 4047 goto fail; 4048 } 4049 } 4050 4051 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 4052 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) 4053 != DDI_SUCCESS) { 4054 mutex_exit(&nvme->n_mgmt_mutex); 4055 dev_err(dip, CE_WARN, "nvme_attach: " 4056 "cannot create devctl minor node"); 4057 goto fail; 4058 } 4059 4060 attached_ns = B_FALSE; 4061 for (i = 1; i <= nvme->n_namespace_count; i++) { 4062 int rv; 4063 4064 rv = nvme_attach_ns(nvme, i); 4065 if (rv == 0) { 4066 attached_ns = B_TRUE; 4067 } else if (rv != ENOTSUP) { 4068 dev_err(nvme->n_dip, CE_WARN, 4069 "!failed to attach namespace %d: %d", i, rv); 4070 /* 4071 * Once we have successfully attached a namespace we 4072 * can no longer fail the driver attach as there is now 4073 * a blkdev child node linked to this device, and 4074 * our node is not yet in the attached state. 4075 */ 4076 if (!attached_ns) { 4077 mutex_exit(&nvme->n_mgmt_mutex); 4078 goto fail; 4079 } 4080 } 4081 } 4082 4083 mutex_exit(&nvme->n_mgmt_mutex); 4084 4085 return (DDI_SUCCESS); 4086 4087 fail: 4088 /* attach successful anyway so that FMA can retire the device */ 4089 if (nvme->n_dead) 4090 return (DDI_SUCCESS); 4091 4092 (void) nvme_detach(dip, DDI_DETACH); 4093 4094 return (DDI_FAILURE); 4095 } 4096 4097 static int 4098 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4099 { 4100 int instance, i; 4101 nvme_t *nvme; 4102 4103 if (cmd != DDI_DETACH) 4104 return (DDI_FAILURE); 4105 4106 instance = ddi_get_instance(dip); 4107 4108 nvme = ddi_get_soft_state(nvme_state, instance); 4109 4110 if (nvme == NULL) 4111 return (DDI_FAILURE); 4112 4113 ddi_remove_minor_node(dip, "devctl"); 4114 4115 if (nvme->n_ns) { 4116 for (i = 1; i <= nvme->n_namespace_count; i++) { 4117 nvme_namespace_t *ns = NVME_NSID2NS(nvme, i); 4118 4119 ddi_remove_minor_node(dip, ns->ns_name); 4120 4121 if (ns->ns_bd_hdl) { 4122 (void) bd_detach_handle(ns->ns_bd_hdl); 4123 bd_free_handle(ns->ns_bd_hdl); 4124 } 4125 4126 if (ns->ns_idns) 4127 kmem_free(ns->ns_idns, 4128 sizeof (nvme_identify_nsid_t)); 4129 if (ns->ns_devid) 4130 strfree(ns->ns_devid); 4131 } 4132 4133 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 4134 nvme->n_namespace_count); 4135 } 4136 4137 if (nvme->n_progress & NVME_MGMT_INIT) { 4138 mutex_destroy(&nvme->n_mgmt_mutex); 4139 } 4140 4141 if (nvme->n_progress & NVME_UFM_INIT) { 4142 ddi_ufm_fini(nvme->n_ufmh); 4143 mutex_destroy(&nvme->n_fwslot_mutex); 4144 } 4145 4146 if (nvme->n_progress & NVME_INTERRUPTS) 4147 nvme_release_interrupts(nvme); 4148 4149 for (i = 0; i < nvme->n_cq_count; i++) { 4150 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL) 4151 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq); 4152 } 4153 4154 if (nvme->n_progress & NVME_MUTEX_INIT) { 4155 mutex_destroy(&nvme->n_minor_mutex); 4156 } 4157 4158 if (nvme->n_ioq_count > 0) { 4159 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 4160 if (nvme->n_ioq[i] != NULL) { 
4161 /* TODO: send destroy queue commands */ 4162 nvme_free_qpair(nvme->n_ioq[i]); 4163 } 4164 } 4165 4166 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 4167 (nvme->n_ioq_count + 1)); 4168 } 4169 4170 if (nvme->n_prp_cache != NULL) { 4171 kmem_cache_destroy(nvme->n_prp_cache); 4172 } 4173 4174 if (nvme->n_progress & NVME_REGS_MAPPED) { 4175 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 4176 (void) nvme_reset(nvme, B_FALSE); 4177 } 4178 4179 if (nvme->n_progress & NVME_CTRL_LIMITS) 4180 sema_destroy(&nvme->n_abort_sema); 4181 4182 if (nvme->n_progress & NVME_ADMIN_QUEUE) 4183 nvme_free_qpair(nvme->n_adminq); 4184 4185 if (nvme->n_cq_count > 0) { 4186 nvme_destroy_cq_array(nvme, 0); 4187 nvme->n_cq = NULL; 4188 nvme->n_cq_count = 0; 4189 } 4190 4191 if (nvme->n_idctl) 4192 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 4193 4194 if (nvme->n_progress & NVME_REGS_MAPPED) 4195 ddi_regs_map_free(&nvme->n_regh); 4196 4197 if (nvme->n_progress & NVME_FMA_INIT) { 4198 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4199 ddi_fm_handler_unregister(nvme->n_dip); 4200 4201 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 4202 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4203 pci_ereport_teardown(nvme->n_dip); 4204 4205 ddi_fm_fini(nvme->n_dip); 4206 } 4207 4208 if (nvme->n_vendor != NULL) 4209 strfree(nvme->n_vendor); 4210 4211 if (nvme->n_product != NULL) 4212 strfree(nvme->n_product); 4213 4214 /* Clean up hot removal event handler. */ 4215 if (nvme->n_ev_rm_cb_id != NULL) { 4216 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id); 4217 } 4218 nvme->n_ev_rm_cb_id = NULL; 4219 4220 ddi_soft_state_free(nvme_state, instance); 4221 4222 return (DDI_SUCCESS); 4223 } 4224 4225 static int 4226 nvme_quiesce(dev_info_t *dip) 4227 { 4228 int instance; 4229 nvme_t *nvme; 4230 4231 instance = ddi_get_instance(dip); 4232 4233 nvme = ddi_get_soft_state(nvme_state, instance); 4234 4235 if (nvme == NULL) 4236 return (DDI_FAILURE); 4237 4238 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE); 4239 4240 (void) nvme_reset(nvme, B_TRUE); 4241 4242 return (DDI_SUCCESS); 4243 } 4244 4245 static int 4246 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma) 4247 { 4248 nvme_t *nvme = cmd->nc_nvme; 4249 uint_t nprp_per_page, nprp; 4250 uint64_t *prp; 4251 const ddi_dma_cookie_t *cookie; 4252 uint_t idx; 4253 uint_t ncookies = ddi_dma_ncookies(dma); 4254 4255 if (ncookies == 0) 4256 return (DDI_FAILURE); 4257 4258 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL) 4259 return (DDI_FAILURE); 4260 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress; 4261 4262 if (ncookies == 1) { 4263 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 4264 return (DDI_SUCCESS); 4265 } else if (ncookies == 2) { 4266 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL) 4267 return (DDI_FAILURE); 4268 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress; 4269 return (DDI_SUCCESS); 4270 } 4271 4272 /* 4273 * At this point, we're always operating on cookies at 4274 * index >= 1 and writing the addresses of those cookies 4275 * into a new page. The address of that page is stored 4276 * as the second PRP entry. 4277 */ 4278 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t); 4279 ASSERT(nprp_per_page > 0); 4280 4281 /* 4282 * We currently don't support chained PRPs and set up our DMA 4283 * attributes to reflect that. If we still get an I/O request 4284 * that needs a chained PRP something is very wrong. Account 4285 * for the first cookie here, which we've placed in d_prp[0]. 
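 * (A worked example under the common 4k page size, for illustration:
 * nprp_per_page is 4096 / 8 = 512, so any request with up to 513
 * cookies, one in d_prp[0] plus 512 in the PRP page, keeps nprp at 1
 * and satisfies the VERIFY below.)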
4286 */ 4287 nprp = howmany(ncookies - 1, nprp_per_page); 4288 VERIFY(nprp == 1); 4289 4290 /* 4291 * Allocate a page of pointers, in which we'll write the 4292 * addresses of cookies 1 to `ncookies`. 4293 */ 4294 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 4295 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 4296 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress; 4297 4298 prp = (uint64_t *)cmd->nc_prp->nd_memp; 4299 for (idx = 1; idx < ncookies; idx++) { 4300 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL) 4301 return (DDI_FAILURE); 4302 *prp++ = cookie->dmac_laddress; 4303 } 4304 4305 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 4306 DDI_DMA_SYNC_FORDEV); 4307 return (DDI_SUCCESS); 4308 } 4309 4310 /* 4311 * The maximum number of requests supported for a deallocate request is 4312 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and 4313 * unchanged through at least 1.4a). The definition of nvme_range_t is also 4314 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for 4315 * a deallocate request will fit into the smallest supported namespace page 4316 * (4k). 4317 */ 4318 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096); 4319 4320 static int 4321 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize, 4322 int allocflag) 4323 { 4324 const dkioc_free_list_t *dfl = xfer->x_dfl; 4325 const dkioc_free_list_ext_t *exts = dfl->dfl_exts; 4326 nvme_t *nvme = cmd->nc_nvme; 4327 nvme_range_t *ranges = NULL; 4328 uint_t i; 4329 4330 /* 4331 * The number of ranges in the request is 0s based (that is 4332 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ..., 4333 * word10 == 255 -> 256 ranges). Therefore the allowed values are 4334 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request, 4335 * we either provided bad info in nvme_bd_driveinfo() or there is a bug 4336 * in blkdev. 4337 */ 4338 VERIFY3U(dfl->dfl_num_exts, >, 0); 4339 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES); 4340 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff; 4341 4342 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE; 4343 4344 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag); 4345 if (cmd->nc_prp == NULL) 4346 return (DDI_FAILURE); 4347 4348 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 4349 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp; 4350 4351 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress; 4352 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 4353 4354 for (i = 0; i < dfl->dfl_num_exts; i++) { 4355 uint64_t lba, len; 4356 4357 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize; 4358 len = exts[i].dfle_length / blocksize; 4359 4360 VERIFY3U(len, <=, UINT32_MAX); 4361 4362 /* No context attributes for a deallocate request */ 4363 ranges[i].nr_ctxattr = 0; 4364 ranges[i].nr_len = len; 4365 ranges[i].nr_lba = lba; 4366 } 4367 4368 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 4369 DDI_DMA_SYNC_FORDEV); 4370 4371 return (DDI_SUCCESS); 4372 } 4373 4374 static nvme_cmd_t * 4375 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 4376 { 4377 nvme_t *nvme = ns->ns_nvme; 4378 nvme_cmd_t *cmd; 4379 int allocflag; 4380 4381 /* 4382 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 4383 */ 4384 allocflag = (xfer->x_flags & BD_XFER_POLL) ? 
KM_NOSLEEP : KM_SLEEP; 4385 cmd = nvme_alloc_cmd(nvme, allocflag); 4386 4387 if (cmd == NULL) 4388 return (NULL); 4389 4390 cmd->nc_sqe.sqe_opc = opc; 4391 cmd->nc_callback = nvme_bd_xfer_done; 4392 cmd->nc_xfer = xfer; 4393 4394 switch (opc) { 4395 case NVME_OPC_NVM_WRITE: 4396 case NVME_OPC_NVM_READ: 4397 VERIFY(xfer->x_nblks <= 0x10000); 4398 4399 cmd->nc_sqe.sqe_nsid = ns->ns_id; 4400 4401 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 4402 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 4403 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 4404 4405 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS) 4406 goto fail; 4407 break; 4408 4409 case NVME_OPC_NVM_FLUSH: 4410 cmd->nc_sqe.sqe_nsid = ns->ns_id; 4411 break; 4412 4413 case NVME_OPC_NVM_DSET_MGMT: 4414 cmd->nc_sqe.sqe_nsid = ns->ns_id; 4415 4416 if (nvme_fill_ranges(cmd, xfer, 4417 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS) 4418 goto fail; 4419 break; 4420 4421 default: 4422 goto fail; 4423 } 4424 4425 return (cmd); 4426 4427 fail: 4428 nvme_free_cmd(cmd); 4429 return (NULL); 4430 } 4431 4432 static void 4433 nvme_bd_xfer_done(void *arg) 4434 { 4435 nvme_cmd_t *cmd = arg; 4436 bd_xfer_t *xfer = cmd->nc_xfer; 4437 int error = 0; 4438 4439 error = nvme_check_cmd_status(cmd); 4440 nvme_free_cmd(cmd); 4441 4442 bd_xfer_done(xfer, error); 4443 } 4444 4445 static void 4446 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 4447 { 4448 nvme_namespace_t *ns = arg; 4449 nvme_t *nvme = ns->ns_nvme; 4450 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable); 4451 boolean_t mutex_exit_needed = B_TRUE; 4452 4453 /* 4454 * nvme_bd_driveinfo is called by blkdev in two situations: 4455 * - during bd_attach_handle(), which we call with the mutex held 4456 * - during bd_attach(), which may be called with or without the 4457 * mutex held 4458 */ 4459 if (mutex_owned(&nvme->n_mgmt_mutex)) 4460 mutex_exit_needed = B_FALSE; 4461 else 4462 mutex_enter(&nvme->n_mgmt_mutex); 4463 4464 /* 4465 * Set the blkdev qcount to the number of submission queues. 4466 * It will then create one waitq/runq pair for each submission 4467 * queue and spread I/O requests across the queues. 4468 */ 4469 drive->d_qcount = nvme->n_ioq_count; 4470 4471 /* 4472 * I/O activity to individual namespaces is distributed across 4473 * each of the d_qcount blkdev queues (which has been set to 4474 * the number of nvme submission queues). d_qsize is the number 4475 * of submitted and not completed I/Os within each queue that blkdev 4476 * will allow before it starts holding them in the waitq. 4477 * 4478 * Each namespace will create a child blkdev instance; for each one 4479 * we try to set the d_qsize so that each namespace gets an 4480 * equal portion of the submission queue. 4481 * 4482 * If n_namespaces_attachable changes after the nvme drive has been 4483 * instantiated and a namespace is attached, a different d_qsize may 4484 * be calculated. It may even be that the sum of the d_qsizes is 4485 * now beyond the submission queue size. Should that be the case 4486 * and the I/O rate is such that blkdev attempts to submit more 4487 * I/Os than the size of the submission queue, the excess I/Os 4488 * will be held behind the semaphore nq_sema. 4489 */ 4490 drive->d_qsize = nvme->n_io_squeue_len / ns_count; 4491 4492 /* 4493 * Don't let the queue size drop below the minimum, though. 
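 * (Illustrative numbers: a 1024-entry submission queue shared by 64
 * attachable namespaces yields d_qsize = 16; the MAX() below raises
 * that back to NVME_MIN_IO_QUEUE_LEN if it falls under the floor.)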
4494 */ 4495 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN); 4496 4497 /* 4498 * d_maxxfer is not set, which means the value is taken from the DMA 4499 * attributes specified to bd_alloc_handle. 4500 */ 4501 4502 drive->d_removable = B_FALSE; 4503 drive->d_hotpluggable = B_FALSE; 4504 4505 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64)); 4506 drive->d_target = ns->ns_id; 4507 drive->d_lun = 0; 4508 4509 drive->d_model = nvme->n_idctl->id_model; 4510 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 4511 drive->d_vendor = nvme->n_vendor; 4512 drive->d_vendor_len = strlen(nvme->n_vendor); 4513 drive->d_product = nvme->n_product; 4514 drive->d_product_len = strlen(nvme->n_product); 4515 drive->d_serial = nvme->n_idctl->id_serial; 4516 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 4517 drive->d_revision = nvme->n_idctl->id_fwrev; 4518 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 4519 4520 /* 4521 * If we support the dataset management command, the only restriction 4522 * on a discard request is the maximum number of ranges (segments) 4523 * per single request. 4524 */ 4525 if (nvme->n_idctl->id_oncs.on_dset_mgmt) 4526 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES; 4527 4528 if (mutex_exit_needed) 4529 mutex_exit(&nvme->n_mgmt_mutex); 4530 } 4531 4532 static int 4533 nvme_bd_mediainfo(void *arg, bd_media_t *media) 4534 { 4535 nvme_namespace_t *ns = arg; 4536 nvme_t *nvme = ns->ns_nvme; 4537 boolean_t mutex_exit_needed = B_TRUE; 4538 4539 if (nvme->n_dead) { 4540 return (EIO); 4541 } 4542 4543 /* 4544 * nvme_bd_mediainfo is called by blkdev in various situations, 4545 * most of them out of our control. There's one exception though: 4546 * when we call bd_state_change() in response to a "namespace change" 4547 * notification, we are already holding the mutex. 4548 */ 4549 if (mutex_owned(&nvme->n_mgmt_mutex)) 4550 mutex_exit_needed = B_FALSE; 4551 else 4552 mutex_enter(&nvme->n_mgmt_mutex); 4553 4554 media->m_nblks = ns->ns_block_count; 4555 media->m_blksize = ns->ns_block_size; 4556 media->m_readonly = B_FALSE; 4557 media->m_solidstate = B_TRUE; 4558 4559 media->m_pblksize = ns->ns_best_block_size; 4560 4561 if (mutex_exit_needed) 4562 mutex_exit(&nvme->n_mgmt_mutex); 4563 4564 return (0); 4565 } 4566 4567 static int 4568 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 4569 { 4570 nvme_t *nvme = ns->ns_nvme; 4571 nvme_cmd_t *cmd; 4572 nvme_qpair_t *ioq; 4573 boolean_t poll; 4574 int ret; 4575 4576 if (nvme->n_dead) { 4577 return (EIO); 4578 } 4579 4580 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 4581 if (cmd == NULL) 4582 return (ENOMEM); 4583 4584 cmd->nc_sqid = xfer->x_qnum + 1; 4585 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 4586 ioq = nvme->n_ioq[cmd->nc_sqid]; 4587 4588 /* 4589 * Get the polling flag before submitting the command. The command may 4590 * complete immediately after it was submitted, which means we must 4591 * treat both cmd and xfer as if they have been freed already. 
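 * This is why x_flags is sampled into the local poll variable below
 * before nvme_submit_io_cmd() is called, instead of reading xfer
 * afterwards.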
4592 */ 4593 poll = (xfer->x_flags & BD_XFER_POLL) != 0; 4594 4595 ret = nvme_submit_io_cmd(ioq, cmd); 4596 4597 if (ret != 0) 4598 return (ret); 4599 4600 if (!poll) 4601 return (0); 4602 4603 do { 4604 cmd = nvme_retrieve_cmd(nvme, ioq); 4605 if (cmd != NULL) 4606 cmd->nc_callback(cmd); 4607 else 4608 drv_usecwait(10); 4609 } while (ioq->nq_active_cmds != 0); 4610 4611 return (0); 4612 } 4613 4614 static int 4615 nvme_bd_read(void *arg, bd_xfer_t *xfer) 4616 { 4617 nvme_namespace_t *ns = arg; 4618 4619 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 4620 } 4621 4622 static int 4623 nvme_bd_write(void *arg, bd_xfer_t *xfer) 4624 { 4625 nvme_namespace_t *ns = arg; 4626 4627 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 4628 } 4629 4630 static int 4631 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 4632 { 4633 nvme_namespace_t *ns = arg; 4634 4635 if (ns->ns_nvme->n_dead) 4636 return (EIO); 4637 4638 /* 4639 * If the volatile write cache is not present or not enabled the FLUSH 4640 * command is a no-op, so we can take a shortcut here. 4641 */ 4642 if (!ns->ns_nvme->n_write_cache_present) { 4643 bd_xfer_done(xfer, ENOTSUP); 4644 return (0); 4645 } 4646 4647 if (!ns->ns_nvme->n_write_cache_enabled) { 4648 bd_xfer_done(xfer, 0); 4649 return (0); 4650 } 4651 4652 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 4653 } 4654 4655 static int 4656 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 4657 { 4658 nvme_namespace_t *ns = arg; 4659 nvme_t *nvme = ns->ns_nvme; 4660 4661 if (nvme->n_dead) { 4662 return (EIO); 4663 } 4664 4665 if (*(uint64_t *)ns->ns_nguid != 0 || 4666 *(uint64_t *)(ns->ns_nguid + 8) != 0) { 4667 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID, 4668 sizeof (ns->ns_nguid), ns->ns_nguid, devid)); 4669 } else if (*(uint64_t *)ns->ns_eui64 != 0) { 4670 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64, 4671 sizeof (ns->ns_eui64), ns->ns_eui64, devid)); 4672 } else { 4673 return (ddi_devid_init(devinfo, DEVID_NVME_NSID, 4674 strlen(ns->ns_devid), ns->ns_devid, devid)); 4675 } 4676 } 4677 4678 static int 4679 nvme_bd_free_space(void *arg, bd_xfer_t *xfer) 4680 { 4681 nvme_namespace_t *ns = arg; 4682 4683 if (xfer->x_dfl == NULL) 4684 return (EINVAL); 4685 4686 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt) 4687 return (ENOTSUP); 4688 4689 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT)); 4690 } 4691 4692 static int 4693 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 4694 { 4695 #ifndef __lock_lint 4696 _NOTE(ARGUNUSED(cred_p)); 4697 #endif 4698 minor_t minor = getminor(*devp); 4699 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 4700 int nsid = NVME_MINOR_NSID(minor); 4701 nvme_minor_state_t *nm; 4702 int rv = 0; 4703 4704 if (otyp != OTYP_CHR) 4705 return (EINVAL); 4706 4707 if (nvme == NULL) 4708 return (ENXIO); 4709 4710 if (nsid > nvme->n_namespace_count) 4711 return (ENXIO); 4712 4713 if (nvme->n_dead) 4714 return (EIO); 4715 4716 mutex_enter(&nvme->n_minor_mutex); 4717 4718 /* 4719 * First check the devctl node and error out if it's been opened 4720 * exclusively already by any other thread. 4721 */ 4722 if (nvme->n_minor.nm_oexcl != NULL && 4723 nvme->n_minor.nm_oexcl != curthread) { 4724 rv = EBUSY; 4725 goto out; 4726 } 4727 4728 nm = nsid == 0 ? 
&nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor); 4729 4730 if (flag & FEXCL) { 4731 if (nm->nm_oexcl != NULL || nm->nm_open) { 4732 rv = EBUSY; 4733 goto out; 4734 } 4735 4736 /* 4737 * If at least one namespace is already open, fail the 4738 * exclusive open of the devctl node. 4739 */ 4740 if (nsid == 0) { 4741 for (int i = 1; i <= nvme->n_namespace_count; i++) { 4742 if (NVME_NSID2NS(nvme, i)->ns_minor.nm_open) { 4743 rv = EBUSY; 4744 goto out; 4745 } 4746 } 4747 } 4748 4749 nm->nm_oexcl = curthread; 4750 } 4751 4752 nm->nm_open = B_TRUE; 4753 4754 out: 4755 mutex_exit(&nvme->n_minor_mutex); 4756 return (rv); 4757 4758 } 4759 4760 static int 4761 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 4762 { 4763 #ifndef __lock_lint 4764 _NOTE(ARGUNUSED(cred_p)); 4765 _NOTE(ARGUNUSED(flag)); 4766 #endif 4767 minor_t minor = getminor(dev); 4768 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 4769 int nsid = NVME_MINOR_NSID(minor); 4770 nvme_minor_state_t *nm; 4771 4772 if (otyp != OTYP_CHR) 4773 return (ENXIO); 4774 4775 if (nvme == NULL) 4776 return (ENXIO); 4777 4778 if (nsid > nvme->n_namespace_count) 4779 return (ENXIO); 4780 4781 nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor); 4782 4783 mutex_enter(&nvme->n_minor_mutex); 4784 if (nm->nm_oexcl != NULL) { 4785 ASSERT(nm->nm_oexcl == curthread); 4786 nm->nm_oexcl = NULL; 4787 } 4788 4789 ASSERT(nm->nm_open); 4790 nm->nm_open = B_FALSE; 4791 mutex_exit(&nvme->n_minor_mutex); 4792 4793 return (0); 4794 } 4795 4796 static int 4797 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4798 cred_t *cred_p) 4799 { 4800 _NOTE(ARGUNUSED(cred_p)); 4801 int rv = 0; 4802 void *idctl; 4803 4804 if ((mode & FREAD) == 0) 4805 return (EPERM); 4806 4807 if (nioc->n_len < NVME_IDENTIFY_BUFSIZE) 4808 return (EINVAL); 4809 4810 switch (nioc->n_arg) { 4811 case NVME_IDENTIFY_NSID: 4812 /* 4813 * If we support namespace management, set the nsid to -1 to 4814 * retrieve the common namespace capabilities. Otherwise 4815 * have a best guess by returning identify data for namespace 1. 4816 */ 4817 if (nsid == 0) 4818 nsid = nvme->n_idctl->id_oacs.oa_nsmgmt == 1 ? -1 : 1; 4819 break; 4820 4821 case NVME_IDENTIFY_CTRL: 4822 /* 4823 * Let NVME_IDENTIFY_CTRL work the same on devctl and attachment 4824 * point nodes. 4825 */ 4826 nsid = 0; 4827 break; 4828 4829 case NVME_IDENTIFY_NSID_LIST: 4830 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 4831 return (ENOTSUP); 4832 4833 /* 4834 * For now, always try to get the list of active NSIDs starting 4835 * at the first namespace. This will have to be revisited should 4836 * the need arise to support more than 1024 namespaces. 4837 */ 4838 nsid = 0; 4839 break; 4840 4841 case NVME_IDENTIFY_NSID_DESC: 4842 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 3)) 4843 return (ENOTSUP); 4844 break; 4845 4846 case NVME_IDENTIFY_NSID_ALLOC: 4847 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4848 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4849 return (ENOTSUP); 4850 4851 /* 4852 * To make this work on a devctl node, make this return the 4853 * identify data for namespace 1. We assume that any NVMe 4854 * device supports at least one namespace, which has ID 1. 
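 * (The NVME_IDENTIFY_NSID case above makes the same assumption when
 * the controller doesn't support namespace management.)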
4855 */ 4856 if (nsid == 0) 4857 nsid = 1; 4858 break; 4859 4860 case NVME_IDENTIFY_NSID_ALLOC_LIST: 4861 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4862 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4863 return (ENOTSUP); 4864 4865 /* 4866 * For now, always try to get the list of allocated NSIDs 4867 * starting at the first namespace. This will have to be 4868 * revisited should the need arise to support more than 1024 4869 * namespaces. 4870 */ 4871 nsid = 0; 4872 break; 4873 4874 case NVME_IDENTIFY_NSID_CTRL_LIST: 4875 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4876 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4877 return (ENOTSUP); 4878 4879 if (nsid == 0) 4880 return (EINVAL); 4881 break; 4882 4883 case NVME_IDENTIFY_CTRL_LIST: 4884 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4885 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4886 return (ENOTSUP); 4887 4888 if (nsid != 0) 4889 return (EINVAL); 4890 break; 4891 4892 default: 4893 return (EINVAL); 4894 } 4895 4896 if ((rv = nvme_identify(nvme, B_TRUE, nsid, nioc->n_arg & 0xff, 4897 (void **)&idctl)) != 0) 4898 return (rv); 4899 4900 if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode) 4901 != 0) 4902 rv = EFAULT; 4903 4904 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE); 4905 4906 return (rv); 4907 } 4908 4909 /* 4910 * Execute commands on behalf of the various ioctls. 4911 */ 4912 static int 4913 nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr, 4914 uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout) 4915 { 4916 nvme_cmd_t *cmd; 4917 nvme_qpair_t *ioq; 4918 int rv = 0; 4919 4920 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 4921 if (is_admin) { 4922 cmd->nc_sqid = 0; 4923 ioq = nvme->n_adminq; 4924 } else { 4925 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1; 4926 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 4927 ioq = nvme->n_ioq[cmd->nc_sqid]; 4928 } 4929 4930 /* 4931 * This function is used to facilitate requests from 4932 * userspace, so don't panic if the command fails. This 4933 * is especially true for admin passthru commands, where 4934 * the actual command data structure is entirely defined 4935 * by userspace. 
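 * (Setting nc_dontpanic below asks the command status checking code to
 * hand unexpected status back to the caller instead of treating it as
 * fatal to the system.)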
4936 */ 4937 cmd->nc_dontpanic = B_TRUE; 4938 4939 cmd->nc_callback = nvme_wakeup_cmd; 4940 cmd->nc_sqe = *sqe; 4941 4942 if ((rwk & (FREAD | FWRITE)) != 0) { 4943 if (data_addr == NULL) { 4944 rv = EINVAL; 4945 goto free_cmd; 4946 } 4947 4948 if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ, 4949 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 4950 dev_err(nvme->n_dip, CE_WARN, 4951 "!nvme_zalloc_dma failed for nvme_ioc_cmd()"); 4952 4953 rv = ENOMEM; 4954 goto free_cmd; 4955 } 4956 4957 if ((rv = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0) 4958 goto free_cmd; 4959 4960 if ((rwk & FWRITE) != 0) { 4961 if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp, 4962 data_len, rwk & FKIOCTL) != 0) { 4963 rv = EFAULT; 4964 goto free_cmd; 4965 } 4966 } 4967 } 4968 4969 if (is_admin) { 4970 nvme_admin_cmd(cmd, timeout); 4971 } else { 4972 mutex_enter(&cmd->nc_mutex); 4973 4974 rv = nvme_submit_io_cmd(ioq, cmd); 4975 4976 if (rv == EAGAIN) { 4977 mutex_exit(&cmd->nc_mutex); 4978 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 4979 "!nvme_ioc_cmd() failed, I/O Q full"); 4980 goto free_cmd; 4981 } 4982 4983 nvme_wait_cmd(cmd, timeout); 4984 4985 mutex_exit(&cmd->nc_mutex); 4986 } 4987 4988 if (cqe != NULL) 4989 *cqe = cmd->nc_cqe; 4990 4991 if ((rv = nvme_check_cmd_status(cmd)) != 0) { 4992 dev_err(nvme->n_dip, CE_WARN, 4993 "!nvme_ioc_cmd() failed with sct = %x, sc = %x", 4994 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 4995 4996 goto free_cmd; 4997 } 4998 4999 if ((rwk & FREAD) != 0) { 5000 if (ddi_copyout(cmd->nc_dma->nd_memp, 5001 data_addr, data_len, rwk & FKIOCTL) != 0) 5002 rv = EFAULT; 5003 } 5004 5005 free_cmd: 5006 nvme_free_cmd(cmd); 5007 5008 return (rv); 5009 } 5010 5011 static int 5012 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5013 int mode, cred_t *cred_p) 5014 { 5015 _NOTE(ARGUNUSED(nsid, cred_p)); 5016 int rv = 0; 5017 nvme_reg_cap_t cap = { 0 }; 5018 nvme_capabilities_t nc; 5019 5020 if ((mode & FREAD) == 0) 5021 return (EPERM); 5022 5023 if (nioc->n_len < sizeof (nc)) 5024 return (EINVAL); 5025 5026 cap.r = nvme_get64(nvme, NVME_REG_CAP); 5027 5028 /* 5029 * The MPSMIN and MPSMAX fields in the CAP register use 0 to 5030 * specify the base page size of 4k (1<<12), so add 12 here to 5031 * get the real page size value. 
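 * For example, cap_mpsmin == 0 means a 4k (1 << 12) minimum page size,
 * and cap_mpsmax == 4 means a 64k (1 << 16) maximum.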
5032 */ 5033 nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax); 5034 nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin); 5035 5036 if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0) 5037 rv = EFAULT; 5038 5039 return (rv); 5040 } 5041 5042 static int 5043 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5044 int mode, cred_t *cred_p) 5045 { 5046 _NOTE(ARGUNUSED(cred_p)); 5047 void *log = NULL; 5048 size_t bufsize = 0; 5049 int rv = 0; 5050 5051 if ((mode & FREAD) == 0) 5052 return (EPERM); 5053 5054 if (nsid > 0 && !NVME_NSID2NS(nvme, nsid)->ns_active) 5055 return (EINVAL); 5056 5057 switch (nioc->n_arg) { 5058 case NVME_LOGPAGE_ERROR: 5059 if (nsid != 0) 5060 return (EINVAL); 5061 break; 5062 case NVME_LOGPAGE_HEALTH: 5063 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0) 5064 return (EINVAL); 5065 5066 if (nsid == 0) 5067 nsid = (uint32_t)-1; 5068 5069 break; 5070 case NVME_LOGPAGE_FWSLOT: 5071 if (nsid != 0) 5072 return (EINVAL); 5073 break; 5074 default: 5075 if (!NVME_IS_VENDOR_SPECIFIC_LOGPAGE(nioc->n_arg)) 5076 return (EINVAL); 5077 if (nioc->n_len > NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE) { 5078 dev_err(nvme->n_dip, CE_NOTE, "!Vendor-specific log " 5079 "page size exceeds device maximum supported size: " 5080 "%lu", NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE); 5081 return (EINVAL); 5082 } 5083 if (nioc->n_len == 0) 5084 return (EINVAL); 5085 bufsize = nioc->n_len; 5086 if (nsid == 0) 5087 nsid = (uint32_t)-1; 5088 } 5089 5090 if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid) 5091 != DDI_SUCCESS) 5092 return (EIO); 5093 5094 if (nioc->n_len < bufsize) { 5095 kmem_free(log, bufsize); 5096 return (EINVAL); 5097 } 5098 5099 if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0) 5100 rv = EFAULT; 5101 5102 nioc->n_len = bufsize; 5103 kmem_free(log, bufsize); 5104 5105 return (rv); 5106 } 5107 5108 static int 5109 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5110 int mode, cred_t *cred_p) 5111 { 5112 _NOTE(ARGUNUSED(cred_p)); 5113 void *buf = NULL; 5114 size_t bufsize = 0; 5115 uint32_t res = 0; 5116 uint8_t feature; 5117 int rv = 0; 5118 5119 if ((mode & FREAD) == 0) 5120 return (EPERM); 5121 5122 if (nsid > 0 && !NVME_NSID2NS(nvme, nsid)->ns_active) 5123 return (EINVAL); 5124 5125 if ((nioc->n_arg >> 32) > 0xff) 5126 return (EINVAL); 5127 5128 feature = (uint8_t)(nioc->n_arg >> 32); 5129 5130 switch (feature) { 5131 case NVME_FEAT_ARBITRATION: 5132 case NVME_FEAT_POWER_MGMT: 5133 case NVME_FEAT_ERROR: 5134 case NVME_FEAT_NQUEUES: 5135 case NVME_FEAT_INTR_COAL: 5136 case NVME_FEAT_WRITE_ATOM: 5137 case NVME_FEAT_ASYNC_EVENT: 5138 case NVME_FEAT_PROGRESS: 5139 if (nsid != 0) 5140 return (EINVAL); 5141 break; 5142 5143 case NVME_FEAT_TEMPERATURE: 5144 if (nsid != 0) 5145 return (EINVAL); 5146 res = nioc->n_arg & 0xffffffffUL; 5147 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) { 5148 nvme_temp_threshold_t tt; 5149 5150 tt.r = res; 5151 if (tt.b.tt_thsel != NVME_TEMP_THRESH_OVER && 5152 tt.b.tt_thsel != NVME_TEMP_THRESH_UNDER) { 5153 return (EINVAL); 5154 } 5155 5156 if (tt.b.tt_tmpsel > NVME_TEMP_THRESH_MAX_SENSOR) { 5157 return (EINVAL); 5158 } 5159 } else if (res != 0) { 5160 return (ENOTSUP); 5161 } 5162 break; 5163 5164 case NVME_FEAT_INTR_VECT: 5165 if (nsid != 0) 5166 return (EINVAL); 5167 5168 res = nioc->n_arg & 0xffffffffUL; 5169 if (res >= nvme->n_intr_cnt) 5170 return (EINVAL); 5171 break; 5172 5173 case NVME_FEAT_LBA_RANGE: 5174 if (nvme->n_lba_range_supported == B_FALSE) 5175 return (EINVAL); 5176 5177 if 
(nsid == 0 || 5178 nsid > nvme->n_namespace_count) 5179 return (EINVAL); 5180 5181 break; 5182 5183 case NVME_FEAT_WRITE_CACHE: 5184 if (nsid != 0) 5185 return (EINVAL); 5186 5187 if (!nvme->n_write_cache_present) 5188 return (EINVAL); 5189 5190 break; 5191 5192 case NVME_FEAT_AUTO_PST: 5193 if (nsid != 0) 5194 return (EINVAL); 5195 5196 if (!nvme->n_auto_pst_supported) 5197 return (EINVAL); 5198 5199 break; 5200 5201 default: 5202 return (EINVAL); 5203 } 5204 5205 rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf, 5206 &bufsize); 5207 if (rv != 0) 5208 return (rv); 5209 5210 if (nioc->n_len < bufsize) { 5211 kmem_free(buf, bufsize); 5212 return (EINVAL); 5213 } 5214 5215 if (buf && ddi_copyout(buf, (void*)nioc->n_buf, bufsize, mode) != 0) 5216 rv = EFAULT; 5217 5218 kmem_free(buf, bufsize); 5219 nioc->n_arg = res; 5220 nioc->n_len = bufsize; 5221 5222 return (rv); 5223 } 5224 5225 static int 5226 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5227 cred_t *cred_p) 5228 { 5229 _NOTE(ARGUNUSED(nsid, mode, cred_p)); 5230 5231 if ((mode & FREAD) == 0) 5232 return (EPERM); 5233 5234 nioc->n_arg = nvme->n_intr_cnt; 5235 return (0); 5236 } 5237 5238 static int 5239 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5240 cred_t *cred_p) 5241 { 5242 _NOTE(ARGUNUSED(nsid, cred_p)); 5243 int rv = 0; 5244 5245 if ((mode & FREAD) == 0) 5246 return (EPERM); 5247 5248 if (nioc->n_len < sizeof (nvme->n_version)) 5249 return (ENOMEM); 5250 5251 if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf, 5252 sizeof (nvme->n_version), mode) != 0) 5253 rv = EFAULT; 5254 5255 return (rv); 5256 } 5257 5258 static int 5259 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5260 cred_t *cred_p) 5261 { 5262 _NOTE(ARGUNUSED(mode)); 5263 nvme_format_nvm_t frmt = { 0 }; 5264 int c_nsid = nsid != 0 ? nsid : 1; 5265 nvme_identify_nsid_t *idns; 5266 nvme_minor_state_t *nm; 5267 5268 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5269 return (EPERM); 5270 5271 nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor); 5272 if (nm->nm_oexcl != curthread) 5273 return (EACCES); 5274 5275 if (nsid != 0) { 5276 if (NVME_NSID2NS(nvme, nsid)->ns_attached) 5277 return (EBUSY); 5278 else if (!NVME_NSID2NS(nvme, nsid)->ns_active) 5279 return (EINVAL); 5280 } 5281 5282 frmt.r = nioc->n_arg & 0xffffffff; 5283 5284 /* 5285 * Check whether the FORMAT NVM command is supported. 5286 */ 5287 if (nvme->n_idctl->id_oacs.oa_format == 0) 5288 return (ENOTSUP); 5289 5290 /* 5291 * Don't allow format or secure erase of individual namespace if that 5292 * would cause a format or secure erase of all namespaces. 5293 */ 5294 if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0) 5295 return (EINVAL); 5296 5297 if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE && 5298 nvme->n_idctl->id_fna.fn_sec_erase != 0) 5299 return (EINVAL); 5300 5301 /* 5302 * Don't allow formatting with Protection Information. 5303 */ 5304 if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0) 5305 return (EINVAL); 5306 5307 /* 5308 * Don't allow formatting using an illegal LBA format, or any LBA format 5309 * that uses metadata. 5310 */ 5311 idns = NVME_NSID2NS(nvme, c_nsid)->ns_idns; 5312 if (frmt.b.fm_lbaf > idns->id_nlbaf || 5313 idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0) 5314 return (EINVAL); 5315 5316 /* 5317 * Don't allow formatting using an illegal Secure Erase setting. 
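 * For instance, a cryptographic erase (NVME_FRMT_SES_CRYPTO) is refused
 * below unless the controller advertises it in id_fna.fn_crypt_erase.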
5318 */ 5319 if (frmt.b.fm_ses > NVME_FRMT_MAX_SES || 5320 (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO && 5321 nvme->n_idctl->id_fna.fn_crypt_erase == 0)) 5322 return (EINVAL); 5323 5324 if (nsid == 0) 5325 nsid = (uint32_t)-1; 5326 5327 return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0, 5328 B_FALSE, frmt.b.fm_ses)); 5329 } 5330 5331 static int 5332 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5333 cred_t *cred_p) 5334 { 5335 _NOTE(ARGUNUSED(nioc, mode)); 5336 int rv; 5337 5338 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5339 return (EPERM); 5340 5341 if (nsid == 0) 5342 return (EINVAL); 5343 5344 if (NVME_NSID2NS(nvme, nsid)->ns_minor.nm_oexcl != curthread) 5345 return (EACCES); 5346 5347 mutex_enter(&nvme->n_mgmt_mutex); 5348 5349 rv = nvme_detach_ns(nvme, nsid); 5350 5351 mutex_exit(&nvme->n_mgmt_mutex); 5352 5353 return (rv); 5354 } 5355 5356 static int 5357 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5358 cred_t *cred_p) 5359 { 5360 _NOTE(ARGUNUSED(nioc, mode)); 5361 int rv; 5362 5363 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5364 return (EPERM); 5365 5366 if (nsid == 0) 5367 return (EINVAL); 5368 5369 if (NVME_NSID2NS(nvme, nsid)->ns_minor.nm_oexcl != curthread) 5370 return (EACCES); 5371 5372 mutex_enter(&nvme->n_mgmt_mutex); 5373 5374 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) { 5375 mutex_exit(&nvme->n_mgmt_mutex); 5376 return (EIO); 5377 } 5378 5379 rv = nvme_attach_ns(nvme, nsid); 5380 5381 mutex_exit(&nvme->n_mgmt_mutex); 5382 return (rv); 5383 } 5384 5385 static void 5386 nvme_ufm_update(nvme_t *nvme) 5387 { 5388 mutex_enter(&nvme->n_fwslot_mutex); 5389 ddi_ufm_update(nvme->n_ufmh); 5390 if (nvme->n_fwslot != NULL) { 5391 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t)); 5392 nvme->n_fwslot = NULL; 5393 } 5394 mutex_exit(&nvme->n_fwslot_mutex); 5395 } 5396 5397 static int 5398 nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5399 int mode, cred_t *cred_p) 5400 { 5401 int rv = 0; 5402 size_t len, copylen; 5403 offset_t offset; 5404 uintptr_t buf; 5405 nvme_cqe_t cqe = { 0 }; 5406 nvme_sqe_t sqe = { 5407 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD 5408 }; 5409 5410 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5411 return (EPERM); 5412 5413 if (nvme->n_idctl->id_oacs.oa_firmware == 0) 5414 return (ENOTSUP); 5415 5416 if (nsid != 0) 5417 return (EINVAL); 5418 5419 /* 5420 * The offset is a 32-bit count of DWORDs, which limits the length 5421 * (n_len) to the equivalent maximum in bytes (NVME_FW_OFFSETB_MAX). 5422 */ 5423 if (nioc->n_len > NVME_FW_OFFSETB_MAX) 5424 return (EINVAL); 5425 5426 /* Confirm that both offset and length are a multiple of DWORD bytes */ 5427 if ((nioc->n_len & NVME_DWORD_MASK) != 0 || 5428 (nioc->n_arg & NVME_DWORD_MASK) != 0) 5429 return (EINVAL); 5430 5431 len = nioc->n_len; 5432 offset = nioc->n_arg; 5433 buf = (uintptr_t)nioc->n_buf; 5434 5435 nioc->n_arg = 0; 5436 5437 while (len > 0 && rv == 0) { 5438 /* 5439 * nvme_ioc_cmd() does not use SGLs or PRP lists. 5440 * It is limited to 2 PRPs per NVM command, so limit 5441 * the size of the data to 2 pages. 
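 * With the typical 4k page size this transfers the image in chunks of
 * at most 8k per Firmware Image Download command.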
5442 */ 5443 copylen = MIN(2 * nvme->n_pagesize, len); 5444 5445 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1; 5446 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT); 5447 5448 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen, 5449 FWRITE, &cqe, nvme_admin_cmd_timeout); 5450 5451 /* 5452 * Regardless of whether the command succeeded, and whether 5453 * there's an errno in rv to be returned, we'll return any 5454 * command-specific status code in n_arg. 5455 * 5456 * As n_arg isn't cleared in all other possible code paths 5457 * returning an error, we return the status code as a negative 5458 * value so it can be distinguished easily from whatever value 5459 * was passed in n_arg originally. This of course only works as 5460 * long as arguments passed in n_arg are less than INT64_MAX, 5461 * which they currently are. 5462 */ 5463 if (cqe.cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 5464 nioc->n_arg = (uint64_t)-cqe.cqe_sf.sf_sc; 5465 5466 buf += copylen; 5467 offset += copylen; 5468 len -= copylen; 5469 } 5470 5471 /* 5472 * Let the DDI UFM subsystem know that the firmware information for 5473 * this device has changed. 5474 */ 5475 nvme_ufm_update(nvme); 5476 5477 return (rv); 5478 } 5479 5480 static int 5481 nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5482 int mode, cred_t *cred_p) 5483 { 5484 nvme_firmware_commit_dw10_t fc_dw10 = { 0 }; 5485 uint32_t slot = nioc->n_arg & 0xffffffff; 5486 uint32_t action = nioc->n_arg >> 32; 5487 nvme_cqe_t cqe = { 0 }; 5488 nvme_sqe_t sqe = { 5489 .sqe_opc = NVME_OPC_FW_ACTIVATE 5490 }; 5491 int timeout; 5492 int rv; 5493 5494 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5495 return (EPERM); 5496 5497 if (nvme->n_idctl->id_oacs.oa_firmware == 0) 5498 return (ENOTSUP); 5499 5500 if (nsid != 0) 5501 return (EINVAL); 5502 5503 /* Validate slot is in range. */ 5504 if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX) 5505 return (EINVAL); 5506 5507 switch (action) { 5508 case NVME_FWC_SAVE: 5509 case NVME_FWC_SAVE_ACTIVATE: 5510 timeout = nvme_commit_save_cmd_timeout; 5511 if (slot == 1 && nvme->n_idctl->id_frmw.fw_readonly) 5512 return (EROFS); 5513 break; 5514 case NVME_FWC_ACTIVATE: 5515 case NVME_FWC_ACTIVATE_IMMED: 5516 timeout = nvme_admin_cmd_timeout; 5517 break; 5518 default: 5519 return (EINVAL); 5520 } 5521 5522 fc_dw10.b.fc_slot = slot; 5523 fc_dw10.b.fc_action = action; 5524 sqe.sqe_cdw10 = fc_dw10.r; 5525 5526 nioc->n_arg = 0; 5527 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout); 5528 5529 /* 5530 * Regardless of whether the command succeeded, and whether 5531 * there's an errno in rv to be returned, we'll return any 5532 * command-specific status code in n_arg. 5533 * 5534 * As n_arg isn't cleared in all other possible code paths 5535 * returning an error, we return the status code as a negative 5536 * value so it can be distinguished easily from whatever value 5537 * was passed in n_arg originally. This of course only works as 5538 * long as arguments passed in n_arg are less than INT64_MAX, 5539 * which they currently are. 5540 */ 5541 if (cqe.cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 5542 nioc->n_arg = (uint64_t)-cqe.cqe_sf.sf_sc; 5543 5544 /* 5545 * Let the DDI UFM subsystem know that the firmware information for 5546 * this device has changed. 5547 */ 5548 nvme_ufm_update(nvme); 5549 5550 return (rv); 5551 } 5552 5553 /* 5554 * Helper to copy in a passthru command from userspace, handling 5555 * different data models. 
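 * Under _MULTI_DATAMODEL a 32-bit caller passes a nvme_passthru_cmd32_t,
 * whose npc_buf and npc_buflen fields are 32 bits wide; the
 * field-by-field copy below widens them into the native
 * nvme_passthru_cmd_t.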
5556 */ 5557 static int 5558 nvme_passthru_copy_cmd_in(const void *buf, nvme_passthru_cmd_t *cmd, int mode) 5559 { 5560 #ifdef _MULTI_DATAMODEL 5561 switch (ddi_model_convert_from(mode & FMODELS)) { 5562 case DDI_MODEL_ILP32: { 5563 nvme_passthru_cmd32_t cmd32; 5564 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0) 5565 return (-1); 5566 cmd->npc_opcode = cmd32.npc_opcode; 5567 cmd->npc_timeout = cmd32.npc_timeout; 5568 cmd->npc_flags = cmd32.npc_flags; 5569 cmd->npc_cdw12 = cmd32.npc_cdw12; 5570 cmd->npc_cdw13 = cmd32.npc_cdw13; 5571 cmd->npc_cdw14 = cmd32.npc_cdw14; 5572 cmd->npc_cdw15 = cmd32.npc_cdw15; 5573 cmd->npc_buflen = cmd32.npc_buflen; 5574 cmd->npc_buf = cmd32.npc_buf; 5575 break; 5576 } 5577 case DDI_MODEL_NONE: 5578 #endif 5579 if (ddi_copyin(buf, (void*)cmd, sizeof (nvme_passthru_cmd_t), 5580 mode) != 0) 5581 return (-1); 5582 #ifdef _MULTI_DATAMODEL 5583 break; 5584 } 5585 #endif 5586 return (0); 5587 } 5588 5589 /* 5590 * Helper to copy out a passthru command result to userspace, handling 5591 * different data models. 5592 */ 5593 static int 5594 nvme_passthru_copy_cmd_out(const nvme_passthru_cmd_t *cmd, void *buf, int mode) 5595 { 5596 #ifdef _MULTI_DATAMODEL 5597 switch (ddi_model_convert_from(mode & FMODELS)) { 5598 case DDI_MODEL_ILP32: { 5599 nvme_passthru_cmd32_t cmd32; 5600 bzero(&cmd32, sizeof (cmd32)); 5601 cmd32.npc_opcode = cmd->npc_opcode; 5602 cmd32.npc_status = cmd->npc_status; 5603 cmd32.npc_err = cmd->npc_err; 5604 cmd32.npc_timeout = cmd->npc_timeout; 5605 cmd32.npc_flags = cmd->npc_flags; 5606 cmd32.npc_cdw0 = cmd->npc_cdw0; 5607 cmd32.npc_cdw12 = cmd->npc_cdw12; 5608 cmd32.npc_cdw13 = cmd->npc_cdw13; 5609 cmd32.npc_cdw14 = cmd->npc_cdw14; 5610 cmd32.npc_cdw15 = cmd->npc_cdw15; 5611 cmd32.npc_buflen = (size32_t)cmd->npc_buflen; 5612 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf; 5613 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0) 5614 return (-1); 5615 break; 5616 } 5617 case DDI_MODEL_NONE: 5618 #endif 5619 if (ddi_copyout(cmd, buf, sizeof (nvme_passthru_cmd_t), 5620 mode) != 0) 5621 return (-1); 5622 #ifdef _MULTI_DATAMODEL 5623 break; 5624 } 5625 #endif 5626 return (0); 5627 } 5628 5629 /* 5630 * Run an arbitrary vendor-specific admin command on the device. 5631 */ 5632 static int 5633 nvme_ioctl_passthru(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5634 cred_t *cred_p) 5635 { 5636 int rv = 0; 5637 uint_t timeout = 0; 5638 int rwk = 0; 5639 nvme_passthru_cmd_t cmd; 5640 size_t expected_passthru_size = 0; 5641 nvme_sqe_t sqe; 5642 nvme_cqe_t cqe; 5643 5644 bzero(&cmd, sizeof (cmd)); 5645 bzero(&sqe, sizeof (sqe)); 5646 bzero(&cqe, sizeof (cqe)); 5647 5648 /* 5649 * Basic checks: permissions, data model, argument size. 5650 */ 5651 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5652 return (EPERM); 5653 5654 /* 5655 * Compute the expected size of the argument buffer 5656 */ 5657 #ifdef _MULTI_DATAMODEL 5658 switch (ddi_model_convert_from(mode & FMODELS)) { 5659 case DDI_MODEL_ILP32: 5660 expected_passthru_size = sizeof (nvme_passthru_cmd32_t); 5661 break; 5662 case DDI_MODEL_NONE: 5663 #endif 5664 expected_passthru_size = sizeof (nvme_passthru_cmd_t); 5665 #ifdef _MULTI_DATAMODEL 5666 break; 5667 } 5668 #endif 5669 5670 if (nioc->n_len != expected_passthru_size) { 5671 cmd.npc_err = NVME_PASSTHRU_ERR_CMD_SIZE; 5672 rv = EINVAL; 5673 goto out; 5674 } 5675 5676 /* 5677 * Ensure the device supports the standard vendor specific 5678 * admin command format. 
5679 */ 5680 if (!nvme->n_idctl->id_nvscc.nv_spec) { 5681 cmd.npc_err = NVME_PASSTHRU_ERR_NOT_SUPPORTED; 5682 rv = ENOTSUP; 5683 goto out; 5684 } 5685 5686 if (nvme_passthru_copy_cmd_in((const void*)nioc->n_buf, &cmd, mode)) 5687 return (EFAULT); 5688 5689 if (!NVME_IS_VENDOR_SPECIFIC_CMD(cmd.npc_opcode)) { 5690 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_OPCODE; 5691 rv = EINVAL; 5692 goto out; 5693 } 5694 5695 /* 5696 * This restriction is not mandated by the spec, so future work 5697 * could relax this if it's necessary to support commands that both 5698 * read and write. 5699 */ 5700 if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0 && 5701 (cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0) { 5702 cmd.npc_err = NVME_PASSTHRU_ERR_READ_AND_WRITE; 5703 rv = EINVAL; 5704 goto out; 5705 } 5706 if (cmd.npc_timeout > nvme_vendor_specific_admin_cmd_max_timeout) { 5707 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_TIMEOUT; 5708 rv = EINVAL; 5709 goto out; 5710 } 5711 timeout = cmd.npc_timeout; 5712 5713 /* 5714 * Passed-thru command buffer verification: 5715 * - Size is multiple of DWords 5716 * - Non-null iff the length is non-zero 5717 * - Null if neither reading nor writing data. 5718 * - Non-null if reading or writing. 5719 * - Maximum buffer size. 5720 */ 5721 if ((cmd.npc_buflen % sizeof (uint32_t)) != 0) { 5722 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5723 rv = EINVAL; 5724 goto out; 5725 } 5726 if (((void*)cmd.npc_buf != NULL && cmd.npc_buflen == 0) || 5727 ((void*)cmd.npc_buf == NULL && cmd.npc_buflen != 0)) { 5728 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5729 rv = EINVAL; 5730 goto out; 5731 } 5732 if (cmd.npc_flags == 0 && (void*)cmd.npc_buf != NULL) { 5733 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5734 rv = EINVAL; 5735 goto out; 5736 } 5737 if ((cmd.npc_flags != 0) && ((void*)cmd.npc_buf == NULL)) { 5738 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5739 rv = EINVAL; 5740 goto out; 5741 } 5742 if (cmd.npc_buflen > nvme_vendor_specific_admin_cmd_size) { 5743 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5744 rv = EINVAL; 5745 goto out; 5746 } 5747 if ((cmd.npc_buflen >> NVME_DWORD_SHIFT) > UINT32_MAX) { 5748 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5749 rv = EINVAL; 5750 goto out; 5751 } 5752 5753 sqe.sqe_opc = cmd.npc_opcode; 5754 sqe.sqe_nsid = nsid; 5755 sqe.sqe_cdw10 = (uint32_t)(cmd.npc_buflen >> NVME_DWORD_SHIFT); 5756 sqe.sqe_cdw12 = cmd.npc_cdw12; 5757 sqe.sqe_cdw13 = cmd.npc_cdw13; 5758 sqe.sqe_cdw14 = cmd.npc_cdw14; 5759 sqe.sqe_cdw15 = cmd.npc_cdw15; 5760 if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0) 5761 rwk = FREAD; 5762 else if ((cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0) 5763 rwk = FWRITE; 5764 5765 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void*)cmd.npc_buf, 5766 cmd.npc_buflen, rwk, &cqe, timeout); 5767 cmd.npc_status = cqe.cqe_sf.sf_sc; 5768 cmd.npc_cdw0 = cqe.cqe_dw0; 5769 5770 out: 5771 if (nvme_passthru_copy_cmd_out(&cmd, (void*)nioc->n_buf, mode)) 5772 rv = EFAULT; 5773 return (rv); 5774 } 5775 5776 static int 5777 nvme_ioctl_ns_state(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5778 cred_t *cred_p) 5779 { 5780 _NOTE(ARGUNUSED(cred_p)); 5781 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 5782 5783 if ((mode & FREAD) == 0) 5784 return (EPERM); 5785 5786 if (nsid == 0) 5787 return (EINVAL); 5788 5789 nioc->n_arg = 0; 5790 5791 mutex_enter(&nvme->n_mgmt_mutex); 5792 5793 if (ns->ns_allocated) 5794 nioc->n_arg |= NVME_NS_STATE_ALLOCATED; 5795 5796 if (ns->ns_active) 5797 nioc->n_arg |= NVME_NS_STATE_ACTIVE; 5798 5799 if (ns->ns_attached) 5800 
static int
nvme_ioctl_ns_state(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid);

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nsid == 0)
		return (EINVAL);

	nioc->n_arg = 0;

	mutex_enter(&nvme->n_mgmt_mutex);

	if (ns->ns_allocated)
		nioc->n_arg |= NVME_NS_STATE_ALLOCATED;

	if (ns->ns_active)
		nioc->n_arg |= NVME_NS_STATE_ACTIVE;

	if (ns->ns_attached)
		nioc->n_arg |= NVME_NS_STATE_ATTACHED;

	if (ns->ns_ignore)
		nioc->n_arg |= NVME_NS_STATE_IGNORED;

	mutex_exit(&nvme->n_mgmt_mutex);

	return (0);
}

static int
nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(rval_p));
#endif
	minor_t minor = getminor(dev);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	int rv = 0;
	nvme_ioctl_t nioc;

	int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
		NULL,
		nvme_ioctl_identify,
		NULL,
		nvme_ioctl_capabilities,
		nvme_ioctl_get_logpage,
		nvme_ioctl_get_features,
		nvme_ioctl_intr_cnt,
		nvme_ioctl_version,
		nvme_ioctl_format,
		nvme_ioctl_detach,
		nvme_ioctl_attach,
		nvme_ioctl_firmware_download,
		nvme_ioctl_firmware_commit,
		nvme_ioctl_passthru,
		nvme_ioctl_ns_state
	};

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	if (IS_DEVCTL(cmd))
		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;

		if (ddi_copyin((void *)arg, &nioc32, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);

		nioc.n_len = nioc32.n_len;
		nioc.n_buf = nioc32.n_buf;
		nioc.n_arg = nioc32.n_arg;
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyin((void *)arg, &nioc, sizeof (nvme_ioctl_t), mode)
		    != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	if (nvme->n_dead && cmd != NVME_IOC_DETACH)
		return (EIO);

	if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
		rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
		    cred_p);
	else
		rv = EINVAL;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;

		nioc32.n_len = (size32_t)nioc.n_len;
		nioc32.n_buf = (uintptr32_t)nioc.n_buf;
		nioc32.n_arg = nioc.n_arg;

		if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t),
		    mode) != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	return (rv);
}
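
/*
 * For illustration, the namespace state bits assembled by
 * nvme_ioctl_ns_state() above could be queried from userspace roughly like
 * this (a hypothetical sketch; nvmeadm(8) is the supported consumer, and
 * ns_path stands for a namespace minor node of the controller):
 *
 *	nvme_ioctl_t nioc;
 *	int fd = open(ns_path, O_RDONLY);
 *
 *	bzero(&nioc, sizeof (nioc));
 *	if (ioctl(fd, NVME_IOC_NS_STATE, &nioc) == 0 &&
 *	    (nioc.n_arg & NVME_NS_STATE_ATTACHED) != 0)
 *		... the namespace is attached to blkdev ...
 *
 * No data buffer is involved; the state flags come back in n_arg.
 */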
/*
 * DDI UFM Callbacks
 */
static int
nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    ddi_ufm_image_t *img)
{
	nvme_t *nvme = arg;

	if (imgno != 0)
		return (EINVAL);

	ddi_ufm_image_set_desc(img, "Firmware");
	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);

	return (0);
}

/*
 * Fill out firmware slot information for the requested slot. The firmware
 * slot information is gathered by requesting the Firmware Slot Information log
 * page. The format of the page is described in section 5.10.1.3 of the NVMe
 * specification.
 *
 * We lazily cache the log page on the first call and then invalidate the cache
 * data after a successful firmware download or firmware commit command.
 * The cached data is protected by a mutex as the state can change
 * asynchronously to this callback.
 */
static int
nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    uint_t slotno, ddi_ufm_slot_t *slot)
{
	nvme_t *nvme = arg;
	void *log = NULL;
	size_t bufsize;
	ddi_ufm_attr_t attr = 0;
	char fw_ver[NVME_FWVER_SZ + 1];
	int ret;

	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
		return (EINVAL);

	mutex_enter(&nvme->n_fwslot_mutex);
	if (nvme->n_fwslot == NULL) {
		ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize,
		    NVME_LOGPAGE_FWSLOT, 0);
		if (ret != DDI_SUCCESS ||
		    bufsize != sizeof (nvme_fwslot_log_t)) {
			if (log != NULL)
				kmem_free(log, bufsize);
			mutex_exit(&nvme->n_fwslot_mutex);
			return (EIO);
		}
		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
	}

	/*
	 * NVMe numbers firmware slots starting at 1.
	 */
	if (slotno == (nvme->n_fwslot->fw_afi - 1))
		attr |= DDI_UFM_ATTR_ACTIVE;

	/*
	 * Slot 1 may be read-only; all other slots are always writable.
	 */
	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
		attr |= DDI_UFM_ATTR_WRITEABLE;

	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
		attr |= DDI_UFM_ATTR_EMPTY;
	} else {
		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
		    NVME_FWVER_SZ);
		fw_ver[NVME_FWVER_SZ] = '\0';
		ddi_ufm_slot_set_version(slot, fw_ver);
	}
	mutex_exit(&nvme->n_fwslot_mutex);

	ddi_ufm_slot_set_attrs(slot, attr);

	return (0);
}

static int
nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}
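
/*
 * These callbacks are handed to the UFM subsystem when the driver attaches.
 * A minimal sketch of that wiring, assuming the ddi_ufm_ops_t layout from
 * ddi_ufm(9E) (the first, reserved member is left NULL):
 *
 *	static ddi_ufm_ops_t nvme_ufm_ops = {
 *		NULL,
 *		nvme_ufm_fill_image,
 *		nvme_ufm_fill_slot,
 *		nvme_ufm_getcaps
 *	};
 *
 *	(in attach, with the nvme_t as the callback argument)
 *	if (ddi_ufm_init(nvme->n_dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
 *	    &nvme->n_ufmh, nvme) != DDI_SUCCESS)
 *		...
 *
 * After a successful firmware download or commit the driver invalidates the
 * cached slot log and calls ddi_ufm_update() so consumers re-read the slot
 * information through these callbacks.
 */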