/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2019 Western Digital Corporation.
 * Copyright 2020 Racktop Systems.
 * Copyright 2022 Oxide Computer Company.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver targets and is designed to support all NVMe 1.x devices.
 * Features are added to the driver as we encounter devices that require them
 * and as our needs dictate, so some commands or log pages may not take
 * advantage of newer features that devices support at this time. When you
 * encounter such a case, it is generally fine to add that support to the
 * driver as long as you take care to ensure that the requisite device version
 * is met before using it.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to use
 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
 * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array by steps of n_intr_cnt. Usually only
 * the admin queue will share an interrupt with one I/O queue. The interrupt
 * handler will retrieve completed commands from all queues sharing an
 * interrupt vector and will post them to a taskq for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array. The
 * array index is used as command identifier (CID) in the submission queue
 * entry.
 * Some commands may take a very long time to complete, and if the queue wraps
 * around in that time a submission may find the next array slot to still be
 * used by a long-running command. In this case the array is sequentially
 * searched for the next free slot. The length of the command array is the same
 * as the configured queue length. Queue overrun is prevented by the semaphore,
 * so a command submission may block if the queue is full.
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts are
 * turned off while dumping, the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support protection information. This driver does not support any of this
 * and ignores namespaces that have these attributes.
 *
 * As of NVMe 1.1 namespaces can have a 64-bit Extended Unique Identifier
 * (EUI64). This driver uses the EUI64, if present, to generate the devid and
 * passes it to blkdev to use in the device node names. As this is currently
 * untested, namespaces with an EUI64 are ignored by default.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces using a
 * 32bit minor node number.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller
 * and one minor node for each namespace. The only operations supported by
 * those minor nodes are open(9E), close(9E), and ioctl(9E). This serves as
 * the interface for the nvmeadm(1M) utility.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with presenting
 * a disk device to the system. As a result, the processing of I/O requests is
 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
 * setup, and splitting of transfers into manageable chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the namespace
 * format back to blkdev as physical block size to support partition and block
 * alignment. The devid is either based on the namespace EUI64, if present, or
 * composed using the device vendor ID, model number, serial number, and the
 * namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests.
 * Before this limit is known the driver assumes it is at least 1 and posts a
 * single asynchronous request. Later, when the limit is known, more
 * asynchronous event requests are posted to allow quicker reception of error
 * information. When an asynchronous event is posted by the hardware the driver
 * will parse the error status fields and log information or fault the device,
 * depending on the severity of the asynchronous event. The asynchronous event
 * request is then reused and posted to the admin queue again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware appears
 * to be healthy the driver attempts to abort the command. The original command
 * timeout is also applied to the abort command. If the abort times out too the
 * driver assumes the device to be dead, fences it off, and calls FMA to retire
 * it. In all other cases the aborted command should return immediately with a
 * status indicating it was aborted, and the driver will wait indefinitely for
 * that to happen. No timeout handling of normal I/O commands is presently
 * done.
 *
 * Any command that times out due to the controller dropping dead will be put
 * on the nvme_lost_cmds list if it references DMA memory. This prevents the
 * DMA memory from being reused by the system and later being written to by a
 * "dead" NVMe controller.
 *
 *
 * Locking:
 *
 * Each queue pair has an nq_mutex and an ncq_mutex. The nq_mutex must be held
 * when accessing shared state and submission queue registers; ncq_mutex is
 * held when accessing completion queue state and registers.
 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
 * mutexes themselves.
 *
 * Each command also has its own nc_mutex, which is associated with the
 * condition variable nc_cv. It is only used on admin commands which are run
 * synchronously. In that case it must be held across calls to
 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
 * nvme_admin_cmd(). It must also be held whenever the completion state of the
 * command is changed or while an admin command timeout is handled.
 *
 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
 * More than one nc_mutex may only be held when aborting commands. In this
 * case, the nc_mutex of the command to be aborted must be held across the call
 * to nvme_abort_cmd() to prevent the command from completing while the abort
 * is in progress.
 *
 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
 * acquired first. More than one nq_mutex is never held by a single thread.
 * The ncq_mutex is only held by nvme_retrieve_cmd() and
 * nvme_process_iocq(). nvme_process_iocq() is only called from the
 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
 * mutex is non-contentious but is required for implementation completeness
 * and safety.
 *
 * Each minor node has its own nm_mutex, which protects the open count nm_ocnt
 * and exclusive-open flag nm_oexcl.
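 *
 * As an illustrative sketch of the nc_mutex -> nq_mutex ordering (assumed
 * flow, not a verbatim excerpt of the code below): a thread timing out a
 * synchronous admin command already holds that command's nc_mutex, and the
 * abort it then issues acquires the admin queue's nq_mutex internally during
 * submission:
 *
 *	mutex_enter(&cmd->nc_mutex);	-- per-command completion state
 *	...				-- command times out
 *	nvme_abort_cmd(cmd, sec);	-- submits an ABORT admin command,
 *					-- taking the admin queue's nq_mutex
 *					-- inside while nc_mutex stays held
 *	mutex_exit(&cmd->nc_mutex);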
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry point
 * is still provided which is used to send a shutdown notification to the
 * device.
 *
 *
 * NVMe Hotplug:
 *
 * The driver supports hot removal. The driver uses the NDI event framework
 * to register a callback, nvme_remove_callback, to clean up when a disk is
 * removed. In particular, the driver will unqueue outstanding I/O commands and
 * set n_dead on the softstate to true so that other operations, such as ioctls
 * and command submissions, fail as well.
 *
 * While the callback registration relies on the NDI event framework, the
 * removal event itself is kicked off in the PCIe hotplug framework, when the
 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
 * device was removed from the slot.
 *
 * The NVMe driver instance itself will remain until the final close of the
 * device.
 *
 *
 * DDI UFM Support:
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation (an illustrative configuration fragment follows the
 * list):
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to the device being
 *   faulted
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to be
 *   posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 *   cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues.
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared.
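 *
 * As an illustration, a hypothetical nvme.conf fragment overriding some of
 * these properties could look like the following (the values are examples
 * only and must stay within the ranges documented above):
 *
 *	io-squeue-len=1024;
 *	io-cqueue-len=2048;
 *	volatile-write-cache-enable=0;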
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>
#include <sys/dkio.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"

/*
 * Assertions to make sure that we've properly captured various aspects of the
 * packed structures and haven't broken them during updates.
 */
CTASSERT(sizeof (nvme_identify_ctrl_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);

CTASSERT(sizeof (nvme_identify_nsid_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);

CTASSERT(sizeof (nvme_identify_primary_caps_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);


/* NVMe spec version supported */
static const int nvme_version_major = 1;

/* tunable for admin command timeout in seconds, default is 1s */
int nvme_admin_cmd_timeout = 1;

/* tunable for FORMAT NVM command timeout in seconds, default is 600s */
int nvme_format_cmd_timeout = 600;

/* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
int nvme_commit_save_cmd_timeout = 15;

/*
 * tunable for the size of arbitrary vendor specific admin commands,
 * default is 16MiB.
 */
uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;

/*
 * tunable for the max timeout of arbitrary vendor specific admin commands,
 * default is 60s.
353 */ 354 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60; 355 356 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t); 357 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t); 358 static int nvme_quiesce(dev_info_t *); 359 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *); 360 static int nvme_setup_interrupts(nvme_t *, int, int); 361 static void nvme_release_interrupts(nvme_t *); 362 static uint_t nvme_intr(caddr_t, caddr_t); 363 364 static void nvme_shutdown(nvme_t *, int, boolean_t); 365 static boolean_t nvme_reset(nvme_t *, boolean_t); 366 static int nvme_init(nvme_t *); 367 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int); 368 static void nvme_free_cmd(nvme_cmd_t *); 369 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t, 370 bd_xfer_t *); 371 static void nvme_admin_cmd(nvme_cmd_t *, int); 372 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *); 373 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *); 374 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *); 375 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int); 376 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *); 377 static void nvme_wait_cmd(nvme_cmd_t *, uint_t); 378 static void nvme_wakeup_cmd(void *); 379 static void nvme_async_event_task(void *); 380 381 static int nvme_check_unknown_cmd_status(nvme_cmd_t *); 382 static int nvme_check_vendor_cmd_status(nvme_cmd_t *); 383 static int nvme_check_integrity_cmd_status(nvme_cmd_t *); 384 static int nvme_check_specific_cmd_status(nvme_cmd_t *); 385 static int nvme_check_generic_cmd_status(nvme_cmd_t *); 386 static inline int nvme_check_cmd_status(nvme_cmd_t *); 387 388 static int nvme_abort_cmd(nvme_cmd_t *, uint_t); 389 static void nvme_async_event(nvme_t *); 390 static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t, 391 uint8_t, boolean_t, uint8_t); 392 static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t, 393 ...); 394 static int nvme_identify(nvme_t *, boolean_t, uint32_t, void **); 395 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t, 396 uint32_t *); 397 static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t *, 398 void **, size_t *); 399 static int nvme_write_cache_set(nvme_t *, boolean_t); 400 static int nvme_set_nqueues(nvme_t *); 401 402 static void nvme_free_dma(nvme_dma_t *); 403 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *, 404 nvme_dma_t **); 405 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t, 406 nvme_dma_t **); 407 static void nvme_free_qpair(nvme_qpair_t *); 408 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t); 409 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t); 410 411 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t); 412 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t); 413 static inline uint64_t nvme_get64(nvme_t *, uintptr_t); 414 static inline uint32_t nvme_get32(nvme_t *, uintptr_t); 415 416 static boolean_t nvme_check_regs_hdl(nvme_t *); 417 static boolean_t nvme_check_dma_hdl(nvme_dma_t *); 418 419 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t); 420 421 static void nvme_bd_xfer_done(void *); 422 static void nvme_bd_driveinfo(void *, bd_drive_t *); 423 static int nvme_bd_mediainfo(void *, bd_media_t *); 424 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t); 425 static int nvme_bd_read(void *, bd_xfer_t *); 426 
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
static int nvme_bd_free_space(void *, bd_xfer_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

/* DDI UFM callbacks */
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

static ddi_ufm_ops_t nvme_ufm_ops = {
	NULL,
	nvme_ufm_fill_image,
	nvme_ufm_fill_slot,
	nvme_ufm_getcaps
};

#define	NVME_MINOR_INST_SHIFT	9
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)
#define	NVME_IS_VENDOR_SPECIFIC_CMD(x)	(((x) >= 0xC0) && ((x) <= 0xFF))
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MIN	0xC0
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MAX	0xFF
#define	NVME_IS_VENDOR_SPECIFIC_LOGPAGE(x)	\
	(((x) >= NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) &&	\
	((x) <= NVME_VENDOR_SPECIFIC_LOGPAGE_MAX))

/*
 * NVMe versions 1.3 and later actually support log pages up to UINT32_MAX
 * DWords in size. However, revision 1.3 also modified the layout of the Get
 * Log Page command significantly relative to version 1.2, including changing
 * reserved bits, adding new bitfields, and requiring the use of command DWord
 * 11 to fully specify the size of the log page (the lower and upper 16 bits
 * of the number of DWords in the page are split between DWord 10 and DWord
 * 11, respectively).
 *
 * All of these impose significantly different layout requirements on the
 * `nvme_getlogpage_t` type. This could be solved with two different types, or
 * a complicated/nested union with the two versions as the overlying members.
 * Both of these are reasonable, if a bit convoluted. However, there is no
 * current need for such large pages, or a way to test them, as most log pages
 * actually fit within the current size limit. So for simplicity, we retain
 * the size cap from version 1.2.
 *
 * Note that the number of DWords is zero-based, so we add 1. It is subtracted
 * to form a zero-based value in `nvme_get_logpage`.
 */
#define	NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE	\
	(((1 << 12) + 1) * sizeof (uint32_t))

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
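 *
 * For example, the largest possible submission queue would occupy
 * (UINT16_MAX + 1) * sizeof (nvme_sqe_t) = 65536 * 64 bytes = 4 MiB of DMA
 * memory, which the attribute template below additionally requires to be
 * satisfiable with a single DMA cookie (dma_attr_sgllen = 1).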
497 */ 498 static ddi_dma_attr_t nvme_queue_dma_attr = { 499 .dma_attr_version = DMA_ATTR_V0, 500 .dma_attr_addr_lo = 0, 501 .dma_attr_addr_hi = 0xffffffffffffffffULL, 502 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1, 503 .dma_attr_align = 0x1000, 504 .dma_attr_burstsizes = 0x7ff, 505 .dma_attr_minxfer = 0x1000, 506 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t), 507 .dma_attr_seg = 0xffffffffffffffffULL, 508 .dma_attr_sgllen = 1, 509 .dma_attr_granular = 1, 510 .dma_attr_flags = 0, 511 }; 512 513 /* 514 * DMA attributes for transfers using Physical Region Page (PRP) entries 515 * 516 * A PRP entry describes one page of DMA memory using the page size specified 517 * in the controller configuration's memory page size register (CC.MPS). It uses 518 * a 64bit base address aligned to this page size. There is no limitation on 519 * chaining PRPs together for arbitrarily large DMA transfers. 520 */ 521 static ddi_dma_attr_t nvme_prp_dma_attr = { 522 .dma_attr_version = DMA_ATTR_V0, 523 .dma_attr_addr_lo = 0, 524 .dma_attr_addr_hi = 0xffffffffffffffffULL, 525 .dma_attr_count_max = 0xfff, 526 .dma_attr_align = 0x1000, 527 .dma_attr_burstsizes = 0x7ff, 528 .dma_attr_minxfer = 0x1000, 529 .dma_attr_maxxfer = 0x1000, 530 .dma_attr_seg = 0xfff, 531 .dma_attr_sgllen = -1, 532 .dma_attr_granular = 1, 533 .dma_attr_flags = 0, 534 }; 535 536 /* 537 * DMA attributes for transfers using scatter/gather lists 538 * 539 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a 540 * 32bit length field. SGL Segment and SGL Last Segment entries require the 541 * length to be a multiple of 16 bytes. 542 */ 543 static ddi_dma_attr_t nvme_sgl_dma_attr = { 544 .dma_attr_version = DMA_ATTR_V0, 545 .dma_attr_addr_lo = 0, 546 .dma_attr_addr_hi = 0xffffffffffffffffULL, 547 .dma_attr_count_max = 0xffffffffUL, 548 .dma_attr_align = 1, 549 .dma_attr_burstsizes = 0x7ff, 550 .dma_attr_minxfer = 0x10, 551 .dma_attr_maxxfer = 0xfffffffffULL, 552 .dma_attr_seg = 0xffffffffffffffffULL, 553 .dma_attr_sgllen = -1, 554 .dma_attr_granular = 0x10, 555 .dma_attr_flags = 0 556 }; 557 558 static ddi_device_acc_attr_t nvme_reg_acc_attr = { 559 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 560 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 561 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 562 }; 563 564 static struct cb_ops nvme_cb_ops = { 565 .cb_open = nvme_open, 566 .cb_close = nvme_close, 567 .cb_strategy = nodev, 568 .cb_print = nodev, 569 .cb_dump = nodev, 570 .cb_read = nodev, 571 .cb_write = nodev, 572 .cb_ioctl = nvme_ioctl, 573 .cb_devmap = nodev, 574 .cb_mmap = nodev, 575 .cb_segmap = nodev, 576 .cb_chpoll = nochpoll, 577 .cb_prop_op = ddi_prop_op, 578 .cb_str = 0, 579 .cb_flag = D_NEW | D_MP, 580 .cb_rev = CB_REV, 581 .cb_aread = nodev, 582 .cb_awrite = nodev 583 }; 584 585 static struct dev_ops nvme_dev_ops = { 586 .devo_rev = DEVO_REV, 587 .devo_refcnt = 0, 588 .devo_getinfo = ddi_no_info, 589 .devo_identify = nulldev, 590 .devo_probe = nulldev, 591 .devo_attach = nvme_attach, 592 .devo_detach = nvme_detach, 593 .devo_reset = nodev, 594 .devo_cb_ops = &nvme_cb_ops, 595 .devo_bus_ops = NULL, 596 .devo_power = NULL, 597 .devo_quiesce = nvme_quiesce, 598 }; 599 600 static struct modldrv nvme_modldrv = { 601 .drv_modops = &mod_driverops, 602 .drv_linkinfo = "NVMe v1.1b", 603 .drv_dev_ops = &nvme_dev_ops 604 }; 605 606 static struct modlinkage nvme_modlinkage = { 607 .ml_rev = MODREV_1, 608 .ml_linkage = { &nvme_modldrv, NULL } 609 }; 610 611 static bd_ops_t nvme_bd_ops = { 612 
.o_version = BD_OPS_CURRENT_VERSION, 613 .o_drive_info = nvme_bd_driveinfo, 614 .o_media_info = nvme_bd_mediainfo, 615 .o_devid_init = nvme_bd_devid, 616 .o_sync_cache = nvme_bd_sync, 617 .o_read = nvme_bd_read, 618 .o_write = nvme_bd_write, 619 .o_free_space = nvme_bd_free_space, 620 }; 621 622 /* 623 * This list will hold commands that have timed out and couldn't be aborted. 624 * As we don't know what the hardware may still do with the DMA memory we can't 625 * free them, so we'll keep them forever on this list where we can easily look 626 * at them with mdb. 627 */ 628 static struct list nvme_lost_cmds; 629 static kmutex_t nvme_lc_mutex; 630 631 int 632 _init(void) 633 { 634 int error; 635 636 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1); 637 if (error != DDI_SUCCESS) 638 return (error); 639 640 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache", 641 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 642 643 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL); 644 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t), 645 offsetof(nvme_cmd_t, nc_list)); 646 647 bd_mod_init(&nvme_dev_ops); 648 649 error = mod_install(&nvme_modlinkage); 650 if (error != DDI_SUCCESS) { 651 ddi_soft_state_fini(&nvme_state); 652 mutex_destroy(&nvme_lc_mutex); 653 list_destroy(&nvme_lost_cmds); 654 bd_mod_fini(&nvme_dev_ops); 655 } 656 657 return (error); 658 } 659 660 int 661 _fini(void) 662 { 663 int error; 664 665 if (!list_is_empty(&nvme_lost_cmds)) 666 return (DDI_FAILURE); 667 668 error = mod_remove(&nvme_modlinkage); 669 if (error == DDI_SUCCESS) { 670 ddi_soft_state_fini(&nvme_state); 671 kmem_cache_destroy(nvme_cmd_cache); 672 mutex_destroy(&nvme_lc_mutex); 673 list_destroy(&nvme_lost_cmds); 674 bd_mod_fini(&nvme_dev_ops); 675 } 676 677 return (error); 678 } 679 680 int 681 _info(struct modinfo *modinfop) 682 { 683 return (mod_info(&nvme_modlinkage, modinfop)); 684 } 685 686 static inline void 687 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val) 688 { 689 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 690 691 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 692 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val); 693 } 694 695 static inline void 696 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val) 697 { 698 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 699 700 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 701 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val); 702 } 703 704 static inline uint64_t 705 nvme_get64(nvme_t *nvme, uintptr_t reg) 706 { 707 uint64_t val; 708 709 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 710 711 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 712 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg)); 713 714 return (val); 715 } 716 717 static inline uint32_t 718 nvme_get32(nvme_t *nvme, uintptr_t reg) 719 { 720 uint32_t val; 721 722 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 723 724 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 725 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg)); 726 727 return (val); 728 } 729 730 static boolean_t 731 nvme_check_regs_hdl(nvme_t *nvme) 732 { 733 ddi_fm_error_t error; 734 735 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION); 736 737 if (error.fme_status != DDI_FM_OK) 738 return (B_TRUE); 739 740 return (B_FALSE); 741 } 742 743 static boolean_t 744 nvme_check_dma_hdl(nvme_dma_t *dma) 745 { 746 ddi_fm_error_t error; 747 748 if (dma == NULL) 749 return (B_FALSE); 750 751 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION); 752 753 if (error.fme_status != DDI_FM_OK) 754 
return (B_TRUE); 755 756 return (B_FALSE); 757 } 758 759 static void 760 nvme_free_dma_common(nvme_dma_t *dma) 761 { 762 if (dma->nd_dmah != NULL) 763 (void) ddi_dma_unbind_handle(dma->nd_dmah); 764 if (dma->nd_acch != NULL) 765 ddi_dma_mem_free(&dma->nd_acch); 766 if (dma->nd_dmah != NULL) 767 ddi_dma_free_handle(&dma->nd_dmah); 768 } 769 770 static void 771 nvme_free_dma(nvme_dma_t *dma) 772 { 773 nvme_free_dma_common(dma); 774 kmem_free(dma, sizeof (*dma)); 775 } 776 777 /* ARGSUSED */ 778 static void 779 nvme_prp_dma_destructor(void *buf, void *private) 780 { 781 nvme_dma_t *dma = (nvme_dma_t *)buf; 782 783 nvme_free_dma_common(dma); 784 } 785 786 static int 787 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma, 788 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr) 789 { 790 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL, 791 &dma->nd_dmah) != DDI_SUCCESS) { 792 /* 793 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and 794 * the only other possible error is DDI_DMA_BADATTR which 795 * indicates a driver bug which should cause a panic. 796 */ 797 dev_err(nvme->n_dip, CE_PANIC, 798 "!failed to get DMA handle, check DMA attributes"); 799 return (DDI_FAILURE); 800 } 801 802 /* 803 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified 804 * or the flags are conflicting, which isn't the case here. 805 */ 806 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr, 807 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp, 808 &dma->nd_len, &dma->nd_acch); 809 810 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp, 811 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 812 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) { 813 dev_err(nvme->n_dip, CE_WARN, 814 "!failed to bind DMA memory"); 815 atomic_inc_32(&nvme->n_dma_bind_err); 816 nvme_free_dma_common(dma); 817 return (DDI_FAILURE); 818 } 819 820 return (DDI_SUCCESS); 821 } 822 823 static int 824 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags, 825 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret) 826 { 827 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP); 828 829 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) != 830 DDI_SUCCESS) { 831 *ret = NULL; 832 kmem_free(dma, sizeof (nvme_dma_t)); 833 return (DDI_FAILURE); 834 } 835 836 bzero(dma->nd_memp, dma->nd_len); 837 838 *ret = dma; 839 return (DDI_SUCCESS); 840 } 841 842 /* ARGSUSED */ 843 static int 844 nvme_prp_dma_constructor(void *buf, void *private, int flags) 845 { 846 nvme_dma_t *dma = (nvme_dma_t *)buf; 847 nvme_t *nvme = (nvme_t *)private; 848 849 dma->nd_dmah = NULL; 850 dma->nd_acch = NULL; 851 852 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize, 853 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) { 854 return (-1); 855 } 856 857 ASSERT(dma->nd_ncookie == 1); 858 859 dma->nd_cached = B_TRUE; 860 861 return (0); 862 } 863 864 static int 865 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len, 866 uint_t flags, nvme_dma_t **dma) 867 { 868 uint32_t len = nentry * qe_len; 869 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr; 870 871 len = roundup(len, nvme->n_pagesize); 872 873 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma) 874 != DDI_SUCCESS) { 875 dev_err(nvme->n_dip, CE_WARN, 876 "!failed to get DMA memory for queue"); 877 goto fail; 878 } 879 880 if ((*dma)->nd_ncookie != 1) { 881 dev_err(nvme->n_dip, CE_WARN, 882 "!got too many cookies for queue DMA"); 883 goto fail; 884 } 885 886 return (DDI_SUCCESS); 887 888 fail: 889 if 
(*dma) { 890 nvme_free_dma(*dma); 891 *dma = NULL; 892 } 893 894 return (DDI_FAILURE); 895 } 896 897 static void 898 nvme_free_cq(nvme_cq_t *cq) 899 { 900 mutex_destroy(&cq->ncq_mutex); 901 902 if (cq->ncq_cmd_taskq != NULL) 903 taskq_destroy(cq->ncq_cmd_taskq); 904 905 if (cq->ncq_dma != NULL) 906 nvme_free_dma(cq->ncq_dma); 907 908 kmem_free(cq, sizeof (*cq)); 909 } 910 911 static void 912 nvme_free_qpair(nvme_qpair_t *qp) 913 { 914 int i; 915 916 mutex_destroy(&qp->nq_mutex); 917 sema_destroy(&qp->nq_sema); 918 919 if (qp->nq_sqdma != NULL) 920 nvme_free_dma(qp->nq_sqdma); 921 922 if (qp->nq_active_cmds > 0) 923 for (i = 0; i != qp->nq_nentry; i++) 924 if (qp->nq_cmd[i] != NULL) 925 nvme_free_cmd(qp->nq_cmd[i]); 926 927 if (qp->nq_cmd != NULL) 928 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry); 929 930 kmem_free(qp, sizeof (nvme_qpair_t)); 931 } 932 933 /* 934 * Destroy the pre-allocated cq array, but only free individual completion 935 * queues from the given starting index. 936 */ 937 static void 938 nvme_destroy_cq_array(nvme_t *nvme, uint_t start) 939 { 940 uint_t i; 941 942 for (i = start; i < nvme->n_cq_count; i++) 943 if (nvme->n_cq[i] != NULL) 944 nvme_free_cq(nvme->n_cq[i]); 945 946 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count); 947 } 948 949 static int 950 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx, 951 uint_t nthr) 952 { 953 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP); 954 char name[64]; /* large enough for the taskq name */ 955 956 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER, 957 DDI_INTR_PRI(nvme->n_intr_pri)); 958 959 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), 960 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS) 961 goto fail; 962 963 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp; 964 cq->ncq_nentry = nentry; 965 cq->ncq_id = idx; 966 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx); 967 968 /* 969 * Each completion queue has its own command taskq. 970 */ 971 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u", 972 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx); 973 974 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX, 975 TASKQ_PREPOPULATE); 976 977 if (cq->ncq_cmd_taskq == NULL) { 978 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd " 979 "taskq for cq %u", idx); 980 goto fail; 981 } 982 983 *cqp = cq; 984 return (DDI_SUCCESS); 985 986 fail: 987 nvme_free_cq(cq); 988 *cqp = NULL; 989 990 return (DDI_FAILURE); 991 } 992 993 /* 994 * Create the n_cq array big enough to hold "ncq" completion queues. 995 * If the array already exists it will be re-sized (but only larger). 996 * The admin queue is included in this array, which boosts the 997 * max number of entries to UINT16_MAX + 1. 
998 */ 999 static int 1000 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr) 1001 { 1002 nvme_cq_t **cq; 1003 uint_t i, cq_count; 1004 1005 ASSERT3U(ncq, >, nvme->n_cq_count); 1006 1007 cq = nvme->n_cq; 1008 cq_count = nvme->n_cq_count; 1009 1010 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP); 1011 nvme->n_cq_count = ncq; 1012 1013 for (i = 0; i < cq_count; i++) 1014 nvme->n_cq[i] = cq[i]; 1015 1016 for (; i < nvme->n_cq_count; i++) 1017 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) != 1018 DDI_SUCCESS) 1019 goto fail; 1020 1021 if (cq != NULL) 1022 kmem_free(cq, sizeof (*cq) * cq_count); 1023 1024 return (DDI_SUCCESS); 1025 1026 fail: 1027 nvme_destroy_cq_array(nvme, cq_count); 1028 /* 1029 * Restore the original array 1030 */ 1031 nvme->n_cq_count = cq_count; 1032 nvme->n_cq = cq; 1033 1034 return (DDI_FAILURE); 1035 } 1036 1037 static int 1038 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, 1039 uint_t idx) 1040 { 1041 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP); 1042 uint_t cq_idx; 1043 1044 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER, 1045 DDI_INTR_PRI(nvme->n_intr_pri)); 1046 1047 /* 1048 * The NVMe spec defines that a full queue has one empty (unused) slot; 1049 * initialize the semaphore accordingly. 1050 */ 1051 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL); 1052 1053 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), 1054 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS) 1055 goto fail; 1056 1057 /* 1058 * idx == 0 is adminq, those above 0 are shared io completion queues. 1059 */ 1060 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1); 1061 qp->nq_cq = nvme->n_cq[cq_idx]; 1062 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp; 1063 qp->nq_nentry = nentry; 1064 1065 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx); 1066 1067 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP); 1068 qp->nq_next_cmd = 0; 1069 1070 *nqp = qp; 1071 return (DDI_SUCCESS); 1072 1073 fail: 1074 nvme_free_qpair(qp); 1075 *nqp = NULL; 1076 1077 return (DDI_FAILURE); 1078 } 1079 1080 static nvme_cmd_t * 1081 nvme_alloc_cmd(nvme_t *nvme, int kmflag) 1082 { 1083 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag); 1084 1085 if (cmd == NULL) 1086 return (cmd); 1087 1088 bzero(cmd, sizeof (nvme_cmd_t)); 1089 1090 cmd->nc_nvme = nvme; 1091 1092 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER, 1093 DDI_INTR_PRI(nvme->n_intr_pri)); 1094 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL); 1095 1096 return (cmd); 1097 } 1098 1099 static void 1100 nvme_free_cmd(nvme_cmd_t *cmd) 1101 { 1102 /* Don't free commands on the lost commands list. 
*/ 1103 if (list_link_active(&cmd->nc_list)) 1104 return; 1105 1106 if (cmd->nc_dma) { 1107 nvme_free_dma(cmd->nc_dma); 1108 cmd->nc_dma = NULL; 1109 } 1110 1111 if (cmd->nc_prp) { 1112 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp); 1113 cmd->nc_prp = NULL; 1114 } 1115 1116 cv_destroy(&cmd->nc_cv); 1117 mutex_destroy(&cmd->nc_mutex); 1118 1119 kmem_cache_free(nvme_cmd_cache, cmd); 1120 } 1121 1122 static void 1123 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1124 { 1125 sema_p(&qp->nq_sema); 1126 nvme_submit_cmd_common(qp, cmd); 1127 } 1128 1129 static int 1130 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1131 { 1132 if (cmd->nc_nvme->n_dead) { 1133 return (EIO); 1134 } 1135 1136 if (sema_tryp(&qp->nq_sema) == 0) 1137 return (EAGAIN); 1138 1139 nvme_submit_cmd_common(qp, cmd); 1140 return (0); 1141 } 1142 1143 static void 1144 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1145 { 1146 nvme_reg_sqtdbl_t tail = { 0 }; 1147 1148 mutex_enter(&qp->nq_mutex); 1149 cmd->nc_completed = B_FALSE; 1150 1151 /* 1152 * Now that we hold the queue pair lock, we must check whether or not 1153 * the controller has been listed as dead (e.g. was removed due to 1154 * hotplug). This is necessary as otherwise we could race with 1155 * nvme_remove_callback(). Because this has not been enqueued, we don't 1156 * call nvme_unqueue_cmd(), which is why we must manually decrement the 1157 * semaphore. 1158 */ 1159 if (cmd->nc_nvme->n_dead) { 1160 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback, 1161 cmd, TQ_NOSLEEP, &cmd->nc_tqent); 1162 sema_v(&qp->nq_sema); 1163 mutex_exit(&qp->nq_mutex); 1164 return; 1165 } 1166 1167 /* 1168 * Try to insert the cmd into the active cmd array at the nq_next_cmd 1169 * slot. If the slot is already occupied advance to the next slot and 1170 * try again. This can happen for long running commands like async event 1171 * requests. 1172 */ 1173 while (qp->nq_cmd[qp->nq_next_cmd] != NULL) 1174 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1175 qp->nq_cmd[qp->nq_next_cmd] = cmd; 1176 1177 qp->nq_active_cmds++; 1178 1179 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd; 1180 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t)); 1181 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah, 1182 sizeof (nvme_sqe_t) * qp->nq_sqtail, 1183 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV); 1184 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1185 1186 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry; 1187 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r); 1188 1189 mutex_exit(&qp->nq_mutex); 1190 } 1191 1192 static nvme_cmd_t * 1193 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid) 1194 { 1195 nvme_cmd_t *cmd; 1196 1197 ASSERT(mutex_owned(&qp->nq_mutex)); 1198 ASSERT3S(cid, <, qp->nq_nentry); 1199 1200 cmd = qp->nq_cmd[cid]; 1201 qp->nq_cmd[cid] = NULL; 1202 ASSERT3U(qp->nq_active_cmds, >, 0); 1203 qp->nq_active_cmds--; 1204 sema_v(&qp->nq_sema); 1205 1206 ASSERT3P(cmd, !=, NULL); 1207 ASSERT3P(cmd->nc_nvme, ==, nvme); 1208 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid); 1209 1210 return (cmd); 1211 } 1212 1213 /* 1214 * Get the command tied to the next completed cqe and bump along completion 1215 * queue head counter. 1216 */ 1217 static nvme_cmd_t * 1218 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq) 1219 { 1220 nvme_qpair_t *qp; 1221 nvme_cqe_t *cqe; 1222 nvme_cmd_t *cmd; 1223 1224 ASSERT(mutex_owned(&cq->ncq_mutex)); 1225 1226 cqe = &cq->ncq_cq[cq->ncq_head]; 1227 1228 /* Check phase tag of CQE. 
Hardware inverts it for new entries. */ 1229 if (cqe->cqe_sf.sf_p == cq->ncq_phase) 1230 return (NULL); 1231 1232 qp = nvme->n_ioq[cqe->cqe_sqid]; 1233 1234 mutex_enter(&qp->nq_mutex); 1235 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid); 1236 mutex_exit(&qp->nq_mutex); 1237 1238 ASSERT(cmd->nc_sqid == cqe->cqe_sqid); 1239 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); 1240 1241 qp->nq_sqhead = cqe->cqe_sqhd; 1242 1243 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry; 1244 1245 /* Toggle phase on wrap-around. */ 1246 if (cq->ncq_head == 0) 1247 cq->ncq_phase = cq->ncq_phase ? 0 : 1; 1248 1249 return (cmd); 1250 } 1251 1252 /* 1253 * Process all completed commands on the io completion queue. 1254 */ 1255 static uint_t 1256 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq) 1257 { 1258 nvme_reg_cqhdbl_t head = { 0 }; 1259 nvme_cmd_t *cmd; 1260 uint_t completed = 0; 1261 1262 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1263 DDI_SUCCESS) 1264 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1265 __func__); 1266 1267 mutex_enter(&cq->ncq_mutex); 1268 1269 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1270 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd, 1271 TQ_NOSLEEP, &cmd->nc_tqent); 1272 1273 completed++; 1274 } 1275 1276 if (completed > 0) { 1277 /* 1278 * Update the completion queue head doorbell. 1279 */ 1280 head.b.cqhdbl_cqh = cq->ncq_head; 1281 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1282 } 1283 1284 mutex_exit(&cq->ncq_mutex); 1285 1286 return (completed); 1287 } 1288 1289 static nvme_cmd_t * 1290 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) 1291 { 1292 nvme_cq_t *cq = qp->nq_cq; 1293 nvme_reg_cqhdbl_t head = { 0 }; 1294 nvme_cmd_t *cmd; 1295 1296 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1297 DDI_SUCCESS) 1298 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1299 __func__); 1300 1301 mutex_enter(&cq->ncq_mutex); 1302 1303 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1304 head.b.cqhdbl_cqh = cq->ncq_head; 1305 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1306 } 1307 1308 mutex_exit(&cq->ncq_mutex); 1309 1310 return (cmd); 1311 } 1312 1313 static int 1314 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd) 1315 { 1316 nvme_cqe_t *cqe = &cmd->nc_cqe; 1317 1318 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1319 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1320 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1321 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1322 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1323 1324 if (cmd->nc_xfer != NULL) 1325 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1326 1327 if (cmd->nc_nvme->n_strict_version) { 1328 cmd->nc_nvme->n_dead = B_TRUE; 1329 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 1330 } 1331 1332 return (EIO); 1333 } 1334 1335 static int 1336 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd) 1337 { 1338 nvme_cqe_t *cqe = &cmd->nc_cqe; 1339 1340 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1341 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1342 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1343 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1344 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1345 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) { 1346 cmd->nc_nvme->n_dead = B_TRUE; 1347 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 1348 } 1349 1350 return (EIO); 1351 } 1352 1353 static int 1354 nvme_check_integrity_cmd_status(nvme_cmd_t 
*cmd) 1355 { 1356 nvme_cqe_t *cqe = &cmd->nc_cqe; 1357 1358 switch (cqe->cqe_sf.sf_sc) { 1359 case NVME_CQE_SC_INT_NVM_WRITE: 1360 /* write fail */ 1361 /* TODO: post ereport */ 1362 if (cmd->nc_xfer != NULL) 1363 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1364 return (EIO); 1365 1366 case NVME_CQE_SC_INT_NVM_READ: 1367 /* read fail */ 1368 /* TODO: post ereport */ 1369 if (cmd->nc_xfer != NULL) 1370 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1371 return (EIO); 1372 1373 default: 1374 return (nvme_check_unknown_cmd_status(cmd)); 1375 } 1376 } 1377 1378 static int 1379 nvme_check_generic_cmd_status(nvme_cmd_t *cmd) 1380 { 1381 nvme_cqe_t *cqe = &cmd->nc_cqe; 1382 1383 switch (cqe->cqe_sf.sf_sc) { 1384 case NVME_CQE_SC_GEN_SUCCESS: 1385 return (0); 1386 1387 /* 1388 * Errors indicating a bug in the driver should cause a panic. 1389 */ 1390 case NVME_CQE_SC_GEN_INV_OPC: 1391 /* Invalid Command Opcode */ 1392 if (!cmd->nc_dontpanic) 1393 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1394 "programming error: invalid opcode in cmd %p", 1395 (void *)cmd); 1396 return (EINVAL); 1397 1398 case NVME_CQE_SC_GEN_INV_FLD: 1399 /* Invalid Field in Command */ 1400 if (!cmd->nc_dontpanic) 1401 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1402 "programming error: invalid field in cmd %p", 1403 (void *)cmd); 1404 return (EIO); 1405 1406 case NVME_CQE_SC_GEN_ID_CNFL: 1407 /* Command ID Conflict */ 1408 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1409 "cmd ID conflict in cmd %p", (void *)cmd); 1410 return (0); 1411 1412 case NVME_CQE_SC_GEN_INV_NS: 1413 /* Invalid Namespace or Format */ 1414 if (!cmd->nc_dontpanic) 1415 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1416 "programming error: invalid NS/format in cmd %p", 1417 (void *)cmd); 1418 return (EINVAL); 1419 1420 case NVME_CQE_SC_GEN_NVM_LBA_RANGE: 1421 /* LBA Out Of Range */ 1422 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1423 "LBA out of range in cmd %p", (void *)cmd); 1424 return (0); 1425 1426 /* 1427 * Non-fatal errors, handle gracefully. 1428 */ 1429 case NVME_CQE_SC_GEN_DATA_XFR_ERR: 1430 /* Data Transfer Error (DMA) */ 1431 /* TODO: post ereport */ 1432 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err); 1433 if (cmd->nc_xfer != NULL) 1434 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1435 return (EIO); 1436 1437 case NVME_CQE_SC_GEN_INTERNAL_ERR: 1438 /* 1439 * Internal Error. The spec (v1.0, section 4.5.1.2) says 1440 * detailed error information is returned as async event, 1441 * so we pretty much ignore the error here and handle it 1442 * in the async event handler. 1443 */ 1444 atomic_inc_32(&cmd->nc_nvme->n_internal_err); 1445 if (cmd->nc_xfer != NULL) 1446 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1447 return (EIO); 1448 1449 case NVME_CQE_SC_GEN_ABORT_REQUEST: 1450 /* 1451 * Command Abort Requested. This normally happens only when a 1452 * command times out. 1453 */ 1454 /* TODO: post ereport or change blkdev to handle this? 
*/ 1455 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err); 1456 return (ECANCELED); 1457 1458 case NVME_CQE_SC_GEN_ABORT_PWRLOSS: 1459 /* Command Aborted due to Power Loss Notification */ 1460 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 1461 cmd->nc_nvme->n_dead = B_TRUE; 1462 return (EIO); 1463 1464 case NVME_CQE_SC_GEN_ABORT_SQ_DEL: 1465 /* Command Aborted due to SQ Deletion */ 1466 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del); 1467 return (EIO); 1468 1469 case NVME_CQE_SC_GEN_NVM_CAP_EXC: 1470 /* Capacity Exceeded */ 1471 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc); 1472 if (cmd->nc_xfer != NULL) 1473 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1474 return (EIO); 1475 1476 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY: 1477 /* Namespace Not Ready */ 1478 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy); 1479 if (cmd->nc_xfer != NULL) 1480 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1481 return (EIO); 1482 1483 default: 1484 return (nvme_check_unknown_cmd_status(cmd)); 1485 } 1486 } 1487 1488 static int 1489 nvme_check_specific_cmd_status(nvme_cmd_t *cmd) 1490 { 1491 nvme_cqe_t *cqe = &cmd->nc_cqe; 1492 1493 switch (cqe->cqe_sf.sf_sc) { 1494 case NVME_CQE_SC_SPC_INV_CQ: 1495 /* Completion Queue Invalid */ 1496 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE); 1497 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err); 1498 return (EINVAL); 1499 1500 case NVME_CQE_SC_SPC_INV_QID: 1501 /* Invalid Queue Identifier */ 1502 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 1503 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE || 1504 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE || 1505 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 1506 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err); 1507 return (EINVAL); 1508 1509 case NVME_CQE_SC_SPC_MAX_QSZ_EXC: 1510 /* Max Queue Size Exceeded */ 1511 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 1512 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 1513 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc); 1514 return (EINVAL); 1515 1516 case NVME_CQE_SC_SPC_ABRT_CMD_EXC: 1517 /* Abort Command Limit Exceeded */ 1518 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT); 1519 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1520 "abort command limit exceeded in cmd %p", (void *)cmd); 1521 return (0); 1522 1523 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC: 1524 /* Async Event Request Limit Exceeded */ 1525 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT); 1526 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1527 "async event request limit exceeded in cmd %p", 1528 (void *)cmd); 1529 return (0); 1530 1531 case NVME_CQE_SC_SPC_INV_INT_VECT: 1532 /* Invalid Interrupt Vector */ 1533 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 1534 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect); 1535 return (EINVAL); 1536 1537 case NVME_CQE_SC_SPC_INV_LOG_PAGE: 1538 /* Invalid Log Page */ 1539 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE); 1540 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page); 1541 return (EINVAL); 1542 1543 case NVME_CQE_SC_SPC_INV_FORMAT: 1544 /* Invalid Format */ 1545 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT); 1546 atomic_inc_32(&cmd->nc_nvme->n_inv_format); 1547 if (cmd->nc_xfer != NULL) 1548 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1549 return (EINVAL); 1550 1551 case NVME_CQE_SC_SPC_INV_Q_DEL: 1552 /* Invalid Queue Deletion */ 1553 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 1554 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del); 1555 return (EINVAL); 1556 1557 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR: 1558 /* Conflicting Attributes */ 1559 
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT || 1560 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 1561 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1562 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr); 1563 if (cmd->nc_xfer != NULL) 1564 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1565 return (EINVAL); 1566 1567 case NVME_CQE_SC_SPC_NVM_INV_PROT: 1568 /* Invalid Protection Information */ 1569 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE || 1570 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 1571 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1572 atomic_inc_32(&cmd->nc_nvme->n_inv_prot); 1573 if (cmd->nc_xfer != NULL) 1574 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1575 return (EINVAL); 1576 1577 case NVME_CQE_SC_SPC_NVM_READONLY: 1578 /* Write to Read Only Range */ 1579 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1580 atomic_inc_32(&cmd->nc_nvme->n_readonly); 1581 if (cmd->nc_xfer != NULL) 1582 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1583 return (EROFS); 1584 1585 case NVME_CQE_SC_SPC_INV_FW_SLOT: 1586 /* Invalid Firmware Slot */ 1587 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1588 return (EINVAL); 1589 1590 case NVME_CQE_SC_SPC_INV_FW_IMG: 1591 /* Invalid Firmware Image */ 1592 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1593 return (EINVAL); 1594 1595 case NVME_CQE_SC_SPC_FW_RESET: 1596 /* Conventional Reset Required */ 1597 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1598 return (0); 1599 1600 case NVME_CQE_SC_SPC_FW_NSSR: 1601 /* NVMe Subsystem Reset Required */ 1602 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1603 return (0); 1604 1605 case NVME_CQE_SC_SPC_FW_NEXT_RESET: 1606 /* Activation Requires Reset */ 1607 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1608 return (0); 1609 1610 case NVME_CQE_SC_SPC_FW_MTFA: 1611 /* Activation Requires Maximum Time Violation */ 1612 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1613 return (EAGAIN); 1614 1615 case NVME_CQE_SC_SPC_FW_PROHIBITED: 1616 /* Activation Prohibited */ 1617 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1618 return (EINVAL); 1619 1620 case NVME_CQE_SC_SPC_FW_OVERLAP: 1621 /* Overlapping Firmware Ranges */ 1622 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD); 1623 return (EINVAL); 1624 1625 default: 1626 return (nvme_check_unknown_cmd_status(cmd)); 1627 } 1628 } 1629 1630 static inline int 1631 nvme_check_cmd_status(nvme_cmd_t *cmd) 1632 { 1633 nvme_cqe_t *cqe = &cmd->nc_cqe; 1634 1635 /* 1636 * Take a shortcut if the controller is dead, or if 1637 * command status indicates no error. 
1638 */ 1639 if (cmd->nc_nvme->n_dead) 1640 return (EIO); 1641 1642 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1643 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 1644 return (0); 1645 1646 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 1647 return (nvme_check_generic_cmd_status(cmd)); 1648 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 1649 return (nvme_check_specific_cmd_status(cmd)); 1650 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 1651 return (nvme_check_integrity_cmd_status(cmd)); 1652 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 1653 return (nvme_check_vendor_cmd_status(cmd)); 1654 1655 return (nvme_check_unknown_cmd_status(cmd)); 1656 } 1657 1658 static int 1659 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec) 1660 { 1661 nvme_t *nvme = abort_cmd->nc_nvme; 1662 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1663 nvme_abort_cmd_t ac = { 0 }; 1664 int ret = 0; 1665 1666 sema_p(&nvme->n_abort_sema); 1667 1668 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid; 1669 ac.b.ac_sqid = abort_cmd->nc_sqid; 1670 1671 cmd->nc_sqid = 0; 1672 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 1673 cmd->nc_callback = nvme_wakeup_cmd; 1674 cmd->nc_sqe.sqe_cdw10 = ac.r; 1675 1676 /* 1677 * Send the ABORT to the hardware. The ABORT command will return _after_ 1678 * the aborted command has completed (aborted or otherwise), but since 1679 * we still hold the aborted command's mutex its callback hasn't been 1680 * processed yet. 1681 */ 1682 nvme_admin_cmd(cmd, sec); 1683 sema_v(&nvme->n_abort_sema); 1684 1685 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 1686 dev_err(nvme->n_dip, CE_WARN, 1687 "!ABORT failed with sct = %x, sc = %x", 1688 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1689 atomic_inc_32(&nvme->n_abort_failed); 1690 } else { 1691 dev_err(nvme->n_dip, CE_WARN, 1692 "!ABORT of command %d/%d %ssuccessful", 1693 abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid, 1694 cmd->nc_cqe.cqe_dw0 & 1 ? "un" : ""); 1695 if ((cmd->nc_cqe.cqe_dw0 & 1) == 0) 1696 atomic_inc_32(&nvme->n_cmd_aborted); 1697 } 1698 1699 nvme_free_cmd(cmd); 1700 return (ret); 1701 } 1702 1703 /* 1704 * nvme_wait_cmd -- wait for command completion or timeout 1705 * 1706 * In case of a serious error or a timeout of the abort command the hardware 1707 * will be declared dead and FMA will be notified. 1708 */ 1709 static void 1710 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec) 1711 { 1712 clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC); 1713 nvme_t *nvme = cmd->nc_nvme; 1714 nvme_reg_csts_t csts; 1715 nvme_qpair_t *qp; 1716 1717 ASSERT(mutex_owned(&cmd->nc_mutex)); 1718 1719 while (!cmd->nc_completed) { 1720 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) 1721 break; 1722 } 1723 1724 if (cmd->nc_completed) 1725 return; 1726 1727 /* 1728 * The command timed out. 1729 * 1730 * Check controller for fatal status, any errors associated with the 1731 * register or DMA handle, or for a double timeout (abort command timed 1732 * out). If necessary log a warning and call FMA. 
1733 */ 1734 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1735 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, " 1736 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 1737 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 1738 atomic_inc_32(&nvme->n_cmd_timeout); 1739 1740 if (csts.b.csts_cfs || 1741 nvme_check_regs_hdl(nvme) || 1742 nvme_check_dma_hdl(cmd->nc_dma) || 1743 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 1744 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1745 nvme->n_dead = B_TRUE; 1746 } else if (nvme_abort_cmd(cmd, sec) == 0) { 1747 /* 1748 * If the abort succeeded the command should complete 1749 * immediately with an appropriate status. 1750 */ 1751 while (!cmd->nc_completed) 1752 cv_wait(&cmd->nc_cv, &cmd->nc_mutex); 1753 1754 return; 1755 } 1756 1757 qp = nvme->n_ioq[cmd->nc_sqid]; 1758 1759 mutex_enter(&qp->nq_mutex); 1760 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 1761 mutex_exit(&qp->nq_mutex); 1762 1763 /* 1764 * As we don't know what the presumed dead hardware might still do with 1765 * the DMA memory, we'll put the command on the lost commands list if it 1766 * has any DMA memory. 1767 */ 1768 if (cmd->nc_dma != NULL) { 1769 mutex_enter(&nvme_lc_mutex); 1770 list_insert_head(&nvme_lost_cmds, cmd); 1771 mutex_exit(&nvme_lc_mutex); 1772 } 1773 } 1774 1775 static void 1776 nvme_wakeup_cmd(void *arg) 1777 { 1778 nvme_cmd_t *cmd = arg; 1779 1780 mutex_enter(&cmd->nc_mutex); 1781 cmd->nc_completed = B_TRUE; 1782 cv_signal(&cmd->nc_cv); 1783 mutex_exit(&cmd->nc_mutex); 1784 } 1785 1786 static void 1787 nvme_async_event_task(void *arg) 1788 { 1789 nvme_cmd_t *cmd = arg; 1790 nvme_t *nvme = cmd->nc_nvme; 1791 nvme_error_log_entry_t *error_log = NULL; 1792 nvme_health_log_t *health_log = NULL; 1793 size_t logsize = 0; 1794 nvme_async_event_t event; 1795 1796 /* 1797 * Check for errors associated with the async request itself. The only 1798 * command-specific error is "async event limit exceeded", which 1799 * indicates a programming error in the driver and causes a panic in 1800 * nvme_check_cmd_status(). 1801 * 1802 * Other possible errors are various scenarios where the async request 1803 * was aborted, or internal errors in the device. Internal errors are 1804 * reported to FMA, the command aborts need no special handling here. 1805 * 1806 * And finally, at least qemu nvme does not support async events, 1807 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we 1808 * will avoid posting async events. 1809 */ 1810 1811 if (nvme_check_cmd_status(cmd) != 0) { 1812 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1813 "!async event request returned failure, sct = %x, " 1814 "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 1815 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 1816 cmd->nc_cqe.cqe_sf.sf_m); 1817 1818 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1819 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 1820 cmd->nc_nvme->n_dead = B_TRUE; 1821 ddi_fm_service_impact(cmd->nc_nvme->n_dip, 1822 DDI_SERVICE_LOST); 1823 } 1824 1825 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1826 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC && 1827 cmd->nc_cqe.cqe_sf.sf_dnr == 1) { 1828 nvme->n_async_event_supported = B_FALSE; 1829 } 1830 1831 nvme_free_cmd(cmd); 1832 return; 1833 } 1834 1835 1836 event.r = cmd->nc_cqe.cqe_dw0; 1837 1838 /* Clear CQE and re-submit the async request. 
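The same nvme_cmd_t is reused for the next asynchronous event request; only the stale completion entry needs to be zeroed first.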
*/ 1839 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1840 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1841 1842 switch (event.b.ae_type) { 1843 case NVME_ASYNC_TYPE_ERROR: 1844 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1845 (void) nvme_get_logpage(nvme, B_FALSE, 1846 (void **)&error_log, &logsize, event.b.ae_logpage); 1847 } else { 1848 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1849 "async event reply: %d", event.b.ae_logpage); 1850 atomic_inc_32(&nvme->n_wrong_logpage); 1851 } 1852 1853 switch (event.b.ae_info) { 1854 case NVME_ASYNC_ERROR_INV_SQ: 1855 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1856 "invalid submission queue"); 1857 return; 1858 1859 case NVME_ASYNC_ERROR_INV_DBL: 1860 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1861 "invalid doorbell write value"); 1862 return; 1863 1864 case NVME_ASYNC_ERROR_DIAGFAIL: 1865 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1866 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1867 nvme->n_dead = B_TRUE; 1868 atomic_inc_32(&nvme->n_diagfail_event); 1869 break; 1870 1871 case NVME_ASYNC_ERROR_PERSISTENT: 1872 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1873 "device error"); 1874 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1875 nvme->n_dead = B_TRUE; 1876 atomic_inc_32(&nvme->n_persistent_event); 1877 break; 1878 1879 case NVME_ASYNC_ERROR_TRANSIENT: 1880 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1881 "device error"); 1882 /* TODO: send ereport */ 1883 atomic_inc_32(&nvme->n_transient_event); 1884 break; 1885 1886 case NVME_ASYNC_ERROR_FW_LOAD: 1887 dev_err(nvme->n_dip, CE_WARN, 1888 "!firmware image load error"); 1889 atomic_inc_32(&nvme->n_fw_load_event); 1890 break; 1891 } 1892 break; 1893 1894 case NVME_ASYNC_TYPE_HEALTH: 1895 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1896 (void) nvme_get_logpage(nvme, B_FALSE, 1897 (void **)&health_log, &logsize, event.b.ae_logpage, 1898 -1); 1899 } else { 1900 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1901 "async event reply: %d", event.b.ae_logpage); 1902 atomic_inc_32(&nvme->n_wrong_logpage); 1903 } 1904 1905 switch (event.b.ae_info) { 1906 case NVME_ASYNC_HEALTH_RELIABILITY: 1907 dev_err(nvme->n_dip, CE_WARN, 1908 "!device reliability compromised"); 1909 /* TODO: send ereport */ 1910 atomic_inc_32(&nvme->n_reliability_event); 1911 break; 1912 1913 case NVME_ASYNC_HEALTH_TEMPERATURE: 1914 dev_err(nvme->n_dip, CE_WARN, 1915 "!temperature above threshold"); 1916 /* TODO: send ereport */ 1917 atomic_inc_32(&nvme->n_temperature_event); 1918 break; 1919 1920 case NVME_ASYNC_HEALTH_SPARE: 1921 dev_err(nvme->n_dip, CE_WARN, 1922 "!spare space below threshold"); 1923 /* TODO: send ereport */ 1924 atomic_inc_32(&nvme->n_spare_event); 1925 break; 1926 } 1927 break; 1928 1929 case NVME_ASYNC_TYPE_VENDOR: 1930 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 1931 "received, info = %x, logpage = %x", event.b.ae_info, 1932 event.b.ae_logpage); 1933 atomic_inc_32(&nvme->n_vendor_event); 1934 break; 1935 1936 default: 1937 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 1938 "type = %x, info = %x, logpage = %x", event.b.ae_type, 1939 event.b.ae_info, event.b.ae_logpage); 1940 atomic_inc_32(&nvme->n_unknown_event); 1941 break; 1942 } 1943 1944 if (error_log) 1945 kmem_free(error_log, logsize); 1946 1947 if (health_log) 1948 kmem_free(health_log, logsize); 1949 } 1950 1951 static void 1952 nvme_admin_cmd(nvme_cmd_t *cmd, int sec) 1953 { 1954 mutex_enter(&cmd->nc_mutex); 1955 
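/*
 * Submit the command and wait for completion (or timeout) while holding
 * nc_mutex, so nvme_wakeup_cmd() cannot signal the CV before
 * nvme_wait_cmd() has started waiting on it.
 */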
nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd); 1956 nvme_wait_cmd(cmd, sec); 1957 mutex_exit(&cmd->nc_mutex); 1958 } 1959 1960 static void 1961 nvme_async_event(nvme_t *nvme) 1962 { 1963 nvme_cmd_t *cmd; 1964 1965 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1966 cmd->nc_sqid = 0; 1967 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 1968 cmd->nc_callback = nvme_async_event_task; 1969 cmd->nc_dontpanic = B_TRUE; 1970 1971 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1972 } 1973 1974 static int 1975 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf, 1976 boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses) 1977 { 1978 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1979 nvme_format_nvm_t format_nvm = { 0 }; 1980 int ret; 1981 1982 format_nvm.b.fm_lbaf = lbaf & 0xf; 1983 format_nvm.b.fm_ms = ms ? 1 : 0; 1984 format_nvm.b.fm_pi = pi & 0x7; 1985 format_nvm.b.fm_pil = pil ? 1 : 0; 1986 format_nvm.b.fm_ses = ses & 0x7; 1987 1988 cmd->nc_sqid = 0; 1989 cmd->nc_callback = nvme_wakeup_cmd; 1990 cmd->nc_sqe.sqe_nsid = nsid; 1991 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 1992 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 1993 1994 /* 1995 * Some devices like Samsung SM951 don't allow formatting of all 1996 * namespaces in one command. Handle that gracefully. 1997 */ 1998 if (nsid == (uint32_t)-1) 1999 cmd->nc_dontpanic = B_TRUE; 2000 /* 2001 * If this format request was initiated by the user, then don't allow a 2002 * programmer error to panic the system. 2003 */ 2004 if (user) 2005 cmd->nc_dontpanic = B_TRUE; 2006 2007 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 2008 2009 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2010 dev_err(nvme->n_dip, CE_WARN, 2011 "!FORMAT failed with sct = %x, sc = %x", 2012 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2013 } 2014 2015 nvme_free_cmd(cmd); 2016 return (ret); 2017 } 2018 2019 /* 2020 * The `bufsize` parameter is usually an output parameter, set by this routine 2021 * when filling in the supported types of logpages from the device. However, for 2022 * vendor-specific pages, it is an input parameter, and must be set 2023 * appropriately by callers. 2024 */ 2025 static int 2026 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 2027 uint8_t logpage, ...) 2028 { 2029 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2030 nvme_getlogpage_t getlogpage = { 0 }; 2031 va_list ap; 2032 int ret; 2033 2034 va_start(ap, logpage); 2035 2036 cmd->nc_sqid = 0; 2037 cmd->nc_callback = nvme_wakeup_cmd; 2038 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 2039 2040 if (user) 2041 cmd->nc_dontpanic = B_TRUE; 2042 2043 getlogpage.b.lp_lid = logpage; 2044 2045 switch (logpage) { 2046 case NVME_LOGPAGE_ERROR: 2047 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2048 *bufsize = MIN(NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE, 2049 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t)); 2050 break; 2051 2052 case NVME_LOGPAGE_HEALTH: 2053 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 2054 *bufsize = sizeof (nvme_health_log_t); 2055 break; 2056 2057 case NVME_LOGPAGE_FWSLOT: 2058 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2059 *bufsize = sizeof (nvme_fwslot_log_t); 2060 break; 2061 2062 default: 2063 /* 2064 * This intentionally only checks against the minimum valid 2065 * log page ID. `logpage` is a uint8_t, and `0xFF` is a valid 2066 * page ID, so this one-sided check avoids a compiler error 2067 * about a check that's always true. 
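 * Vendor-specific log pages are otherwise passed straight through: the
 * target namespace ID is taken from the variadic argument and the
 * caller must have supplied *bufsize itself.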
2068 */ 2069 if (logpage < NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) { 2070 dev_err(nvme->n_dip, CE_WARN, 2071 "!unknown log page requested: %d", logpage); 2072 atomic_inc_32(&nvme->n_unknown_logpage); 2073 ret = EINVAL; 2074 goto fail; 2075 } 2076 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 2077 } 2078 2079 va_end(ap); 2080 2081 getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1; 2082 2083 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 2084 2085 if (nvme_zalloc_dma(nvme, *bufsize, 2086 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2087 dev_err(nvme->n_dip, CE_WARN, 2088 "!nvme_zalloc_dma failed for GET LOG PAGE"); 2089 ret = ENOMEM; 2090 goto fail; 2091 } 2092 2093 if ((ret = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0) 2094 goto fail; 2095 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2096 2097 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2098 dev_err(nvme->n_dip, CE_WARN, 2099 "!GET LOG PAGE failed with sct = %x, sc = %x", 2100 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2101 goto fail; 2102 } 2103 2104 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2105 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2106 2107 fail: 2108 nvme_free_cmd(cmd); 2109 2110 return (ret); 2111 } 2112 2113 static int 2114 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, void **buf) 2115 { 2116 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2117 int ret; 2118 2119 if (buf == NULL) 2120 return (EINVAL); 2121 2122 cmd->nc_sqid = 0; 2123 cmd->nc_callback = nvme_wakeup_cmd; 2124 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 2125 cmd->nc_sqe.sqe_nsid = nsid; 2126 cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL; 2127 2128 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 2129 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2130 dev_err(nvme->n_dip, CE_WARN, 2131 "!nvme_zalloc_dma failed for IDENTIFY"); 2132 ret = ENOMEM; 2133 goto fail; 2134 } 2135 2136 if (cmd->nc_dma->nd_ncookie > 2) { 2137 dev_err(nvme->n_dip, CE_WARN, 2138 "!too many DMA cookies for IDENTIFY"); 2139 atomic_inc_32(&nvme->n_too_many_cookies); 2140 ret = ENOMEM; 2141 goto fail; 2142 } 2143 2144 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 2145 if (cmd->nc_dma->nd_ncookie > 1) { 2146 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2147 &cmd->nc_dma->nd_cookie); 2148 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2149 cmd->nc_dma->nd_cookie.dmac_laddress; 2150 } 2151 2152 if (user) 2153 cmd->nc_dontpanic = B_TRUE; 2154 2155 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2156 2157 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2158 dev_err(nvme->n_dip, CE_WARN, 2159 "!IDENTIFY failed with sct = %x, sc = %x", 2160 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2161 goto fail; 2162 } 2163 2164 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 2165 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE); 2166 2167 fail: 2168 nvme_free_cmd(cmd); 2169 2170 return (ret); 2171 } 2172 2173 static int 2174 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2175 uint32_t val, uint32_t *res) 2176 { 2177 _NOTE(ARGUNUSED(nsid)); 2178 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2179 int ret = EINVAL; 2180 2181 ASSERT(res != NULL); 2182 2183 cmd->nc_sqid = 0; 2184 cmd->nc_callback = nvme_wakeup_cmd; 2185 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 2186 cmd->nc_sqe.sqe_cdw10 = feature; 2187 cmd->nc_sqe.sqe_cdw11 = val; 2188 2189 if (user) 2190 cmd->nc_dontpanic = B_TRUE; 2191 2192 switch (feature) { 2193 case NVME_FEAT_WRITE_CACHE: 2194 if 
(!nvme->n_write_cache_present) 2195 goto fail; 2196 break; 2197 2198 case NVME_FEAT_NQUEUES: 2199 break; 2200 2201 default: 2202 goto fail; 2203 } 2204 2205 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2206 2207 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2208 dev_err(nvme->n_dip, CE_WARN, 2209 "!SET FEATURES %d failed with sct = %x, sc = %x", 2210 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2211 cmd->nc_cqe.cqe_sf.sf_sc); 2212 goto fail; 2213 } 2214 2215 *res = cmd->nc_cqe.cqe_dw0; 2216 2217 fail: 2218 nvme_free_cmd(cmd); 2219 return (ret); 2220 } 2221 2222 static int 2223 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2224 uint32_t *res, void **buf, size_t *bufsize) 2225 { 2226 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2227 int ret = EINVAL; 2228 2229 ASSERT(res != NULL); 2230 2231 if (bufsize != NULL) 2232 *bufsize = 0; 2233 2234 cmd->nc_sqid = 0; 2235 cmd->nc_callback = nvme_wakeup_cmd; 2236 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES; 2237 cmd->nc_sqe.sqe_cdw10 = feature; 2238 cmd->nc_sqe.sqe_cdw11 = *res; 2239 2240 /* 2241 * For some of the optional features there doesn't seem to be a method 2242 * of detecting whether it is supported other than using it. This will 2243 * cause "Invalid Field in Command" error, which is normally considered 2244 * a programming error. Set the nc_dontpanic flag to override the panic 2245 * in nvme_check_generic_cmd_status(). 2246 */ 2247 switch (feature) { 2248 case NVME_FEAT_ARBITRATION: 2249 case NVME_FEAT_POWER_MGMT: 2250 case NVME_FEAT_TEMPERATURE: 2251 case NVME_FEAT_ERROR: 2252 case NVME_FEAT_NQUEUES: 2253 case NVME_FEAT_INTR_COAL: 2254 case NVME_FEAT_INTR_VECT: 2255 case NVME_FEAT_WRITE_ATOM: 2256 case NVME_FEAT_ASYNC_EVENT: 2257 break; 2258 2259 case NVME_FEAT_WRITE_CACHE: 2260 if (!nvme->n_write_cache_present) 2261 goto fail; 2262 break; 2263 2264 case NVME_FEAT_LBA_RANGE: 2265 if (!nvme->n_lba_range_supported) 2266 goto fail; 2267 2268 cmd->nc_dontpanic = B_TRUE; 2269 cmd->nc_sqe.sqe_nsid = nsid; 2270 ASSERT(bufsize != NULL); 2271 *bufsize = NVME_LBA_RANGE_BUFSIZE; 2272 break; 2273 2274 case NVME_FEAT_AUTO_PST: 2275 if (!nvme->n_auto_pst_supported) 2276 goto fail; 2277 2278 ASSERT(bufsize != NULL); 2279 *bufsize = NVME_AUTO_PST_BUFSIZE; 2280 break; 2281 2282 case NVME_FEAT_PROGRESS: 2283 if (!nvme->n_progress_supported) 2284 goto fail; 2285 2286 cmd->nc_dontpanic = B_TRUE; 2287 break; 2288 2289 default: 2290 goto fail; 2291 } 2292 2293 if (user) 2294 cmd->nc_dontpanic = B_TRUE; 2295 2296 if (bufsize != NULL && *bufsize != 0) { 2297 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ, 2298 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2299 dev_err(nvme->n_dip, CE_WARN, 2300 "!nvme_zalloc_dma failed for GET FEATURES"); 2301 ret = ENOMEM; 2302 goto fail; 2303 } 2304 2305 if (cmd->nc_dma->nd_ncookie > 2) { 2306 dev_err(nvme->n_dip, CE_WARN, 2307 "!too many DMA cookies for GET FEATURES"); 2308 atomic_inc_32(&nvme->n_too_many_cookies); 2309 ret = ENOMEM; 2310 goto fail; 2311 } 2312 2313 cmd->nc_sqe.sqe_dptr.d_prp[0] = 2314 cmd->nc_dma->nd_cookie.dmac_laddress; 2315 if (cmd->nc_dma->nd_ncookie > 1) { 2316 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2317 &cmd->nc_dma->nd_cookie); 2318 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2319 cmd->nc_dma->nd_cookie.dmac_laddress; 2320 } 2321 } 2322 2323 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2324 2325 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2326 boolean_t known = B_TRUE; 2327 2328 /* Check if this is unsupported optional feature */ 2329 if (cmd->nc_cqe.cqe_sf.sf_sct == 
NVME_CQE_SCT_GENERIC && 2330 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) { 2331 switch (feature) { 2332 case NVME_FEAT_LBA_RANGE: 2333 nvme->n_lba_range_supported = B_FALSE; 2334 break; 2335 case NVME_FEAT_PROGRESS: 2336 nvme->n_progress_supported = B_FALSE; 2337 break; 2338 default: 2339 known = B_FALSE; 2340 break; 2341 } 2342 } else { 2343 known = B_FALSE; 2344 } 2345 2346 /* Report the error otherwise */ 2347 if (!known) { 2348 dev_err(nvme->n_dip, CE_WARN, 2349 "!GET FEATURES %d failed with sct = %x, sc = %x", 2350 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2351 cmd->nc_cqe.cqe_sf.sf_sc); 2352 } 2353 2354 goto fail; 2355 } 2356 2357 if (bufsize != NULL && *bufsize != 0) { 2358 ASSERT(buf != NULL); 2359 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2360 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2361 } 2362 2363 *res = cmd->nc_cqe.cqe_dw0; 2364 2365 fail: 2366 nvme_free_cmd(cmd); 2367 return (ret); 2368 } 2369 2370 static int 2371 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 2372 { 2373 nvme_write_cache_t nwc = { 0 }; 2374 2375 if (enable) 2376 nwc.b.wc_wce = 1; 2377 2378 return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE, 2379 nwc.r, &nwc.r)); 2380 } 2381 2382 static int 2383 nvme_set_nqueues(nvme_t *nvme) 2384 { 2385 nvme_nqueues_t nq = { 0 }; 2386 int ret; 2387 2388 /* 2389 * The default is to allocate one completion queue per vector. 2390 */ 2391 if (nvme->n_completion_queues == -1) 2392 nvme->n_completion_queues = nvme->n_intr_cnt; 2393 2394 /* 2395 * There is no point in having more completion queues than 2396 * interrupt vectors. 2397 */ 2398 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2399 nvme->n_intr_cnt); 2400 2401 /* 2402 * The default is to use one submission queue per completion queue. 2403 */ 2404 if (nvme->n_submission_queues == -1) 2405 nvme->n_submission_queues = nvme->n_completion_queues; 2406 2407 /* 2408 * There is no point in having more completion queues than 2409 * submission queues. 2410 */ 2411 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2412 nvme->n_submission_queues); 2413 2414 ASSERT(nvme->n_submission_queues > 0); 2415 ASSERT(nvme->n_completion_queues > 0); 2416 2417 nq.b.nq_nsq = nvme->n_submission_queues - 1; 2418 nq.b.nq_ncq = nvme->n_completion_queues - 1; 2419 2420 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r, 2421 &nq.r); 2422 2423 if (ret == 0) { 2424 /* 2425 * Never use more than the requested number of queues.
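 * The controller reports how many queues it actually granted in
 * completion dword 0 (0's based), so clamp both counts to that.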
2426 */ 2427 nvme->n_submission_queues = MIN(nvme->n_submission_queues, 2428 nq.b.nq_nsq + 1); 2429 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2430 nq.b.nq_ncq + 1); 2431 } 2432 2433 return (ret); 2434 } 2435 2436 static int 2437 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq) 2438 { 2439 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2440 nvme_create_queue_dw10_t dw10 = { 0 }; 2441 nvme_create_cq_dw11_t c_dw11 = { 0 }; 2442 int ret; 2443 2444 dw10.b.q_qid = cq->ncq_id; 2445 dw10.b.q_qsize = cq->ncq_nentry - 1; 2446 2447 c_dw11.b.cq_pc = 1; 2448 c_dw11.b.cq_ien = 1; 2449 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt; 2450 2451 cmd->nc_sqid = 0; 2452 cmd->nc_callback = nvme_wakeup_cmd; 2453 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 2454 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2455 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 2456 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress; 2457 2458 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2459 2460 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2461 dev_err(nvme->n_dip, CE_WARN, 2462 "!CREATE CQUEUE failed with sct = %x, sc = %x", 2463 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2464 } 2465 2466 nvme_free_cmd(cmd); 2467 2468 return (ret); 2469 } 2470 2471 static int 2472 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 2473 { 2474 nvme_cq_t *cq = qp->nq_cq; 2475 nvme_cmd_t *cmd; 2476 nvme_create_queue_dw10_t dw10 = { 0 }; 2477 nvme_create_sq_dw11_t s_dw11 = { 0 }; 2478 int ret; 2479 2480 /* 2481 * It is possible to have more qpairs than completion queues, 2482 * and when the idx > ncq_id, that completion queue is shared 2483 * and has already been created. 2484 */ 2485 if (idx <= cq->ncq_id && 2486 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 2487 return (DDI_FAILURE); 2488 2489 dw10.b.q_qid = idx; 2490 dw10.b.q_qsize = qp->nq_nentry - 1; 2491 2492 s_dw11.b.sq_pc = 1; 2493 s_dw11.b.sq_cqid = cq->ncq_id; 2494 2495 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2496 cmd->nc_sqid = 0; 2497 cmd->nc_callback = nvme_wakeup_cmd; 2498 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 2499 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2500 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 2501 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 2502 2503 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2504 2505 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2506 dev_err(nvme->n_dip, CE_WARN, 2507 "!CREATE SQUEUE failed with sct = %x, sc = %x", 2508 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2509 } 2510 2511 nvme_free_cmd(cmd); 2512 2513 return (ret); 2514 } 2515 2516 static boolean_t 2517 nvme_reset(nvme_t *nvme, boolean_t quiesce) 2518 { 2519 nvme_reg_csts_t csts; 2520 int i; 2521 2522 nvme_put32(nvme, NVME_REG_CC, 0); 2523 2524 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2525 if (csts.b.csts_rdy == 1) { 2526 nvme_put32(nvme, NVME_REG_CC, 0); 2527 for (i = 0; i != nvme->n_timeout * 10; i++) { 2528 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2529 if (csts.b.csts_rdy == 0) 2530 break; 2531 2532 if (quiesce) 2533 drv_usecwait(50000); 2534 else 2535 delay(drv_usectohz(50000)); 2536 } 2537 } 2538 2539 nvme_put32(nvme, NVME_REG_AQA, 0); 2540 nvme_put32(nvme, NVME_REG_ASQ, 0); 2541 nvme_put32(nvme, NVME_REG_ACQ, 0); 2542 2543 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2544 return (csts.b.csts_rdy == 0 ? 
B_TRUE : B_FALSE); 2545 } 2546 2547 static void 2548 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) 2549 { 2550 nvme_reg_cc_t cc; 2551 nvme_reg_csts_t csts; 2552 int i; 2553 2554 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT); 2555 2556 cc.r = nvme_get32(nvme, NVME_REG_CC); 2557 cc.b.cc_shn = mode & 0x3; 2558 nvme_put32(nvme, NVME_REG_CC, cc.r); 2559 2560 for (i = 0; i != 10; i++) { 2561 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2562 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 2563 break; 2564 2565 if (quiesce) 2566 drv_usecwait(100000); 2567 else 2568 delay(drv_usectohz(100000)); 2569 } 2570 } 2571 2572 2573 static void 2574 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 2575 { 2576 /* 2577 * Section 7.7 of the spec describes how to get a unique ID for 2578 * the controller: the vendor ID, the model name and the serial 2579 * number shall be unique when combined. 2580 * 2581 * If a namespace has no EUI64 we use the above and add the hex 2582 * namespace ID to get a unique ID for the namespace. 2583 */ 2584 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2585 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 2586 2587 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2588 bcopy(nvme->n_idctl->id_serial, serial, 2589 sizeof (nvme->n_idctl->id_serial)); 2590 2591 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2592 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 2593 2594 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X", 2595 nvme->n_idctl->id_vid, model, serial, nsid); 2596 } 2597 2598 static int 2599 nvme_init_ns(nvme_t *nvme, int nsid) 2600 { 2601 nvme_namespace_t *ns = &nvme->n_ns[nsid - 1]; 2602 nvme_identify_nsid_t *idns; 2603 boolean_t was_ignored; 2604 int last_rp; 2605 2606 ns->ns_nvme = nvme; 2607 2608 if (nvme_identify(nvme, B_FALSE, nsid, (void **)&idns) != 0) { 2609 dev_err(nvme->n_dip, CE_WARN, 2610 "!failed to identify namespace %d", nsid); 2611 return (DDI_FAILURE); 2612 } 2613 2614 ns->ns_idns = idns; 2615 ns->ns_id = nsid; 2616 ns->ns_block_count = idns->id_nsize; 2617 ns->ns_block_size = 2618 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 2619 ns->ns_best_block_size = ns->ns_block_size; 2620 2621 /* 2622 * Get the EUI64 if present. Use it for devid and device node names. 2623 */ 2624 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2625 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 2626 2627 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 2628 if (*(uint64_t *)ns->ns_eui64 != 0) { 2629 uint8_t *eui64 = ns->ns_eui64; 2630 2631 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), 2632 "%02x%02x%02x%02x%02x%02x%02x%02x", 2633 eui64[0], eui64[1], eui64[2], eui64[3], 2634 eui64[4], eui64[5], eui64[6], eui64[7]); 2635 } else { 2636 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d", 2637 ns->ns_id); 2638 2639 nvme_prepare_devid(nvme, ns->ns_id); 2640 } 2641 2642 /* 2643 * Find the LBA format with no metadata and the best relative 2644 * performance. A value of 3 means "degraded", 0 is best. 
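 * Start from the worst relative performance (3) and remember the best
 * metadata-free format seen; an LBA data size of 0 marks an unused
 * format and ends the search.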
2645 */ 2646 last_rp = 3; 2647 for (int j = 0; j <= idns->id_nlbaf; j++) { 2648 if (idns->id_lbaf[j].lbaf_lbads == 0) 2649 break; 2650 if (idns->id_lbaf[j].lbaf_ms != 0) 2651 continue; 2652 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 2653 continue; 2654 last_rp = idns->id_lbaf[j].lbaf_rp; 2655 ns->ns_best_block_size = 2656 1 << idns->id_lbaf[j].lbaf_lbads; 2657 } 2658 2659 if (ns->ns_best_block_size < nvme->n_min_block_size) 2660 ns->ns_best_block_size = nvme->n_min_block_size; 2661 2662 was_ignored = ns->ns_ignore; 2663 2664 /* 2665 * We currently don't support namespaces that use either: 2666 * - protection information 2667 * - illegal block size (< 512) 2668 */ 2669 if (idns->id_dps.dp_pinfo) { 2670 dev_err(nvme->n_dip, CE_WARN, 2671 "!ignoring namespace %d, unsupported feature: " 2672 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 2673 ns->ns_ignore = B_TRUE; 2674 } else if (ns->ns_block_size < 512) { 2675 dev_err(nvme->n_dip, CE_WARN, 2676 "!ignoring namespace %d, unsupported block size %"PRIu64, 2677 nsid, (uint64_t)ns->ns_block_size); 2678 ns->ns_ignore = B_TRUE; 2679 } else { 2680 ns->ns_ignore = B_FALSE; 2681 } 2682 2683 /* 2684 * Keep a count of namespaces which are attachable. 2685 * See comments in nvme_bd_driveinfo() to understand its effect. 2686 */ 2687 if (was_ignored) { 2688 /* 2689 * Previously ignored, but now not. Count it. 2690 */ 2691 if (!ns->ns_ignore) 2692 nvme->n_namespaces_attachable++; 2693 } else { 2694 /* 2695 * Wasn't ignored previously, but now needs to be. 2696 * Discount it. 2697 */ 2698 if (ns->ns_ignore) 2699 nvme->n_namespaces_attachable--; 2700 } 2701 2702 return (DDI_SUCCESS); 2703 } 2704 2705 static int 2706 nvme_init(nvme_t *nvme) 2707 { 2708 nvme_reg_cc_t cc = { 0 }; 2709 nvme_reg_aqa_t aqa = { 0 }; 2710 nvme_reg_asq_t asq = { 0 }; 2711 nvme_reg_acq_t acq = { 0 }; 2712 nvme_reg_cap_t cap; 2713 nvme_reg_vs_t vs; 2714 nvme_reg_csts_t csts; 2715 int i = 0; 2716 uint16_t nqueues; 2717 uint_t tq_threads; 2718 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2719 char *vendor, *product; 2720 2721 /* Check controller version */ 2722 vs.r = nvme_get32(nvme, NVME_REG_VS); 2723 nvme->n_version.v_major = vs.b.vs_mjr; 2724 nvme->n_version.v_minor = vs.b.vs_mnr; 2725 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 2726 nvme->n_version.v_major, nvme->n_version.v_minor); 2727 2728 if (nvme->n_version.v_major > nvme_version_major) { 2729 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 2730 nvme_version_major); 2731 if (nvme->n_strict_version) 2732 goto fail; 2733 } 2734 2735 /* retrieve controller configuration */ 2736 cap.r = nvme_get64(nvme, NVME_REG_CAP); 2737 2738 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 2739 dev_err(nvme->n_dip, CE_WARN, 2740 "!NVM command set not supported by hardware"); 2741 goto fail; 2742 } 2743 2744 nvme->n_nssr_supported = cap.b.cap_nssrs; 2745 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 2746 nvme->n_timeout = cap.b.cap_to; 2747 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 2748 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 2749 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 2750 2751 /* 2752 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 2753 * the base page size of 4k (1<<12), so add 12 here to get the real 2754 * page size value. 2755 */ 2756 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 2757 cap.b.cap_mpsmax + 12); 2758 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 2759 2760 /* 2761 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 
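 * Both the alignment and the minimum transfer size are the controller
 * page size computed above from CAP.MPSMIN/MPSMAX.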
2762 */ 2763 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 2764 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2765 2766 /* 2767 * Set up PRP DMA to transfer 1 page-aligned page at a time. 2768 * Maxxfer may be increased after we identified the controller limits. 2769 */ 2770 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 2771 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2772 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 2773 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 2774 2775 /* 2776 * Reset controller if it's still in ready state. 2777 */ 2778 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 2779 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 2780 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2781 nvme->n_dead = B_TRUE; 2782 goto fail; 2783 } 2784 2785 /* 2786 * Create the cq array with one completion queue to be assigned 2787 * to the admin queue pair and a limited number of taskqs (4). 2788 */ 2789 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) != 2790 DDI_SUCCESS) { 2791 dev_err(nvme->n_dip, CE_WARN, 2792 "!failed to pre-allocate admin completion queue"); 2793 goto fail; 2794 } 2795 /* 2796 * Create the admin queue pair. 2797 */ 2798 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 2799 != DDI_SUCCESS) { 2800 dev_err(nvme->n_dip, CE_WARN, 2801 "!unable to allocate admin qpair"); 2802 goto fail; 2803 } 2804 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 2805 nvme->n_ioq[0] = nvme->n_adminq; 2806 2807 nvme->n_progress |= NVME_ADMIN_QUEUE; 2808 2809 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2810 "admin-queue-len", nvme->n_admin_queue_len); 2811 2812 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 2813 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 2814 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 2815 2816 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 2817 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 2818 2819 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 2820 nvme_put64(nvme, NVME_REG_ASQ, asq); 2821 nvme_put64(nvme, NVME_REG_ACQ, acq); 2822 2823 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 2824 cc.b.cc_css = 0; /* use NVM command set */ 2825 cc.b.cc_mps = nvme->n_pageshift - 12; 2826 cc.b.cc_shn = 0; /* no shutdown in progress */ 2827 cc.b.cc_en = 1; /* enable controller */ 2828 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 2829 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 2830 2831 nvme_put32(nvme, NVME_REG_CC, cc.r); 2832 2833 /* 2834 * Wait for the controller to become ready. 2835 */ 2836 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2837 if (csts.b.csts_rdy == 0) { 2838 for (i = 0; i != nvme->n_timeout * 10; i++) { 2839 delay(drv_usectohz(50000)); 2840 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2841 2842 if (csts.b.csts_cfs == 1) { 2843 dev_err(nvme->n_dip, CE_WARN, 2844 "!controller fatal status at init"); 2845 ddi_fm_service_impact(nvme->n_dip, 2846 DDI_SERVICE_LOST); 2847 nvme->n_dead = B_TRUE; 2848 goto fail; 2849 } 2850 2851 if (csts.b.csts_rdy == 1) 2852 break; 2853 } 2854 } 2855 2856 if (csts.b.csts_rdy == 0) { 2857 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 2858 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2859 nvme->n_dead = B_TRUE; 2860 goto fail; 2861 } 2862 2863 /* 2864 * Assume an abort command limit of 1. We'll destroy and re-init 2865 * that later when we know the true abort command limit. 
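 * The semaphore throttles outstanding ABORT commands; it is destroyed
 * and re-initialized below once IDENTIFY CONTROLLER has reported the
 * real Abort Command Limit (ACL).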
2866 */ 2867 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 2868 2869 /* 2870 * Setup initial interrupt for admin queue. 2871 */ 2872 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 2873 != DDI_SUCCESS) && 2874 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 2875 != DDI_SUCCESS) && 2876 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 2877 != DDI_SUCCESS)) { 2878 dev_err(nvme->n_dip, CE_WARN, 2879 "!failed to setup initial interrupt"); 2880 goto fail; 2881 } 2882 2883 /* 2884 * Post an asynchronous event command to catch errors. 2885 * We assume the asynchronous events are supported as required by 2886 * specification (Figure 40 in section 5 of NVMe 1.2). 2887 * However, since at least qemu does not follow the specification, 2888 * we need a mechanism to protect ourselves. 2889 */ 2890 nvme->n_async_event_supported = B_TRUE; 2891 nvme_async_event(nvme); 2892 2893 /* 2894 * Identify Controller 2895 */ 2896 if (nvme_identify(nvme, B_FALSE, 0, (void **)&nvme->n_idctl) != 0) { 2897 dev_err(nvme->n_dip, CE_WARN, 2898 "!failed to identify controller"); 2899 goto fail; 2900 } 2901 2902 /* 2903 * Get Vendor & Product ID 2904 */ 2905 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2906 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2907 sata_split_model(model, &vendor, &product); 2908 2909 if (vendor == NULL) 2910 nvme->n_vendor = strdup("NVMe"); 2911 else 2912 nvme->n_vendor = strdup(vendor); 2913 2914 nvme->n_product = strdup(product); 2915 2916 /* 2917 * Get controller limits. 2918 */ 2919 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 2920 MIN(nvme->n_admin_queue_len / 10, 2921 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 2922 2923 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2924 "async-event-limit", nvme->n_async_event_limit); 2925 2926 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 2927 2928 /* 2929 * Reinitialize the semaphore with the true abort command limit 2930 * supported by the hardware. It's not necessary to disable interrupts 2931 * as only command aborts use the semaphore, and no commands are 2932 * executed or aborted while we're here. 2933 */ 2934 sema_destroy(&nvme->n_abort_sema); 2935 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 2936 SEMA_DRIVER, NULL); 2937 2938 nvme->n_progress |= NVME_CTRL_LIMITS; 2939 2940 if (nvme->n_idctl->id_mdts == 0) 2941 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 2942 else 2943 nvme->n_max_data_transfer_size = 2944 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 2945 2946 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 2947 2948 /* 2949 * Limit n_max_data_transfer_size to what we can handle in one PRP. 2950 * Chained PRPs are currently unsupported. 2951 * 2952 * This is a no-op on hardware which doesn't support a transfer size 2953 * big enough to require chained PRPs. 2954 */ 2955 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 2956 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 2957 2958 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 2959 2960 /* 2961 * Make sure the minimum/maximum queue entry sizes are not 2962 * larger/smaller than the default. 
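 * We only ever use the default entry sizes (64-byte submission and
 * 16-byte completion entries), so fail initialization if the controller
 * cannot accept them.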
2963 */ 2964 2965 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 2966 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 2967 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 2968 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 2969 goto fail; 2970 2971 /* 2972 * Check for the presence of a Volatile Write Cache. If present, 2973 * enable or disable based on the value of the property 2974 * volatile-write-cache-enable (default is enabled). 2975 */ 2976 nvme->n_write_cache_present = 2977 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 2978 2979 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2980 "volatile-write-cache-present", 2981 nvme->n_write_cache_present ? 1 : 0); 2982 2983 if (!nvme->n_write_cache_present) { 2984 nvme->n_write_cache_enabled = B_FALSE; 2985 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 2986 != 0) { 2987 dev_err(nvme->n_dip, CE_WARN, 2988 "!failed to %sable volatile write cache", 2989 nvme->n_write_cache_enabled ? "en" : "dis"); 2990 /* 2991 * Assume the cache is (still) enabled. 2992 */ 2993 nvme->n_write_cache_enabled = B_TRUE; 2994 } 2995 2996 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2997 "volatile-write-cache-enable", 2998 nvme->n_write_cache_enabled ? 1 : 0); 2999 3000 /* 3001 * Assume LBA Range Type feature is supported. If it isn't this 3002 * will be set to B_FALSE by nvme_get_features(). 3003 */ 3004 nvme->n_lba_range_supported = B_TRUE; 3005 3006 /* 3007 * Check support for Autonomous Power State Transition. 3008 */ 3009 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 3010 nvme->n_auto_pst_supported = 3011 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE; 3012 3013 /* 3014 * Assume Software Progress Marker feature is supported. If it isn't 3015 * this will be set to B_FALSE by nvme_get_features(). 3016 */ 3017 nvme->n_progress_supported = B_TRUE; 3018 3019 /* 3020 * Identify Namespaces 3021 */ 3022 nvme->n_namespace_count = nvme->n_idctl->id_nn; 3023 3024 if (nvme->n_namespace_count == 0) { 3025 dev_err(nvme->n_dip, CE_WARN, 3026 "!controllers without namespaces are not supported"); 3027 goto fail; 3028 } 3029 3030 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 3031 dev_err(nvme->n_dip, CE_WARN, 3032 "!too many namespaces: %d, limiting to %d\n", 3033 nvme->n_namespace_count, NVME_MINOR_MAX); 3034 nvme->n_namespace_count = NVME_MINOR_MAX; 3035 } 3036 3037 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 3038 nvme->n_namespace_count, KM_SLEEP); 3039 3040 for (i = 0; i != nvme->n_namespace_count; i++) { 3041 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER, 3042 NULL); 3043 nvme->n_ns[i].ns_ignore = B_TRUE; 3044 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS) 3045 goto fail; 3046 } 3047 3048 /* 3049 * Try to set up MSI/MSI-X interrupts. 3050 */ 3051 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 3052 != 0) { 3053 nvme_release_interrupts(nvme); 3054 3055 nqueues = MIN(UINT16_MAX, ncpus); 3056 3057 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 3058 nqueues) != DDI_SUCCESS) && 3059 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 3060 nqueues) != DDI_SUCCESS)) { 3061 dev_err(nvme->n_dip, CE_WARN, 3062 "!failed to setup MSI/MSI-X interrupts"); 3063 goto fail; 3064 } 3065 } 3066 3067 /* 3068 * Create I/O queue pairs. 
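 * First negotiate the queue count with SET FEATURES (Number of Queues),
 * then size the submission and completion queues and allocate them.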
3069 3070 3071 if (nvme_set_nqueues(nvme) != 0) { 3072 dev_err(nvme->n_dip, CE_WARN, 3073 "!failed to set number of I/O queues to %d", 3074 nvme->n_intr_cnt); 3075 goto fail; 3076 } 3077 3078 /* 3079 * Reallocate I/O queue array 3080 */ 3081 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 3082 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 3083 (nvme->n_submission_queues + 1), KM_SLEEP); 3084 nvme->n_ioq[0] = nvme->n_adminq; 3085 3086 /* 3087 * There should always be at least as many submission queues 3088 * as completion queues. 3089 */ 3090 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues); 3091 3092 nvme->n_ioq_count = nvme->n_submission_queues; 3093 3094 nvme->n_io_squeue_len = 3095 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries); 3096 3097 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len", 3098 nvme->n_io_squeue_len); 3099 3100 /* 3101 * Pre-allocate completion queues. 3102 * When there are the same number of submission and completion 3103 * queues there is no value in having a larger completion 3104 * queue length. 3105 */ 3106 if (nvme->n_submission_queues == nvme->n_completion_queues) 3107 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 3108 nvme->n_io_squeue_len); 3109 3110 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 3111 nvme->n_max_queue_entries); 3112 3113 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len", 3114 nvme->n_io_cqueue_len); 3115 3116 /* 3117 * Assign an equal number of taskq threads to each completion 3118 * queue, capping the total number of threads at the number 3119 * of CPUs. 3120 */ 3121 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues; 3122 3123 /* 3124 * In case the calculation above is zero, we need at least one 3125 * thread per completion queue. 3126 */ 3127 tq_threads = MAX(1, tq_threads); 3128 3129 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1, 3130 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) { 3131 dev_err(nvme->n_dip, CE_WARN, 3132 "!failed to pre-allocate completion queues"); 3133 goto fail; 3134 } 3135 3136 /* 3137 * If we use fewer completion queues than interrupt vectors, return 3138 * some of the interrupt vectors back to the system. 3139 */ 3140 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) { 3141 nvme_release_interrupts(nvme); 3142 3143 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, 3144 nvme->n_completion_queues + 1) != DDI_SUCCESS) { 3145 dev_err(nvme->n_dip, CE_WARN, 3146 "!failed to reduce number of interrupts"); 3147 goto fail; 3148 } 3149 } 3150 3151 /* 3152 * Alloc & register I/O queue pairs 3153 */ 3154 3155 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3156 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len, 3157 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 3158 dev_err(nvme->n_dip, CE_WARN, 3159 "!unable to allocate I/O qpair %d", i); 3160 goto fail; 3161 } 3162 3163 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) { 3164 dev_err(nvme->n_dip, CE_WARN, 3165 "!unable to create I/O qpair %d", i); 3166 goto fail; 3167 } 3168 } 3169 3170 /* 3171 * Post more asynchronous event commands to reduce event reporting 3172 * latency as suggested by the spec.
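 * One request was already posted during controller setup above, so
 * post the remaining n_async_event_limit - 1 requests here.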
3173 */ 3174 if (nvme->n_async_event_supported) { 3175 for (i = 1; i != nvme->n_async_event_limit; i++) 3176 nvme_async_event(nvme); 3177 } 3178 3179 return (DDI_SUCCESS); 3180 3181 fail: 3182 (void) nvme_reset(nvme, B_FALSE); 3183 return (DDI_FAILURE); 3184 } 3185 3186 static uint_t 3187 nvme_intr(caddr_t arg1, caddr_t arg2) 3188 { 3189 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 3190 nvme_t *nvme = (nvme_t *)arg1; 3191 int inum = (int)(uintptr_t)arg2; 3192 int ccnt = 0; 3193 int qnum; 3194 3195 if (inum >= nvme->n_intr_cnt) 3196 return (DDI_INTR_UNCLAIMED); 3197 3198 if (nvme->n_dead) 3199 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ? 3200 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED); 3201 3202 /* 3203 * The interrupt vector a queue uses is calculated as queue_idx % 3204 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 3205 * in steps of n_intr_cnt to process all queues using this vector. 3206 */ 3207 for (qnum = inum; 3208 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL; 3209 qnum += nvme->n_intr_cnt) { 3210 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]); 3211 } 3212 3213 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 3214 } 3215 3216 static void 3217 nvme_release_interrupts(nvme_t *nvme) 3218 { 3219 int i; 3220 3221 for (i = 0; i < nvme->n_intr_cnt; i++) { 3222 if (nvme->n_inth[i] == NULL) 3223 break; 3224 3225 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3226 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 3227 else 3228 (void) ddi_intr_disable(nvme->n_inth[i]); 3229 3230 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 3231 (void) ddi_intr_free(nvme->n_inth[i]); 3232 } 3233 3234 kmem_free(nvme->n_inth, nvme->n_inth_sz); 3235 nvme->n_inth = NULL; 3236 nvme->n_inth_sz = 0; 3237 3238 nvme->n_progress &= ~NVME_INTERRUPTS; 3239 } 3240 3241 static int 3242 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 3243 { 3244 int nintrs, navail, count; 3245 int ret; 3246 int i; 3247 3248 if (nvme->n_intr_types == 0) { 3249 ret = ddi_intr_get_supported_types(nvme->n_dip, 3250 &nvme->n_intr_types); 3251 if (ret != DDI_SUCCESS) { 3252 dev_err(nvme->n_dip, CE_WARN, 3253 "!%s: ddi_intr_get_supported types failed", 3254 __func__); 3255 return (ret); 3256 } 3257 #ifdef __x86 3258 if (get_hwenv() == HW_VMWARE) 3259 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 3260 #endif 3261 } 3262 3263 if ((nvme->n_intr_types & intr_type) == 0) 3264 return (DDI_FAILURE); 3265 3266 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 3267 if (ret != DDI_SUCCESS) { 3268 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 3269 __func__); 3270 return (ret); 3271 } 3272 3273 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 3274 if (ret != DDI_SUCCESS) { 3275 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 3276 __func__); 3277 return (ret); 3278 } 3279 3280 /* We want at most one interrupt per queue pair. 
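Extra vectors would go unused; e.g. asking for 4 queue pairs on a 32-CPU machine, we allocate at most 4 vectors.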
*/ 3281 if (navail > nqpairs) 3282 navail = nqpairs; 3283 3284 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 3285 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 3286 3287 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 3288 &count, 0); 3289 if (ret != DDI_SUCCESS) { 3290 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 3291 __func__); 3292 goto fail; 3293 } 3294 3295 nvme->n_intr_cnt = count; 3296 3297 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 3298 if (ret != DDI_SUCCESS) { 3299 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 3300 __func__); 3301 goto fail; 3302 } 3303 3304 for (i = 0; i < count; i++) { 3305 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 3306 (void *)nvme, (void *)(uintptr_t)i); 3307 if (ret != DDI_SUCCESS) { 3308 dev_err(nvme->n_dip, CE_WARN, 3309 "!%s: ddi_intr_add_handler failed", __func__); 3310 goto fail; 3311 } 3312 } 3313 3314 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 3315 3316 for (i = 0; i < count; i++) { 3317 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3318 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 3319 else 3320 ret = ddi_intr_enable(nvme->n_inth[i]); 3321 3322 if (ret != DDI_SUCCESS) { 3323 dev_err(nvme->n_dip, CE_WARN, 3324 "!%s: enabling interrupt %d failed", __func__, i); 3325 goto fail; 3326 } 3327 } 3328 3329 nvme->n_intr_type = intr_type; 3330 3331 nvme->n_progress |= NVME_INTERRUPTS; 3332 3333 return (DDI_SUCCESS); 3334 3335 fail: 3336 nvme_release_interrupts(nvme); 3337 3338 return (ret); 3339 } 3340 3341 static int 3342 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 3343 { 3344 _NOTE(ARGUNUSED(arg)); 3345 3346 pci_ereport_post(dip, fm_error, NULL); 3347 return (fm_error->fme_status); 3348 } 3349 3350 static void 3351 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a, 3352 void *b) 3353 { 3354 nvme_t *nvme = a; 3355 3356 nvme->n_dead = B_TRUE; 3357 3358 /* 3359 * Fail all outstanding commands, including those in the admin queue 3360 * (queue 0). 3361 */ 3362 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) { 3363 nvme_qpair_t *qp = nvme->n_ioq[i]; 3364 3365 mutex_enter(&qp->nq_mutex); 3366 for (size_t j = 0; j < qp->nq_nentry; j++) { 3367 nvme_cmd_t *cmd = qp->nq_cmd[j]; 3368 nvme_cmd_t *u_cmd; 3369 3370 if (cmd == NULL) { 3371 continue; 3372 } 3373 3374 /* 3375 * Since we have the queue lock held the entire time we 3376 * iterate over it, it's not possible for the queue to 3377 * change underneath us. Thus, we don't need to check 3378 * that the return value of nvme_unqueue_cmd matches the 3379 * requested cmd to unqueue. 3380 */ 3381 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 3382 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, 3383 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 3384 3385 ASSERT3P(u_cmd, ==, cmd); 3386 } 3387 mutex_exit(&qp->nq_mutex); 3388 } 3389 } 3390 3391 static int 3392 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3393 { 3394 nvme_t *nvme; 3395 int instance; 3396 int nregs; 3397 off_t regsize; 3398 int i; 3399 char name[32]; 3400 bd_ops_t ops = nvme_bd_ops; 3401 3402 if (cmd != DDI_ATTACH) 3403 return (DDI_FAILURE); 3404 3405 instance = ddi_get_instance(dip); 3406 3407 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 3408 return (DDI_FAILURE); 3409 3410 nvme = ddi_get_soft_state(nvme_state, instance); 3411 ddi_set_driver_private(dip, nvme); 3412 nvme->n_dip = dip; 3413 3414 /* Set up event handlers for hot removal. 
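On surprise removal nvme_remove_callback() marks the controller dead and dispatches the completion callbacks of every command still queued, including those on the admin queue.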
*/ 3415 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT, 3416 &nvme->n_rm_cookie) != DDI_SUCCESS) { 3417 goto fail; 3418 } 3419 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie, 3420 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) != 3421 DDI_SUCCESS) { 3422 goto fail; 3423 } 3424 3425 mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL); 3426 3427 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3428 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 3429 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 3430 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 3431 B_TRUE : B_FALSE; 3432 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3433 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 3434 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3435 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 3436 /* 3437 * Double up the default for completion queues in case of 3438 * queue sharing. 3439 */ 3440 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3441 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 3442 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3443 DDI_PROP_DONTPASS, "async-event-limit", 3444 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 3445 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3446 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 3447 B_TRUE : B_FALSE; 3448 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3449 DDI_PROP_DONTPASS, "min-phys-block-size", 3450 NVME_DEFAULT_MIN_BLOCK_SIZE); 3451 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3452 DDI_PROP_DONTPASS, "max-submission-queues", -1); 3453 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3454 DDI_PROP_DONTPASS, "max-completion-queues", -1); 3455 3456 if (!ISP2(nvme->n_min_block_size) || 3457 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 3458 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 3459 "using default %d", ISP2(nvme->n_min_block_size) ? 3460 "too low" : "not a power of 2", 3461 NVME_DEFAULT_MIN_BLOCK_SIZE); 3462 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 3463 } 3464 3465 if (nvme->n_submission_queues != -1 && 3466 (nvme->n_submission_queues < 1 || 3467 nvme->n_submission_queues > UINT16_MAX)) { 3468 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not " 3469 "valid. Must be [1..%d]", nvme->n_submission_queues, 3470 UINT16_MAX); 3471 nvme->n_submission_queues = -1; 3472 } 3473 3474 if (nvme->n_completion_queues != -1 && 3475 (nvme->n_completion_queues < 1 || 3476 nvme->n_completion_queues > UINT16_MAX)) { 3477 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not " 3478 "valid. 
Must be [1..%d]", nvme->n_completion_queues, 3479 UINT16_MAX); 3480 nvme->n_completion_queues = -1; 3481 } 3482 3483 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 3484 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 3485 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 3486 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 3487 3488 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 3489 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 3490 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 3491 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 3492 3493 if (nvme->n_async_event_limit < 1) 3494 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 3495 3496 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 3497 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 3498 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 3499 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 3500 3501 /* 3502 * Setup FMA support. 3503 */ 3504 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 3505 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 3506 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3507 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3508 3509 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 3510 3511 if (nvme->n_fm_cap) { 3512 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 3513 nvme->n_reg_acc_attr.devacc_attr_access = 3514 DDI_FLAGERR_ACC; 3515 3516 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 3517 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3518 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3519 } 3520 3521 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3522 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3523 pci_ereport_setup(dip); 3524 3525 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3526 ddi_fm_handler_register(dip, nvme_fm_errcb, 3527 (void *)nvme); 3528 } 3529 3530 nvme->n_progress |= NVME_FMA_INIT; 3531 3532 /* 3533 * The spec defines several register sets. Only the controller 3534 * registers (set 1) are currently used. 3535 */ 3536 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 3537 nregs < 2 || 3538 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 3539 goto fail; 3540 3541 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 3542 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 3543 dev_err(dip, CE_WARN, "!failed to map regset 1"); 3544 goto fail; 3545 } 3546 3547 nvme->n_progress |= NVME_REGS_MAPPED; 3548 3549 /* 3550 * Create PRP DMA cache 3551 */ 3552 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 3553 ddi_driver_name(dip), ddi_get_instance(dip)); 3554 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 3555 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 3556 NULL, (void *)nvme, NULL, 0); 3557 3558 if (nvme_init(nvme) != DDI_SUCCESS) 3559 goto fail; 3560 3561 if (!nvme->n_idctl->id_oncs.on_dset_mgmt) 3562 ops.o_free_space = NULL; 3563 3564 /* 3565 * Initialize the driver with the UFM subsystem 3566 */ 3567 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 3568 &nvme->n_ufmh, nvme) != 0) { 3569 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 3570 goto fail; 3571 } 3572 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 3573 ddi_ufm_update(nvme->n_ufmh); 3574 nvme->n_progress |= NVME_UFM_INIT; 3575 3576 /* 3577 * Attach the blkdev driver for each namespace. 
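 * Every namespace gets a minor node; only namespaces that are not
 * ignored also have a blkdev handle allocated and attached.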
3578 */ 3579 for (i = 0; i != nvme->n_namespace_count; i++) { 3580 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name, 3581 S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1), 3582 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 3583 dev_err(dip, CE_WARN, 3584 "!failed to create minor node for namespace %d", i); 3585 goto fail; 3586 } 3587 3588 if (nvme->n_ns[i].ns_ignore) 3589 continue; 3590 3591 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], 3592 &ops, &nvme->n_prp_dma_attr, KM_SLEEP); 3593 3594 if (nvme->n_ns[i].ns_bd_hdl == NULL) { 3595 dev_err(dip, CE_WARN, 3596 "!failed to get blkdev handle for namespace %d", i); 3597 goto fail; 3598 } 3599 3600 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) 3601 != DDI_SUCCESS) { 3602 dev_err(dip, CE_WARN, 3603 "!failed to attach blkdev handle for namespace %d", 3604 i); 3605 goto fail; 3606 } 3607 } 3608 3609 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 3610 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) 3611 != DDI_SUCCESS) { 3612 dev_err(dip, CE_WARN, "nvme_attach: " 3613 "cannot create devctl minor node"); 3614 goto fail; 3615 } 3616 3617 return (DDI_SUCCESS); 3618 3619 fail: 3620 /* attach successful anyway so that FMA can retire the device */ 3621 if (nvme->n_dead) 3622 return (DDI_SUCCESS); 3623 3624 (void) nvme_detach(dip, DDI_DETACH); 3625 3626 return (DDI_FAILURE); 3627 } 3628 3629 static int 3630 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 3631 { 3632 int instance, i; 3633 nvme_t *nvme; 3634 3635 if (cmd != DDI_DETACH) 3636 return (DDI_FAILURE); 3637 3638 instance = ddi_get_instance(dip); 3639 3640 nvme = ddi_get_soft_state(nvme_state, instance); 3641 3642 if (nvme == NULL) 3643 return (DDI_FAILURE); 3644 3645 ddi_remove_minor_node(dip, "devctl"); 3646 mutex_destroy(&nvme->n_minor.nm_mutex); 3647 3648 if (nvme->n_ns) { 3649 for (i = 0; i != nvme->n_namespace_count; i++) { 3650 ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name); 3651 mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex); 3652 3653 if (nvme->n_ns[i].ns_bd_hdl) { 3654 (void) bd_detach_handle( 3655 nvme->n_ns[i].ns_bd_hdl); 3656 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); 3657 } 3658 3659 if (nvme->n_ns[i].ns_idns) 3660 kmem_free(nvme->n_ns[i].ns_idns, 3661 sizeof (nvme_identify_nsid_t)); 3662 if (nvme->n_ns[i].ns_devid) 3663 strfree(nvme->n_ns[i].ns_devid); 3664 } 3665 3666 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 3667 nvme->n_namespace_count); 3668 } 3669 if (nvme->n_progress & NVME_UFM_INIT) { 3670 ddi_ufm_fini(nvme->n_ufmh); 3671 mutex_destroy(&nvme->n_fwslot_mutex); 3672 } 3673 3674 if (nvme->n_progress & NVME_INTERRUPTS) 3675 nvme_release_interrupts(nvme); 3676 3677 for (i = 0; i < nvme->n_cq_count; i++) { 3678 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL) 3679 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq); 3680 } 3681 3682 if (nvme->n_ioq_count > 0) { 3683 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3684 if (nvme->n_ioq[i] != NULL) { 3685 /* TODO: send destroy queue commands */ 3686 nvme_free_qpair(nvme->n_ioq[i]); 3687 } 3688 } 3689 3690 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 3691 (nvme->n_ioq_count + 1)); 3692 } 3693 3694 if (nvme->n_prp_cache != NULL) { 3695 kmem_cache_destroy(nvme->n_prp_cache); 3696 } 3697 3698 if (nvme->n_progress & NVME_REGS_MAPPED) { 3699 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 3700 (void) nvme_reset(nvme, B_FALSE); 3701 } 3702 3703 if (nvme->n_progress & NVME_CTRL_LIMITS) 3704 sema_destroy(&nvme->n_abort_sema); 3705 3706 if (nvme->n_progress & NVME_ADMIN_QUEUE) 3707 
nvme_free_qpair(nvme->n_adminq); 3708 3709 if (nvme->n_cq_count > 0) { 3710 nvme_destroy_cq_array(nvme, 0); 3711 nvme->n_cq = NULL; 3712 nvme->n_cq_count = 0; 3713 } 3714 3715 if (nvme->n_idctl) 3716 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 3717 3718 if (nvme->n_progress & NVME_REGS_MAPPED) 3719 ddi_regs_map_free(&nvme->n_regh); 3720 3721 if (nvme->n_progress & NVME_FMA_INIT) { 3722 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3723 ddi_fm_handler_unregister(nvme->n_dip); 3724 3725 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3726 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3727 pci_ereport_teardown(nvme->n_dip); 3728 3729 ddi_fm_fini(nvme->n_dip); 3730 } 3731 3732 if (nvme->n_vendor != NULL) 3733 strfree(nvme->n_vendor); 3734 3735 if (nvme->n_product != NULL) 3736 strfree(nvme->n_product); 3737 3738 /* Clean up hot removal event handler. */ 3739 if (nvme->n_ev_rm_cb_id != NULL) { 3740 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id); 3741 } 3742 nvme->n_ev_rm_cb_id = NULL; 3743 3744 ddi_soft_state_free(nvme_state, instance); 3745 3746 return (DDI_SUCCESS); 3747 } 3748 3749 static int 3750 nvme_quiesce(dev_info_t *dip) 3751 { 3752 int instance; 3753 nvme_t *nvme; 3754 3755 instance = ddi_get_instance(dip); 3756 3757 nvme = ddi_get_soft_state(nvme_state, instance); 3758 3759 if (nvme == NULL) 3760 return (DDI_FAILURE); 3761 3762 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE); 3763 3764 (void) nvme_reset(nvme, B_TRUE); 3765 3766 return (DDI_FAILURE); 3767 } 3768 3769 static int 3770 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma) 3771 { 3772 nvme_t *nvme = cmd->nc_nvme; 3773 uint_t nprp_per_page, nprp; 3774 uint64_t *prp; 3775 const ddi_dma_cookie_t *cookie; 3776 uint_t idx; 3777 uint_t ncookies = ddi_dma_ncookies(dma); 3778 3779 if (ncookies == 0) 3780 return (DDI_FAILURE); 3781 3782 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL) 3783 return (DDI_FAILURE); 3784 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress; 3785 3786 if (ncookies == 1) { 3787 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 3788 return (DDI_SUCCESS); 3789 } else if (ncookies == 2) { 3790 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL) 3791 return (DDI_FAILURE); 3792 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress; 3793 return (DDI_SUCCESS); 3794 } 3795 3796 /* 3797 * At this point, we're always operating on cookies at 3798 * index >= 1 and writing the addresses of those cookies 3799 * into a new page. The address of that page is stored 3800 * as the second PRP entry. 3801 */ 3802 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t); 3803 ASSERT(nprp_per_page > 0); 3804 3805 /* 3806 * We currently don't support chained PRPs and set up our DMA 3807 * attributes to reflect that. If we still get an I/O request 3808 * that needs a chained PRP something is very wrong. Account 3809 * for the first cookie here, which we've placed in d_prp[0]. 3810 */ 3811 nprp = howmany(ncookies - 1, nprp_per_page); 3812 VERIFY(nprp == 1); 3813 3814 /* 3815 * Allocate a page of pointers, in which we'll write the 3816 * addresses of cookies 1 to `ncookies`. 
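 * For illustration, assuming a 4k page size: a transfer that maps to
 * four cookies ends up with d_prp[0] holding the address of cookie 0,
 * d_prp[1] holding the DMA address of this page, and the page itself
 * holding the addresses of cookies 1, 2, and 3 as consecutive 64-bit
 * entries.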
3817 */ 3818 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 3819 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 3820 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress; 3821 3822 prp = (uint64_t *)cmd->nc_prp->nd_memp; 3823 for (idx = 1; idx < ncookies; idx++) { 3824 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL) 3825 return (DDI_FAILURE); 3826 *prp++ = cookie->dmac_laddress; 3827 } 3828 3829 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 3830 DDI_DMA_SYNC_FORDEV); 3831 return (DDI_SUCCESS); 3832 } 3833 3834 /* 3835 * The maximum number of requests supported for a deallocate request is 3836 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and 3837 * unchanged through at least 1.4a). The definition of nvme_range_t is also 3838 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for 3839 * a deallocate request will fit into the smallest supported namespace page 3840 * (4k). 3841 */ 3842 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096); 3843 3844 static int 3845 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize, 3846 int allocflag) 3847 { 3848 const dkioc_free_list_t *dfl = xfer->x_dfl; 3849 const dkioc_free_list_ext_t *exts = dfl->dfl_exts; 3850 nvme_t *nvme = cmd->nc_nvme; 3851 nvme_range_t *ranges = NULL; 3852 uint_t i; 3853 3854 /* 3855 * The number of ranges in the request is 0s based (that is 3856 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ..., 3857 * word10 == 255 -> 256 ranges). Therefore the allowed values are 3858 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request, 3859 * we either provided bad info in nvme_bd_driveinfo() or there is a bug 3860 * in blkdev. 3861 */ 3862 VERIFY3U(dfl->dfl_num_exts, >, 0); 3863 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES); 3864 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff; 3865 3866 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE; 3867 3868 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag); 3869 if (cmd->nc_prp == NULL) 3870 return (DDI_FAILURE); 3871 3872 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 3873 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp; 3874 3875 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress; 3876 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 3877 3878 for (i = 0; i < dfl->dfl_num_exts; i++) { 3879 uint64_t lba, len; 3880 3881 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize; 3882 len = exts[i].dfle_length / blocksize; 3883 3884 VERIFY3U(len, <=, UINT32_MAX); 3885 3886 /* No context attributes for a deallocate request */ 3887 ranges[i].nr_ctxattr = 0; 3888 ranges[i].nr_len = len; 3889 ranges[i].nr_lba = lba; 3890 } 3891 3892 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 3893 DDI_DMA_SYNC_FORDEV); 3894 3895 return (DDI_SUCCESS); 3896 } 3897 3898 static nvme_cmd_t * 3899 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 3900 { 3901 nvme_t *nvme = ns->ns_nvme; 3902 nvme_cmd_t *cmd; 3903 int allocflag; 3904 3905 /* 3906 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 3907 */ 3908 allocflag = (xfer->x_flags & BD_XFER_POLL) ? 
KM_NOSLEEP : KM_SLEEP; 3909 cmd = nvme_alloc_cmd(nvme, allocflag); 3910 3911 if (cmd == NULL) 3912 return (NULL); 3913 3914 cmd->nc_sqe.sqe_opc = opc; 3915 cmd->nc_callback = nvme_bd_xfer_done; 3916 cmd->nc_xfer = xfer; 3917 3918 switch (opc) { 3919 case NVME_OPC_NVM_WRITE: 3920 case NVME_OPC_NVM_READ: 3921 VERIFY(xfer->x_nblks <= 0x10000); 3922 3923 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3924 3925 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 3926 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 3927 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 3928 3929 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS) 3930 goto fail; 3931 break; 3932 3933 case NVME_OPC_NVM_FLUSH: 3934 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3935 break; 3936 3937 case NVME_OPC_NVM_DSET_MGMT: 3938 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3939 3940 if (nvme_fill_ranges(cmd, xfer, 3941 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS) 3942 goto fail; 3943 break; 3944 3945 default: 3946 goto fail; 3947 } 3948 3949 return (cmd); 3950 3951 fail: 3952 nvme_free_cmd(cmd); 3953 return (NULL); 3954 } 3955 3956 static void 3957 nvme_bd_xfer_done(void *arg) 3958 { 3959 nvme_cmd_t *cmd = arg; 3960 bd_xfer_t *xfer = cmd->nc_xfer; 3961 int error = 0; 3962 3963 error = nvme_check_cmd_status(cmd); 3964 nvme_free_cmd(cmd); 3965 3966 bd_xfer_done(xfer, error); 3967 } 3968 3969 static void 3970 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 3971 { 3972 nvme_namespace_t *ns = arg; 3973 nvme_t *nvme = ns->ns_nvme; 3974 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable); 3975 3976 /* 3977 * Set the blkdev qcount to the number of submission queues. 3978 * It will then create one waitq/runq pair for each submission 3979 * queue and spread I/O requests across the queues. 3980 */ 3981 drive->d_qcount = nvme->n_ioq_count; 3982 3983 /* 3984 * I/O activity to individual namespaces is distributed across 3985 * each of the d_qcount blkdev queues (which has been set to 3986 * the number of nvme submission queues). d_qsize is the number 3987 * of submitted and not completed I/Os within each queue that blkdev 3988 * will allow before it starts holding them in the waitq. 3989 * 3990 * Each namespace will create a child blkdev instance, for each one 3991 * we try and set the d_qsize so that each namespace gets an 3992 * equal portion of the submission queue. 3993 * 3994 * If post instantiation of the nvme drive, n_namespaces_attachable 3995 * changes and a namespace is attached it could calculate a 3996 * different d_qsize. It may even be that the sum of the d_qsizes is 3997 * now beyond the submission queue size. Should that be the case 3998 * and the I/O rate is such that blkdev attempts to submit more 3999 * I/Os than the size of the submission queue, the excess I/Os 4000 * will be held behind the semaphore nq_sema. 4001 */ 4002 drive->d_qsize = nvme->n_io_squeue_len / ns_count; 4003 4004 /* 4005 * Don't let the queue size drop below the minimum, though. 4006 */ 4007 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN); 4008 4009 /* 4010 * d_maxxfer is not set, which means the value is taken from the DMA 4011 * attributes specified to bd_alloc_handle. 
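 * In our case these are the PRP DMA attributes (n_prp_dma_attr) passed
 * to bd_alloc_handle() in nvme_attach(), so the maximum transfer size
 * blkdev uses follows directly from those attributes.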
4012 */ 4013 4014 drive->d_removable = B_FALSE; 4015 drive->d_hotpluggable = B_FALSE; 4016 4017 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64)); 4018 drive->d_target = ns->ns_id; 4019 drive->d_lun = 0; 4020 4021 drive->d_model = nvme->n_idctl->id_model; 4022 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 4023 drive->d_vendor = nvme->n_vendor; 4024 drive->d_vendor_len = strlen(nvme->n_vendor); 4025 drive->d_product = nvme->n_product; 4026 drive->d_product_len = strlen(nvme->n_product); 4027 drive->d_serial = nvme->n_idctl->id_serial; 4028 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 4029 drive->d_revision = nvme->n_idctl->id_fwrev; 4030 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 4031 4032 /* 4033 * If we support the dataset management command, the only restrictions 4034 * on a discard request are the maximum number of ranges (segments) 4035 * per single request. 4036 */ 4037 if (nvme->n_idctl->id_oncs.on_dset_mgmt) 4038 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES; 4039 } 4040 4041 static int 4042 nvme_bd_mediainfo(void *arg, bd_media_t *media) 4043 { 4044 nvme_namespace_t *ns = arg; 4045 nvme_t *nvme = ns->ns_nvme; 4046 4047 if (nvme->n_dead) { 4048 return (EIO); 4049 } 4050 4051 media->m_nblks = ns->ns_block_count; 4052 media->m_blksize = ns->ns_block_size; 4053 media->m_readonly = B_FALSE; 4054 media->m_solidstate = B_TRUE; 4055 4056 media->m_pblksize = ns->ns_best_block_size; 4057 4058 return (0); 4059 } 4060 4061 static int 4062 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 4063 { 4064 nvme_t *nvme = ns->ns_nvme; 4065 nvme_cmd_t *cmd; 4066 nvme_qpair_t *ioq; 4067 boolean_t poll; 4068 int ret; 4069 4070 if (nvme->n_dead) { 4071 return (EIO); 4072 } 4073 4074 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 4075 if (cmd == NULL) 4076 return (ENOMEM); 4077 4078 cmd->nc_sqid = xfer->x_qnum + 1; 4079 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 4080 ioq = nvme->n_ioq[cmd->nc_sqid]; 4081 4082 /* 4083 * Get the polling flag before submitting the command. The command may 4084 * complete immediately after it was submitted, which means we must 4085 * treat both cmd and xfer as if they have been freed already. 4086 */ 4087 poll = (xfer->x_flags & BD_XFER_POLL) != 0; 4088 4089 ret = nvme_submit_io_cmd(ioq, cmd); 4090 4091 if (ret != 0) 4092 return (ret); 4093 4094 if (!poll) 4095 return (0); 4096 4097 do { 4098 cmd = nvme_retrieve_cmd(nvme, ioq); 4099 if (cmd != NULL) 4100 cmd->nc_callback(cmd); 4101 else 4102 drv_usecwait(10); 4103 } while (ioq->nq_active_cmds != 0); 4104 4105 return (0); 4106 } 4107 4108 static int 4109 nvme_bd_read(void *arg, bd_xfer_t *xfer) 4110 { 4111 nvme_namespace_t *ns = arg; 4112 4113 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 4114 } 4115 4116 static int 4117 nvme_bd_write(void *arg, bd_xfer_t *xfer) 4118 { 4119 nvme_namespace_t *ns = arg; 4120 4121 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 4122 } 4123 4124 static int 4125 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 4126 { 4127 nvme_namespace_t *ns = arg; 4128 4129 if (ns->ns_nvme->n_dead) 4130 return (EIO); 4131 4132 /* 4133 * If the volatile write cache is not present or not enabled the FLUSH 4134 * command is a no-op, so we can take a shortcut here. 
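 * With no write cache present the transfer is completed with ENOTSUP;
 * with a cache that is present but disabled it is completed successfully
 * without sending a command to the device.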
4135 */ 4136 if (!ns->ns_nvme->n_write_cache_present) { 4137 bd_xfer_done(xfer, ENOTSUP); 4138 return (0); 4139 } 4140 4141 if (!ns->ns_nvme->n_write_cache_enabled) { 4142 bd_xfer_done(xfer, 0); 4143 return (0); 4144 } 4145 4146 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 4147 } 4148 4149 static int 4150 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 4151 { 4152 nvme_namespace_t *ns = arg; 4153 nvme_t *nvme = ns->ns_nvme; 4154 4155 if (nvme->n_dead) { 4156 return (EIO); 4157 } 4158 4159 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 4160 if (*(uint64_t *)ns->ns_eui64 != 0) { 4161 return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN, 4162 sizeof (ns->ns_eui64), ns->ns_eui64, devid)); 4163 } else { 4164 return (ddi_devid_init(devinfo, DEVID_ENCAP, 4165 strlen(ns->ns_devid), ns->ns_devid, devid)); 4166 } 4167 } 4168 4169 static int 4170 nvme_bd_free_space(void *arg, bd_xfer_t *xfer) 4171 { 4172 nvme_namespace_t *ns = arg; 4173 4174 if (xfer->x_dfl == NULL) 4175 return (EINVAL); 4176 4177 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt) 4178 return (ENOTSUP); 4179 4180 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT)); 4181 } 4182 4183 static int 4184 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 4185 { 4186 #ifndef __lock_lint 4187 _NOTE(ARGUNUSED(cred_p)); 4188 #endif 4189 minor_t minor = getminor(*devp); 4190 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 4191 int nsid = NVME_MINOR_NSID(minor); 4192 nvme_minor_state_t *nm; 4193 int rv = 0; 4194 4195 if (otyp != OTYP_CHR) 4196 return (EINVAL); 4197 4198 if (nvme == NULL) 4199 return (ENXIO); 4200 4201 if (nsid > nvme->n_namespace_count) 4202 return (ENXIO); 4203 4204 if (nvme->n_dead) 4205 return (EIO); 4206 4207 nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor; 4208 4209 mutex_enter(&nm->nm_mutex); 4210 if (nm->nm_oexcl) { 4211 rv = EBUSY; 4212 goto out; 4213 } 4214 4215 if (flag & FEXCL) { 4216 if (nm->nm_ocnt != 0) { 4217 rv = EBUSY; 4218 goto out; 4219 } 4220 nm->nm_oexcl = B_TRUE; 4221 } 4222 4223 nm->nm_ocnt++; 4224 4225 out: 4226 mutex_exit(&nm->nm_mutex); 4227 return (rv); 4228 4229 } 4230 4231 static int 4232 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 4233 { 4234 #ifndef __lock_lint 4235 _NOTE(ARGUNUSED(cred_p)); 4236 _NOTE(ARGUNUSED(flag)); 4237 #endif 4238 minor_t minor = getminor(dev); 4239 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 4240 int nsid = NVME_MINOR_NSID(minor); 4241 nvme_minor_state_t *nm; 4242 4243 if (otyp != OTYP_CHR) 4244 return (ENXIO); 4245 4246 if (nvme == NULL) 4247 return (ENXIO); 4248 4249 if (nsid > nvme->n_namespace_count) 4250 return (ENXIO); 4251 4252 nm = nsid == 0 ? 
&nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor; 4253 4254 mutex_enter(&nm->nm_mutex); 4255 if (nm->nm_oexcl) 4256 nm->nm_oexcl = B_FALSE; 4257 4258 ASSERT(nm->nm_ocnt > 0); 4259 nm->nm_ocnt--; 4260 mutex_exit(&nm->nm_mutex); 4261 4262 return (0); 4263 } 4264 4265 static int 4266 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4267 cred_t *cred_p) 4268 { 4269 _NOTE(ARGUNUSED(cred_p)); 4270 int rv = 0; 4271 void *idctl; 4272 4273 if ((mode & FREAD) == 0) 4274 return (EPERM); 4275 4276 if (nioc->n_len < NVME_IDENTIFY_BUFSIZE) 4277 return (EINVAL); 4278 4279 if ((rv = nvme_identify(nvme, B_TRUE, nsid, (void **)&idctl)) != 0) 4280 return (rv); 4281 4282 if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode) 4283 != 0) 4284 rv = EFAULT; 4285 4286 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE); 4287 4288 return (rv); 4289 } 4290 4291 /* 4292 * Execute commands on behalf of the various ioctls. 4293 */ 4294 static int 4295 nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr, 4296 uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout) 4297 { 4298 nvme_cmd_t *cmd; 4299 nvme_qpair_t *ioq; 4300 int rv = 0; 4301 4302 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 4303 if (is_admin) { 4304 cmd->nc_sqid = 0; 4305 ioq = nvme->n_adminq; 4306 } else { 4307 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1; 4308 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 4309 ioq = nvme->n_ioq[cmd->nc_sqid]; 4310 } 4311 4312 /* 4313 * This function is used to facilitate requests from 4314 * userspace, so don't panic if the command fails. This 4315 * is especially true for admin passthru commands, where 4316 * the actual command data structure is entirely defined 4317 * by userspace. 4318 */ 4319 cmd->nc_dontpanic = B_TRUE; 4320 4321 cmd->nc_callback = nvme_wakeup_cmd; 4322 cmd->nc_sqe = *sqe; 4323 4324 if ((rwk & (FREAD | FWRITE)) != 0) { 4325 if (data_addr == NULL) { 4326 rv = EINVAL; 4327 goto free_cmd; 4328 } 4329 4330 if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ, 4331 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 4332 dev_err(nvme->n_dip, CE_WARN, 4333 "!nvme_zalloc_dma failed for nvme_ioc_cmd()"); 4334 4335 rv = ENOMEM; 4336 goto free_cmd; 4337 } 4338 4339 if ((rv = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0) 4340 goto free_cmd; 4341 4342 if ((rwk & FWRITE) != 0) { 4343 if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp, 4344 data_len, rwk & FKIOCTL) != 0) { 4345 rv = EFAULT; 4346 goto free_cmd; 4347 } 4348 } 4349 } 4350 4351 if (is_admin) { 4352 nvme_admin_cmd(cmd, timeout); 4353 } else { 4354 mutex_enter(&cmd->nc_mutex); 4355 4356 rv = nvme_submit_io_cmd(ioq, cmd); 4357 4358 if (rv == EAGAIN) { 4359 mutex_exit(&cmd->nc_mutex); 4360 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 4361 "!nvme_ioc_cmd() failed, I/O Q full"); 4362 goto free_cmd; 4363 } 4364 4365 nvme_wait_cmd(cmd, timeout); 4366 4367 mutex_exit(&cmd->nc_mutex); 4368 } 4369 4370 if (cqe != NULL) 4371 *cqe = cmd->nc_cqe; 4372 4373 if ((rv = nvme_check_cmd_status(cmd)) != 0) { 4374 dev_err(nvme->n_dip, CE_WARN, 4375 "!nvme_ioc_cmd() failed with sct = %x, sc = %x", 4376 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 4377 4378 goto free_cmd; 4379 } 4380 4381 if ((rwk & FREAD) != 0) { 4382 if (ddi_copyout(cmd->nc_dma->nd_memp, 4383 data_addr, data_len, rwk & FKIOCTL) != 0) 4384 rv = EFAULT; 4385 } 4386 4387 free_cmd: 4388 nvme_free_cmd(cmd); 4389 4390 return (rv); 4391 } 4392 4393 static int 4394 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 4395 int mode, cred_t
*cred_p) 4396 { 4397 _NOTE(ARGUNUSED(nsid, cred_p)); 4398 int rv = 0; 4399 nvme_reg_cap_t cap = { 0 }; 4400 nvme_capabilities_t nc; 4401 4402 if ((mode & FREAD) == 0) 4403 return (EPERM); 4404 4405 if (nioc->n_len < sizeof (nc)) 4406 return (EINVAL); 4407 4408 cap.r = nvme_get64(nvme, NVME_REG_CAP); 4409 4410 /* 4411 * The MPSMIN and MPSMAX fields in the CAP register use 0 to 4412 * specify the base page size of 4k (1<<12), so add 12 here to 4413 * get the real page size value. 4414 */ 4415 nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax); 4416 nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin); 4417 4418 if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0) 4419 rv = EFAULT; 4420 4421 return (rv); 4422 } 4423 4424 static int 4425 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 4426 int mode, cred_t *cred_p) 4427 { 4428 _NOTE(ARGUNUSED(cred_p)); 4429 void *log = NULL; 4430 size_t bufsize = 0; 4431 int rv = 0; 4432 4433 if ((mode & FREAD) == 0) 4434 return (EPERM); 4435 4436 switch (nioc->n_arg) { 4437 case NVME_LOGPAGE_ERROR: 4438 if (nsid != 0) 4439 return (EINVAL); 4440 break; 4441 case NVME_LOGPAGE_HEALTH: 4442 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0) 4443 return (EINVAL); 4444 4445 if (nsid == 0) 4446 nsid = (uint32_t)-1; 4447 4448 break; 4449 case NVME_LOGPAGE_FWSLOT: 4450 if (nsid != 0) 4451 return (EINVAL); 4452 break; 4453 default: 4454 if (!NVME_IS_VENDOR_SPECIFIC_LOGPAGE(nioc->n_arg)) 4455 return (EINVAL); 4456 if (nioc->n_len > NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE) { 4457 dev_err(nvme->n_dip, CE_NOTE, "!Vendor-specific log " 4458 "page size exceeds device maximum supported size: " 4459 "%lu", NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE); 4460 return (EINVAL); 4461 } 4462 if (nioc->n_len == 0) 4463 return (EINVAL); 4464 bufsize = nioc->n_len; 4465 if (nsid == 0) 4466 nsid = (uint32_t)-1; 4467 } 4468 4469 if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid) 4470 != DDI_SUCCESS) 4471 return (EIO); 4472 4473 if (nioc->n_len < bufsize) { 4474 kmem_free(log, bufsize); 4475 return (EINVAL); 4476 } 4477 4478 if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0) 4479 rv = EFAULT; 4480 4481 nioc->n_len = bufsize; 4482 kmem_free(log, bufsize); 4483 4484 return (rv); 4485 } 4486 4487 static int 4488 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 4489 int mode, cred_t *cred_p) 4490 { 4491 _NOTE(ARGUNUSED(cred_p)); 4492 void *buf = NULL; 4493 size_t bufsize = 0; 4494 uint32_t res = 0; 4495 uint8_t feature; 4496 int rv = 0; 4497 4498 if ((mode & FREAD) == 0) 4499 return (EPERM); 4500 4501 if ((nioc->n_arg >> 32) > 0xff) 4502 return (EINVAL); 4503 4504 feature = (uint8_t)(nioc->n_arg >> 32); 4505 4506 switch (feature) { 4507 case NVME_FEAT_ARBITRATION: 4508 case NVME_FEAT_POWER_MGMT: 4509 case NVME_FEAT_ERROR: 4510 case NVME_FEAT_NQUEUES: 4511 case NVME_FEAT_INTR_COAL: 4512 case NVME_FEAT_WRITE_ATOM: 4513 case NVME_FEAT_ASYNC_EVENT: 4514 case NVME_FEAT_PROGRESS: 4515 if (nsid != 0) 4516 return (EINVAL); 4517 break; 4518 4519 case NVME_FEAT_TEMPERATURE: 4520 if (nsid != 0) 4521 return (EINVAL); 4522 res = nioc->n_arg & 0xffffffffUL; 4523 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) { 4524 nvme_temp_threshold_t tt; 4525 4526 tt.r = res; 4527 if (tt.b.tt_thsel != NVME_TEMP_THRESH_OVER && 4528 tt.b.tt_thsel != NVME_TEMP_THRESH_UNDER) { 4529 return (EINVAL); 4530 } 4531 4532 if (tt.b.tt_tmpsel > NVME_TEMP_THRESH_MAX_SENSOR) { 4533 return (EINVAL); 4534 } 4535 } else if (res != 0) { 4536 return (EINVAL); 4537 } 4538 break; 4539 
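 /*
 * For NVME_FEAT_INTR_VECT the lower 32 bits of the argument select the
 * interrupt vector to query; it must refer to one of the vectors this
 * driver allocated, which is checked against n_intr_cnt below.
 */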
4540 case NVME_FEAT_INTR_VECT: 4541 if (nsid != 0) 4542 return (EINVAL); 4543 4544 res = nioc->n_arg & 0xffffffffUL; 4545 if (res >= nvme->n_intr_cnt) 4546 return (EINVAL); 4547 break; 4548 4549 case NVME_FEAT_LBA_RANGE: 4550 if (nvme->n_lba_range_supported == B_FALSE) 4551 return (EINVAL); 4552 4553 if (nsid == 0 || 4554 nsid > nvme->n_namespace_count) 4555 return (EINVAL); 4556 4557 break; 4558 4559 case NVME_FEAT_WRITE_CACHE: 4560 if (nsid != 0) 4561 return (EINVAL); 4562 4563 if (!nvme->n_write_cache_present) 4564 return (EINVAL); 4565 4566 break; 4567 4568 case NVME_FEAT_AUTO_PST: 4569 if (nsid != 0) 4570 return (EINVAL); 4571 4572 if (!nvme->n_auto_pst_supported) 4573 return (EINVAL); 4574 4575 break; 4576 4577 default: 4578 return (EINVAL); 4579 } 4580 4581 rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf, 4582 &bufsize); 4583 if (rv != 0) 4584 return (rv); 4585 4586 if (nioc->n_len < bufsize) { 4587 kmem_free(buf, bufsize); 4588 return (EINVAL); 4589 } 4590 4591 if (buf && ddi_copyout(buf, (void*)nioc->n_buf, bufsize, mode) != 0) 4592 rv = EFAULT; 4593 4594 kmem_free(buf, bufsize); 4595 nioc->n_arg = res; 4596 nioc->n_len = bufsize; 4597 4598 return (rv); 4599 } 4600 4601 static int 4602 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4603 cred_t *cred_p) 4604 { 4605 _NOTE(ARGUNUSED(nsid, mode, cred_p)); 4606 4607 if ((mode & FREAD) == 0) 4608 return (EPERM); 4609 4610 nioc->n_arg = nvme->n_intr_cnt; 4611 return (0); 4612 } 4613 4614 static int 4615 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4616 cred_t *cred_p) 4617 { 4618 _NOTE(ARGUNUSED(nsid, cred_p)); 4619 int rv = 0; 4620 4621 if ((mode & FREAD) == 0) 4622 return (EPERM); 4623 4624 if (nioc->n_len < sizeof (nvme->n_version)) 4625 return (ENOMEM); 4626 4627 if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf, 4628 sizeof (nvme->n_version), mode) != 0) 4629 rv = EFAULT; 4630 4631 return (rv); 4632 } 4633 4634 static int 4635 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4636 cred_t *cred_p) 4637 { 4638 _NOTE(ARGUNUSED(mode)); 4639 nvme_format_nvm_t frmt = { 0 }; 4640 int c_nsid = nsid != 0 ? nsid - 1 : 0; 4641 4642 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 4643 return (EPERM); 4644 4645 frmt.r = nioc->n_arg & 0xffffffff; 4646 4647 /* 4648 * Check whether the FORMAT NVM command is supported. 4649 */ 4650 if (nvme->n_idctl->id_oacs.oa_format == 0) 4651 return (EINVAL); 4652 4653 /* 4654 * Don't allow format or secure erase of individual namespace if that 4655 * would cause a format or secure erase of all namespaces. 4656 */ 4657 if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0) 4658 return (EINVAL); 4659 4660 if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE && 4661 nvme->n_idctl->id_fna.fn_sec_erase != 0) 4662 return (EINVAL); 4663 4664 /* 4665 * Don't allow formatting with Protection Information. 4666 */ 4667 if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0) 4668 return (EINVAL); 4669 4670 /* 4671 * Don't allow formatting using an illegal LBA format, or any LBA format 4672 * that uses metadata. 4673 */ 4674 if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf || 4675 nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0) 4676 return (EINVAL); 4677 4678 /* 4679 * Don't allow formatting using an illegal Secure Erase setting. 
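 * In particular, a cryptographic erase is only permitted when the
 * controller advertises support for it in the FNA field of the identify
 * controller data.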
4680 */ 4681 if (frmt.b.fm_ses > NVME_FRMT_MAX_SES || 4682 (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO && 4683 nvme->n_idctl->id_fna.fn_crypt_erase == 0)) 4684 return (EINVAL); 4685 4686 if (nsid == 0) 4687 nsid = (uint32_t)-1; 4688 4689 return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0, 4690 B_FALSE, frmt.b.fm_ses)); 4691 } 4692 4693 static int 4694 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4695 cred_t *cred_p) 4696 { 4697 _NOTE(ARGUNUSED(nioc, mode)); 4698 int rv = 0; 4699 4700 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 4701 return (EPERM); 4702 4703 if (nsid == 0) 4704 return (EINVAL); 4705 4706 if (nvme->n_ns[nsid - 1].ns_ignore) 4707 return (0); 4708 4709 rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl); 4710 if (rv != DDI_SUCCESS) 4711 rv = EBUSY; 4712 4713 return (rv); 4714 } 4715 4716 static int 4717 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4718 cred_t *cred_p) 4719 { 4720 _NOTE(ARGUNUSED(nioc, mode)); 4721 nvme_identify_nsid_t *idns; 4722 int rv = 0; 4723 4724 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 4725 return (EPERM); 4726 4727 if (nsid == 0) 4728 return (EINVAL); 4729 4730 /* 4731 * Identify namespace again, free old identify data. 4732 */ 4733 idns = nvme->n_ns[nsid - 1].ns_idns; 4734 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) 4735 return (EIO); 4736 4737 kmem_free(idns, sizeof (nvme_identify_nsid_t)); 4738 4739 if (nvme->n_ns[nsid - 1].ns_ignore) 4740 return (ENOTSUP); 4741 4742 if (nvme->n_ns[nsid - 1].ns_bd_hdl == NULL) 4743 nvme->n_ns[nsid - 1].ns_bd_hdl = bd_alloc_handle( 4744 &nvme->n_ns[nsid - 1], &nvme_bd_ops, &nvme->n_prp_dma_attr, 4745 KM_SLEEP); 4746 4747 rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl); 4748 if (rv != DDI_SUCCESS) 4749 rv = EBUSY; 4750 4751 return (rv); 4752 } 4753 4754 static void 4755 nvme_ufm_update(nvme_t *nvme) 4756 { 4757 mutex_enter(&nvme->n_fwslot_mutex); 4758 ddi_ufm_update(nvme->n_ufmh); 4759 if (nvme->n_fwslot != NULL) { 4760 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t)); 4761 nvme->n_fwslot = NULL; 4762 } 4763 mutex_exit(&nvme->n_fwslot_mutex); 4764 } 4765 4766 static int 4767 nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 4768 int mode, cred_t *cred_p) 4769 { 4770 int rv = 0; 4771 size_t len, copylen; 4772 offset_t offset; 4773 uintptr_t buf; 4774 nvme_sqe_t sqe = { 4775 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD 4776 }; 4777 4778 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 4779 return (EPERM); 4780 4781 if (nsid != 0) 4782 return (EINVAL); 4783 4784 /* 4785 * The offset (in n_len) is restricted to the number of DWORDs in 4786 * 32 bits. 4787 */ 4788 if (nioc->n_len > NVME_FW_OFFSETB_MAX) 4789 return (EINVAL); 4790 4791 /* Confirm that both offset and length are a multiple of DWORD bytes */ 4792 if ((nioc->n_len & NVME_DWORD_MASK) != 0 || 4793 (nioc->n_arg & NVME_DWORD_MASK) != 0) 4794 return (EINVAL); 4795 4796 len = nioc->n_len; 4797 offset = nioc->n_arg; 4798 buf = (uintptr_t)nioc->n_buf; 4799 while (len > 0 && rv == 0) { 4800 /* 4801 * nvme_ioc_cmd() does not use SGLs or PRP lists. 4802 * It is limited to 2 PRPs per NVM command, so limit 4803 * the size of the data to 2 pages. 
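 * For example, with a 4k page size each iteration transfers at most 8k
 * of image data; cdw10 carries the chunk length in DWORDs (0s based) and
 * cdw11 carries the running offset in DWORDs.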
4804 */ 4805 copylen = MIN(2 * nvme->n_pagesize, len); 4806 4807 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1; 4808 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT); 4809 4810 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen, 4811 FWRITE, NULL, nvme_admin_cmd_timeout); 4812 4813 buf += copylen; 4814 offset += copylen; 4815 len -= copylen; 4816 } 4817 4818 /* 4819 * Let the DDI UFM subsystem know that the firmware information for 4820 * this device has changed. 4821 */ 4822 nvme_ufm_update(nvme); 4823 4824 return (rv); 4825 } 4826 4827 static int 4828 nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 4829 int mode, cred_t *cred_p) 4830 { 4831 nvme_firmware_commit_dw10_t fc_dw10 = { 0 }; 4832 uint32_t slot = nioc->n_arg & 0xffffffff; 4833 uint32_t action = nioc->n_arg >> 32; 4834 nvme_cqe_t cqe = { 0 }; 4835 nvme_sqe_t sqe = { 4836 .sqe_opc = NVME_OPC_FW_ACTIVATE 4837 }; 4838 int timeout; 4839 int rv; 4840 4841 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 4842 return (EPERM); 4843 4844 if (nsid != 0) 4845 return (EINVAL); 4846 4847 /* Validate slot is in range. */ 4848 if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX) 4849 return (EINVAL); 4850 4851 switch (action) { 4852 case NVME_FWC_SAVE: 4853 case NVME_FWC_SAVE_ACTIVATE: 4854 timeout = nvme_commit_save_cmd_timeout; 4855 break; 4856 case NVME_FWC_ACTIVATE: 4857 case NVME_FWC_ACTIVATE_IMMED: 4858 timeout = nvme_admin_cmd_timeout; 4859 break; 4860 default: 4861 return (EINVAL); 4862 } 4863 4864 fc_dw10.b.fc_slot = slot; 4865 fc_dw10.b.fc_action = action; 4866 sqe.sqe_cdw10 = fc_dw10.r; 4867 4868 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout); 4869 4870 nioc->n_arg = ((uint64_t)cqe.cqe_sf.sf_sct << 16) | cqe.cqe_sf.sf_sc; 4871 4872 /* 4873 * Let the DDI UFM subsystem know that the firmware information for 4874 * this device has changed. 4875 */ 4876 nvme_ufm_update(nvme); 4877 4878 return (rv); 4879 } 4880 4881 /* 4882 * Helper to copy in a passthru command from userspace, handling 4883 * different data models. 4884 */ 4885 static int 4886 nvme_passthru_copy_cmd_in(const void *buf, nvme_passthru_cmd_t *cmd, int mode) 4887 { 4888 #ifdef _MULTI_DATAMODEL 4889 switch (ddi_model_convert_from(mode & FMODELS)) { 4890 case DDI_MODEL_ILP32: { 4891 nvme_passthru_cmd32_t cmd32; 4892 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0) 4893 return (-1); 4894 cmd->npc_opcode = cmd32.npc_opcode; 4895 cmd->npc_timeout = cmd32.npc_timeout; 4896 cmd->npc_flags = cmd32.npc_flags; 4897 cmd->npc_cdw12 = cmd32.npc_cdw12; 4898 cmd->npc_cdw13 = cmd32.npc_cdw13; 4899 cmd->npc_cdw14 = cmd32.npc_cdw14; 4900 cmd->npc_cdw15 = cmd32.npc_cdw15; 4901 cmd->npc_buflen = cmd32.npc_buflen; 4902 cmd->npc_buf = cmd32.npc_buf; 4903 break; 4904 } 4905 case DDI_MODEL_NONE: 4906 #endif 4907 if (ddi_copyin(buf, (void*)cmd, sizeof (nvme_passthru_cmd_t), 4908 mode) != 0) 4909 return (-1); 4910 #ifdef _MULTI_DATAMODEL 4911 break; 4912 } 4913 #endif 4914 return (0); 4915 } 4916 4917 /* 4918 * Helper to copy out a passthru command result to userspace, handling 4919 * different data models. 
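 * The 32-bit layout differs only in the width of npc_buflen and npc_buf,
 * so the conversion is a field-by-field copy into the native
 * nvme_passthru_cmd_t.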
4920 */ 4921 static int 4922 nvme_passthru_copy_cmd_out(const nvme_passthru_cmd_t *cmd, void *buf, int mode) 4923 { 4924 #ifdef _MULTI_DATAMODEL 4925 switch (ddi_model_convert_from(mode & FMODELS)) { 4926 case DDI_MODEL_ILP32: { 4927 nvme_passthru_cmd32_t cmd32; 4928 bzero(&cmd32, sizeof (cmd32)); 4929 cmd32.npc_opcode = cmd->npc_opcode; 4930 cmd32.npc_status = cmd->npc_status; 4931 cmd32.npc_err = cmd->npc_err; 4932 cmd32.npc_timeout = cmd->npc_timeout; 4933 cmd32.npc_flags = cmd->npc_flags; 4934 cmd32.npc_cdw0 = cmd->npc_cdw0; 4935 cmd32.npc_cdw12 = cmd->npc_cdw12; 4936 cmd32.npc_cdw13 = cmd->npc_cdw13; 4937 cmd32.npc_cdw14 = cmd->npc_cdw14; 4938 cmd32.npc_cdw15 = cmd->npc_cdw15; 4939 cmd32.npc_buflen = (size32_t)cmd->npc_buflen; 4940 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf; 4941 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0) 4942 return (-1); 4943 break; 4944 } 4945 case DDI_MODEL_NONE: 4946 #endif 4947 if (ddi_copyout(cmd, buf, sizeof (nvme_passthru_cmd_t), 4948 mode) != 0) 4949 return (-1); 4950 #ifdef _MULTI_DATAMODEL 4951 break; 4952 } 4953 #endif 4954 return (0); 4955 } 4956 4957 /* 4958 * Run an arbitrary vendor-specific admin command on the device. 4959 */ 4960 static int 4961 nvme_ioctl_passthru(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4962 cred_t *cred_p) 4963 { 4964 int rv = 0; 4965 uint_t timeout = 0; 4966 int rwk = 0; 4967 nvme_passthru_cmd_t cmd; 4968 size_t expected_passthru_size = 0; 4969 nvme_sqe_t sqe; 4970 nvme_cqe_t cqe; 4971 4972 bzero(&cmd, sizeof (cmd)); 4973 bzero(&sqe, sizeof (sqe)); 4974 bzero(&cqe, sizeof (cqe)); 4975 4976 /* 4977 * Basic checks: permissions, data model, argument size. 4978 */ 4979 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 4980 return (EPERM); 4981 4982 /* 4983 * Compute the expected size of the argument buffer 4984 */ 4985 #ifdef _MULTI_DATAMODEL 4986 switch (ddi_model_convert_from(mode & FMODELS)) { 4987 case DDI_MODEL_ILP32: 4988 expected_passthru_size = sizeof (nvme_passthru_cmd32_t); 4989 break; 4990 case DDI_MODEL_NONE: 4991 #endif 4992 expected_passthru_size = sizeof (nvme_passthru_cmd_t); 4993 #ifdef _MULTI_DATAMODEL 4994 break; 4995 } 4996 #endif 4997 4998 if (nioc->n_len != expected_passthru_size) { 4999 cmd.npc_err = NVME_PASSTHRU_ERR_CMD_SIZE; 5000 rv = EINVAL; 5001 goto out; 5002 } 5003 5004 /* 5005 * Ensure the device supports the standard vendor specific 5006 * admin command format. 5007 */ 5008 if (!nvme->n_idctl->id_nvscc.nv_spec) { 5009 cmd.npc_err = NVME_PASSTHRU_ERR_NOT_SUPPORTED; 5010 rv = ENOTSUP; 5011 goto out; 5012 } 5013 5014 if (nvme_passthru_copy_cmd_in((const void*)nioc->n_buf, &cmd, mode)) 5015 return (EFAULT); 5016 5017 if (!NVME_IS_VENDOR_SPECIFIC_CMD(cmd.npc_opcode)) { 5018 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_OPCODE; 5019 rv = EINVAL; 5020 goto out; 5021 } 5022 5023 /* 5024 * This restriction is not mandated by the spec, so future work 5025 * could relax this if it's necessary to support commands that both 5026 * read and write. 
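 * For now a command may be flagged as NVME_PASSTHRU_READ or
 * NVME_PASSTHRU_WRITE, but not both; the flag also determines whether
 * the data buffer is copied in before the command is issued or copied
 * out after it completes.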
5027 */ 5028 if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0 && 5029 (cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0) { 5030 cmd.npc_err = NVME_PASSTHRU_ERR_READ_AND_WRITE; 5031 rv = EINVAL; 5032 goto out; 5033 } 5034 if (cmd.npc_timeout > nvme_vendor_specific_admin_cmd_max_timeout) { 5035 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_TIMEOUT; 5036 rv = EINVAL; 5037 goto out; 5038 } 5039 timeout = cmd.npc_timeout; 5040 5041 /* 5042 * Passed-thru command buffer verification: 5043 * - Size is multiple of DWords 5044 * - Non-null iff the length is non-zero 5045 * - Null if neither reading nor writing data. 5046 * - Non-null if reading or writing. 5047 * - Maximum buffer size. 5048 */ 5049 if ((cmd.npc_buflen % sizeof (uint32_t)) != 0) { 5050 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5051 rv = EINVAL; 5052 goto out; 5053 } 5054 if (((void*)cmd.npc_buf != NULL && cmd.npc_buflen == 0) || 5055 ((void*)cmd.npc_buf == NULL && cmd.npc_buflen != 0)) { 5056 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5057 rv = EINVAL; 5058 goto out; 5059 } 5060 if (cmd.npc_flags == 0 && (void*)cmd.npc_buf != NULL) { 5061 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5062 rv = EINVAL; 5063 goto out; 5064 } 5065 if ((cmd.npc_flags != 0) && ((void*)cmd.npc_buf == NULL)) { 5066 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5067 rv = EINVAL; 5068 goto out; 5069 } 5070 if (cmd.npc_buflen > nvme_vendor_specific_admin_cmd_size) { 5071 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5072 rv = EINVAL; 5073 goto out; 5074 } 5075 if ((cmd.npc_buflen >> NVME_DWORD_SHIFT) > UINT32_MAX) { 5076 cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER; 5077 rv = EINVAL; 5078 goto out; 5079 } 5080 5081 sqe.sqe_opc = cmd.npc_opcode; 5082 sqe.sqe_nsid = nsid; 5083 sqe.sqe_cdw10 = (uint32_t)(cmd.npc_buflen >> NVME_DWORD_SHIFT); 5084 sqe.sqe_cdw12 = cmd.npc_cdw12; 5085 sqe.sqe_cdw13 = cmd.npc_cdw13; 5086 sqe.sqe_cdw14 = cmd.npc_cdw14; 5087 sqe.sqe_cdw15 = cmd.npc_cdw15; 5088 if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0) 5089 rwk = FREAD; 5090 else if ((cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0) 5091 rwk = FWRITE; 5092 5093 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void*)cmd.npc_buf, 5094 cmd.npc_buflen, rwk, &cqe, timeout); 5095 cmd.npc_status = cqe.cqe_sf.sf_sc; 5096 cmd.npc_cdw0 = cqe.cqe_dw0; 5097 5098 out: 5099 if (nvme_passthru_copy_cmd_out(&cmd, (void*)nioc->n_buf, mode)) 5100 rv = EFAULT; 5101 return (rv); 5102 } 5103 5104 static int 5105 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p, 5106 int *rval_p) 5107 { 5108 #ifndef __lock_lint 5109 _NOTE(ARGUNUSED(rval_p)); 5110 #endif 5111 minor_t minor = getminor(dev); 5112 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 5113 int nsid = NVME_MINOR_NSID(minor); 5114 int rv = 0; 5115 nvme_ioctl_t nioc; 5116 5117 int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = { 5118 NULL, 5119 nvme_ioctl_identify, 5120 nvme_ioctl_identify, 5121 nvme_ioctl_capabilities, 5122 nvme_ioctl_get_logpage, 5123 nvme_ioctl_get_features, 5124 nvme_ioctl_intr_cnt, 5125 nvme_ioctl_version, 5126 nvme_ioctl_format, 5127 nvme_ioctl_detach, 5128 nvme_ioctl_attach, 5129 nvme_ioctl_firmware_download, 5130 nvme_ioctl_firmware_commit, 5131 nvme_ioctl_passthru 5132 }; 5133 5134 if (nvme == NULL) 5135 return (ENXIO); 5136 5137 if (nsid > nvme->n_namespace_count) 5138 return (ENXIO); 5139 5140 if (IS_DEVCTL(cmd)) 5141 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0)); 5142 5143 #ifdef _MULTI_DATAMODEL 5144 switch (ddi_model_convert_from(mode & FMODELS)) { 5145 case 
DDI_MODEL_ILP32: { 5146 nvme_ioctl32_t nioc32; 5147 if (ddi_copyin((void*)arg, &nioc32, sizeof (nvme_ioctl32_t), 5148 mode) != 0) 5149 return (EFAULT); 5150 nioc.n_len = nioc32.n_len; 5151 nioc.n_buf = nioc32.n_buf; 5152 nioc.n_arg = nioc32.n_arg; 5153 break; 5154 } 5155 case DDI_MODEL_NONE: 5156 #endif 5157 if (ddi_copyin((void*)arg, &nioc, sizeof (nvme_ioctl_t), mode) 5158 != 0) 5159 return (EFAULT); 5160 #ifdef _MULTI_DATAMODEL 5161 break; 5162 } 5163 #endif 5164 5165 if (nvme->n_dead && cmd != NVME_IOC_DETACH) 5166 return (EIO); 5167 5168 5169 if (cmd == NVME_IOC_IDENTIFY_CTRL) { 5170 /* 5171 * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl and 5172 * attachment point nodes. 5173 */ 5174 nsid = 0; 5175 } else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) { 5176 /* 5177 * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node, it 5178 * will always return identify data for namespace 1. 5179 */ 5180 nsid = 1; 5181 } 5182 5183 if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL) 5184 rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode, 5185 cred_p); 5186 else 5187 rv = EINVAL; 5188 5189 #ifdef _MULTI_DATAMODEL 5190 switch (ddi_model_convert_from(mode & FMODELS)) { 5191 case DDI_MODEL_ILP32: { 5192 nvme_ioctl32_t nioc32; 5193 5194 nioc32.n_len = (size32_t)nioc.n_len; 5195 nioc32.n_buf = (uintptr32_t)nioc.n_buf; 5196 nioc32.n_arg = nioc.n_arg; 5197 5198 if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t), 5199 mode) != 0) 5200 return (EFAULT); 5201 break; 5202 } 5203 case DDI_MODEL_NONE: 5204 #endif 5205 if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode) 5206 != 0) 5207 return (EFAULT); 5208 #ifdef _MULTI_DATAMODEL 5209 break; 5210 } 5211 #endif 5212 5213 return (rv); 5214 } 5215 5216 /* 5217 * DDI UFM Callbacks 5218 */ 5219 static int 5220 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 5221 ddi_ufm_image_t *img) 5222 { 5223 nvme_t *nvme = arg; 5224 5225 if (imgno != 0) 5226 return (EINVAL); 5227 5228 ddi_ufm_image_set_desc(img, "Firmware"); 5229 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot); 5230 5231 return (0); 5232 } 5233 5234 /* 5235 * Fill out firmware slot information for the requested slot. The firmware 5236 * slot information is gathered by requesting the Firmware Slot Information log 5237 * page. The format of the page is described in section 5.10.1.3. 5238 * 5239 * We lazily cache the log page on the first call and then invalidate the cache 5240 * data after a successful firmware download or firmware commit command. 5241 * The cached data is protected by a mutex as the state can change 5242 * asynchronous to this callback. 
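 * The cached log page is invalidated in nvme_ufm_update(), which runs
 * after every firmware download or firmware commit ioctl.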
5243 */ 5244 static int 5245 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 5246 uint_t slotno, ddi_ufm_slot_t *slot) 5247 { 5248 nvme_t *nvme = arg; 5249 void *log = NULL; 5250 size_t bufsize; 5251 ddi_ufm_attr_t attr = 0; 5252 char fw_ver[NVME_FWVER_SZ + 1]; 5253 int ret; 5254 5255 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1)) 5256 return (EINVAL); 5257 5258 mutex_enter(&nvme->n_fwslot_mutex); 5259 if (nvme->n_fwslot == NULL) { 5260 ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, 5261 NVME_LOGPAGE_FWSLOT, 0); 5262 if (ret != DDI_SUCCESS || 5263 bufsize != sizeof (nvme_fwslot_log_t)) { 5264 if (log != NULL) 5265 kmem_free(log, bufsize); 5266 mutex_exit(&nvme->n_fwslot_mutex); 5267 return (EIO); 5268 } 5269 nvme->n_fwslot = (nvme_fwslot_log_t *)log; 5270 } 5271 5272 /* 5273 * NVMe numbers firmware slots starting at 1 5274 */ 5275 if (slotno == (nvme->n_fwslot->fw_afi - 1)) 5276 attr |= DDI_UFM_ATTR_ACTIVE; 5277 5278 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0) 5279 attr |= DDI_UFM_ATTR_WRITEABLE; 5280 5281 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') { 5282 attr |= DDI_UFM_ATTR_EMPTY; 5283 } else { 5284 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno], 5285 NVME_FWVER_SZ); 5286 fw_ver[NVME_FWVER_SZ] = '\0'; 5287 ddi_ufm_slot_set_version(slot, fw_ver); 5288 } 5289 mutex_exit(&nvme->n_fwslot_mutex); 5290 5291 ddi_ufm_slot_set_attrs(slot, attr); 5292 5293 return (0); 5294 } 5295 5296 static int 5297 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 5298 { 5299 *caps = DDI_UFM_CAP_REPORT; 5300 return (0); 5301 } 5302