/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2019 Western Digital Corporation.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver was written to conform to version 1.2.1 of the NVMe
 * specification. It may work with newer versions, but that is completely
 * untested and disabled by default.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to
 * use a single-message MSI(-X) or FIXED interrupt. Later in the attach
 * process it will switch to multiple-message MSI(-X) if supported. The
 * driver wants to have one interrupt vector per CPU, but it will work
 * correctly if fewer are available. Interrupts can be shared by queues; the
 * interrupt handler will iterate through the I/O queue array in steps of
 * n_intr_cnt. Usually only the admin queue will share an interrupt with one
 * I/O queue. The interrupt handler will retrieve completed commands from all
 * queues sharing an interrupt vector and will post them to a taskq for
 * completion processing.
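 *
 * As an illustrative sketch (see nvme_intr() for the real implementation),
 * the handler for interrupt vector "inum" retrieves completions roughly as:
 *
 *	for (qnum = inum; qnum < nvme->n_cq_count; qnum += nvme->n_intr_cnt)
 *		ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);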
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller
 * than the maximum of 65536. If the hardware doesn't provide enough queues,
 * fewer interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array.
 * The array index is used as command identifier (CID) in the submission
 * queue entry. Some commands may take a very long time to complete, and if
 * the queue wraps around in that time a submission may find the next array
 * slot to still be used by a long-running command. In this case the array is
 * sequentially searched for the next free slot. The length of the command
 * array is the same as the configured queue length. Queue overrun is
 * prevented by the semaphore, so a command submission may block if the queue
 * is full.
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts
 * are turned off while dumping, the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
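 *
 * A minimal sketch of that polling loop, for a command already submitted to
 * queue pair "qp" (illustrative only):
 *
 *	while ((cmd = nvme_retrieve_cmd(nvme, qp)) == NULL)
 *		drv_usecwait(10);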
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support protection information. This driver does not support any of
 * this and ignores namespaces that have these attributes.
 *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
 * (EUI64). This driver uses the EUI64, if present, to generate the devid,
 * and passes it to blkdev for use in the device node names. As this is
 * currently untested, namespaces with EUI64 are ignored by default.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces using a
 * 32bit minor node number.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller
 * and one minor node for each namespace. The only operations supported by
 * those minor nodes are open(9E), close(9E), and ioctl(9E). This serves as
 * the interface for the nvmeadm(1M) utility.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with
 * presenting a disk device to the system. As a result, the processing of I/O
 * requests is relatively simple as blkdev takes care of partitioning,
 * boundary checks, DMA setup, and splitting of transfers into manageable
 * chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the
 * namespace format back to blkdev as physical block size to support
 * partition and block alignment. The devid is either based on the namespace
 * EUI64, if present, or composed using the device vendor ID, model number,
 * serial number, and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off,
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests.
 * Before this limit is known the driver assumes it is at least 1 and posts a
 * single asynchronous request. Later, when the limit is known, more
 * asynchronous event requests are posted to allow quicker reception of error
 * information. When an asynchronous event is posted by the hardware the
 * driver will parse the error status fields and log information or fault the
 * device, depending on the severity of the asynchronous event. The
 * asynchronous event request is then reused and posted to the admin queue
 * again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware
 * appears to be healthy the driver attempts to abort the command. The
 * original command timeout is also applied to the abort command. If the
 * abort times out too, the driver assumes the device to be dead, fences it
 * off, and calls FMA to retire it. In all other cases the aborted command
 * should return immediately with a status indicating it was aborted, and the
 * driver will wait indefinitely for that to happen. No timeout handling of
 * normal I/O commands is presently done.
 *
 * Any command that times out due to the controller dropping dead will be put
 * on the nvme_lost_cmds list if it references DMA memory. This will prevent
 * the DMA memory from being reused by the system and later being written to
 * by a "dead" NVMe controller.
 *
 *
 * Locking:
 *
 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
 * when accessing shared state and submission queue registers; ncq_mutex
 * is held when accessing completion queue state and registers.
 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
 * mutexes themselves.
 *
 * Each command also has its own nc_mutex, which is associated with the
 * condition variable nc_cv. It is only used on admin commands which are run
 * synchronously. In that case it must be held across calls to
 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of
 * by nvme_admin_cmd(). It must also be held whenever the completion state of
 * the command is changed or while an admin command timeout is handled.
 *
 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired
 * first. More than one nc_mutex may only be held when aborting commands. In
 * this case, the nc_mutex of the command to be aborted must be held across
 * the call to nvme_abort_cmd() to prevent the command from completing while
 * the abort is in progress.
 *
 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
 * acquired first. More than one nq_mutex is never held by a single thread.
 * The ncq_mutex is only held by nvme_retrieve_cmd() and
 * nvme_process_iocq(). nvme_process_iocq() is only called from the
 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
 * mutex is non-contentious but is required for implementation completeness
 * and safety.
 *
 * Each minor node has its own nm_mutex, which protects the open count
 * nm_ocnt and exclusive-open flag nm_oexcl.
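 *
 * Illustratively, a thread that must hold both a command and its submission
 * queue locked therefore acquires and releases the mutexes like this:
 *
 *	mutex_enter(&cmd->nc_mutex);
 *	mutex_enter(&qp->nq_mutex);
 *	...
 *	mutex_exit(&qp->nq_mutex);
 *	mutex_exit(&cmd->nc_mutex);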
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry
 * point is still provided which is used to send a shutdown notification to
 * the device.
 *
 *
 * DDI UFM Support
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation (an example follows the list):
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to
 *   be posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile
 *   write cache
 * - min-phys-block-size: the minimum physical block size to report to
 *   blkdev, which is among other things the basis for the ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared
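 *
 * For example, assuming the stock driver.conf(4) syntax, a line like the
 * following in nvme.conf would cap the number of I/O submission queues at
 * four (a hypothetical value):
 *
 *	max-submission-queues=4;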
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"

/*
 * Assertions to make sure that we've properly captured various aspects of the
 * packed structures and haven't broken them during updates.
 */
CTASSERT(sizeof (nvme_identify_ctrl_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);

CTASSERT(sizeof (nvme_identify_nsid_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);

CTASSERT(sizeof (nvme_identify_primary_caps_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);


/* NVMe spec version supported */
static const int nvme_version_major = 1;

/* tunable for admin command timeout in seconds, default is 1s */
int nvme_admin_cmd_timeout = 1;

/* tunable for FORMAT NVM command timeout in seconds, default is 600s */
int nvme_format_cmd_timeout = 600;

/* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
int nvme_commit_save_cmd_timeout = 15;
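/*
 * As an illustration, these tunables can be set at boot time in /etc/system,
 * e.g. (with a hypothetical value of ten seconds):
 *
 *	set nvme:nvme_admin_cmd_timeout = 10
 */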
static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static void nvme_admin_cmd(nvme_cmd_t *, int);
static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
static void nvme_async_event(nvme_t *);
static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t,
    uint8_t, boolean_t, uint8_t);
static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t,
    ...);
static int nvme_identify(nvme_t *, boolean_t, uint32_t, void **);
static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t,
    uint32_t *, void **, size_t *);
static int nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

/* DDI UFM callbacks */
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

static ddi_ufm_ops_t nvme_ufm_ops = {
	NULL,
	nvme_ufm_fill_image,
	nvme_ufm_fill_slot,
	nvme_ufm_getcaps
};

#define	NVME_MINOR_INST_SHIFT	9
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)
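/*
 * Worked example of the minor number encoding: with NVME_MINOR_INST_SHIFT
 * being 9, namespace 2 of controller instance 1 maps to the minor number
 * NVME_MINOR(1, 2) == (1 << 9) | 2 == 514, from which NVME_MINOR_INST(514)
 * == 1 and NVME_MINOR_NSID(514) == 2 recover the two components.
 */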
static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= 1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It
 * uses a 64bit base address aligned to this page size. There is no
 * limitation on chaining PRPs together for arbitrarily large DMA transfers.
 */
static ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xfff,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= 0x1000,
	.dma_attr_seg		= 0xfff,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};
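/*
 * As a worked example of the PRP scheme (a property of the NVMe spec rather
 * than of this driver): with CC.MPS selecting 4096-byte pages, a page-aligned
 * 16 KiB transfer needs four PRP entries. PRP1 in the submission queue entry
 * points at the first page directly, while PRP2 points at a PRP list holding
 * the remaining three entries; a two-page transfer needs no list, as PRP1 and
 * PRP2 simply name the two pages.
 */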
/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * A SGL entry describes a chunk of DMA memory using a 64bit base address and
 * a 32bit length field. SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes.
 */
static ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xffffffffUL,
	.dma_attr_align		= 1,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x10,
	.dma_attr_maxxfer	= 0xfffffffffULL,
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 0x10,
	.dma_attr_flags		= 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
};

static struct cb_ops nvme_cb_ops = {
	.cb_open	= nvme_open,
	.cb_close	= nvme_close,
	.cb_strategy	= nodev,
	.cb_print	= nodev,
	.cb_dump	= nodev,
	.cb_read	= nodev,
	.cb_write	= nodev,
	.cb_ioctl	= nvme_ioctl,
	.cb_devmap	= nodev,
	.cb_mmap	= nodev,
	.cb_segmap	= nodev,
	.cb_chpoll	= nochpoll,
	.cb_prop_op	= ddi_prop_op,
	.cb_str		= 0,
	.cb_flag	= D_NEW | D_MP,
	.cb_rev		= CB_REV,
	.cb_aread	= nodev,
	.cb_awrite	= nodev
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev	= DEVO_REV,
	.devo_refcnt	= 0,
	.devo_getinfo	= ddi_no_info,
	.devo_identify	= nulldev,
	.devo_probe	= nulldev,
	.devo_attach	= nvme_attach,
	.devo_detach	= nvme_detach,
	.devo_reset	= nodev,
	.devo_cb_ops	= &nvme_cb_ops,
	.devo_bus_ops	= NULL,
	.devo_power	= NULL,
	.devo_quiesce	= nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops	= &mod_driverops,
	.drv_linkinfo	= "NVMe v1.1b",
	.drv_dev_ops	= &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev		= MODREV_1,
	.ml_linkage	= { &nvme_modldrv, NULL }
};

static bd_ops_t nvme_bd_ops = {
	.o_version	= BD_OPS_CURRENT_VERSION,
	.o_drive_info	= nvme_bd_driveinfo,
	.o_media_info	= nvme_bd_mediainfo,
	.o_devid_init	= nvme_bd_devid,
	.o_sync_cache	= nvme_bd_sync,
	.o_read		= nvme_bd_read,
	.o_write	= nvme_bd_write,
};

/*
 * This list will hold commands that have timed out and couldn't be aborted.
 * As we don't know what the hardware may still do with the DMA memory we
 * can't free them, so we'll keep them forever on this list where we can
 * easily look at them with mdb.
 */
static struct list nvme_lost_cmds;
static kmutex_t nvme_lc_mutex;

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
	    offsetof(nvme_cmd_t, nc_list));

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		/* also undo the cache creation above on failure */
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	if (!list_is_empty(&nvme_lost_cmds))
		return (DDI_FAILURE);

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}

static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}
static void
nvme_free_dma_common(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	nvme_free_dma_common(dma);
	kmem_free(dma, sizeof (*dma));
}

/* ARGSUSED */
static void
nvme_prp_dma_destructor(void *buf, void *private)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;

	nvme_free_dma_common(dma);
}

static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
    size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR which
		 * indicates a driver bug which should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		atomic_inc_32(&nvme->n_dma_bind_err);
		nvme_free_dma_common(dma);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
	    DDI_SUCCESS) {
		*ret = NULL;
		kmem_free(dma, sizeof (nvme_dma_t));
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
nvme_prp_dma_constructor(void *buf, void *private, int flags)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;
	nvme_t *nvme = (nvme_t *)private;

	dma->nd_dmah = NULL;
	dma->nd_acch = NULL;

	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
		return (-1);
	}

	ASSERT(dma->nd_ncookie == 1);

	dma->nd_cached = B_TRUE;

	return (0);
}

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}
static void
nvme_free_cq(nvme_cq_t *cq)
{
	mutex_destroy(&cq->ncq_mutex);

	if (cq->ncq_cmd_taskq != NULL)
		taskq_destroy(cq->ncq_cmd_taskq);

	if (cq->ncq_dma != NULL)
		nvme_free_dma(cq->ncq_dma);

	kmem_free(cq, sizeof (*cq));
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);
	sema_destroy(&qp->nq_sema);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

/*
 * Destroy the pre-allocated cq array, but only free individual completion
 * queues from the given starting index.
 */
static void
nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
{
	uint_t i;

	for (i = start; i < nvme->n_cq_count; i++)
		if (nvme->n_cq[i] != NULL)
			nvme_free_cq(nvme->n_cq[i]);

	kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
}

static int
nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
    uint_t nthr)
{
	nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
	char name[64];		/* large enough for the taskq name */

	mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
	    DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
		goto fail;

	cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
	cq->ncq_nentry = nentry;
	cq->ncq_id = idx;
	cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);

	/*
	 * Each completion queue has its own command taskq.
	 */
	(void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
	    ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);

	cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
	    TASKQ_PREPOPULATE);

	if (cq->ncq_cmd_taskq == NULL) {
		dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
		    "taskq for cq %u", idx);
		goto fail;
	}

	*cqp = cq;
	return (DDI_SUCCESS);

fail:
	nvme_free_cq(cq);
	*cqp = NULL;

	return (DDI_FAILURE);
}

/*
 * Create the n_cq array big enough to hold "ncq" completion queues.
 * If the array already exists it will be re-sized (but only larger).
 * The admin queue is included in this array, which boosts the
 * max number of entries to UINT16_MAX + 1.
 */
static int
nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
{
	nvme_cq_t **cq;
	uint_t i, cq_count;

	ASSERT3U(ncq, >, nvme->n_cq_count);

	cq = nvme->n_cq;
	cq_count = nvme->n_cq_count;

	nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
	nvme->n_cq_count = ncq;

	for (i = 0; i < cq_count; i++)
		nvme->n_cq[i] = cq[i];

	for (; i < nvme->n_cq_count; i++)
		if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
		    DDI_SUCCESS)
			goto fail;

	if (cq != NULL)
		kmem_free(cq, sizeof (*cq) * cq_count);

	return (DDI_SUCCESS);

fail:
	nvme_destroy_cq_array(nvme, cq_count);
	/*
	 * Restore the original array
	 */
	nvme->n_cq_count = cq_count;
	nvme->n_cq = cq;

	return (DDI_FAILURE);
}

static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
    uint_t idx)
{
	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
	uint_t cq_idx;

	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	/*
	 * The NVMe spec defines that a full queue has one empty (unused)
	 * slot; initialize the semaphore accordingly.
	 */
	sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
		goto fail;

	/*
	 * idx == 0 is adminq, those above 0 are shared io completion queues.
	 */
	cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
	qp->nq_cq = nvme->n_cq[cq_idx];
	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
	qp->nq_nentry = nentry;

	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);

	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
	qp->nq_next_cmd = 0;

	*nqp = qp;
	return (DDI_SUCCESS);

fail:
	nvme_free_qpair(qp);
	*nqp = NULL;

	return (DDI_FAILURE);
}

static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);

	if (cmd == NULL)
		return (cmd);

	bzero(cmd, sizeof (nvme_cmd_t));

	cmd->nc_nvme = nvme;

	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));
	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);

	return (cmd);
}
static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
	/* Don't free commands on the lost commands list. */
	if (list_link_active(&cmd->nc_list))
		return;

	if (cmd->nc_dma) {
		if (cmd->nc_dma->nd_cached)
			kmem_cache_free(cmd->nc_nvme->n_prp_cache,
			    cmd->nc_dma);
		else
			nvme_free_dma(cmd->nc_dma);
		cmd->nc_dma = NULL;
	}

	cv_destroy(&cmd->nc_cv);
	mutex_destroy(&cmd->nc_mutex);

	kmem_cache_free(nvme_cmd_cache, cmd);
}

static void
nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	sema_p(&qp->nq_sema);
	nvme_submit_cmd_common(qp, cmd);
}

static int
nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	if (sema_tryp(&qp->nq_sema) == 0)
		return (EAGAIN);

	nvme_submit_cmd_common(qp, cmd);
	return (0);
}

static void
nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	nvme_reg_sqtdbl_t tail = { 0 };

	mutex_enter(&qp->nq_mutex);
	cmd->nc_completed = B_FALSE;

	/*
	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
	 * slot. If the slot is already occupied advance to the next slot and
	 * try again. This can happen for long running commands like async
	 * event requests.
	 */
	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
	qp->nq_cmd[qp->nq_next_cmd] = cmd;

	qp->nq_active_cmds++;

	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;

	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);

	mutex_exit(&qp->nq_mutex);
}

static nvme_cmd_t *
nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
{
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&qp->nq_mutex));
	ASSERT3S(cid, <, qp->nq_nentry);

	cmd = qp->nq_cmd[cid];
	qp->nq_cmd[cid] = NULL;
	ASSERT3U(qp->nq_active_cmds, >, 0);
	qp->nq_active_cmds--;
	sema_v(&qp->nq_sema);

	ASSERT3P(cmd, !=, NULL);
	ASSERT3P(cmd->nc_nvme, ==, nvme);
	ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);

	return (cmd);
}

/*
 * Get the command tied to the next completed cqe and bump along completion
 * queue head counter.
 */
static nvme_cmd_t *
nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_qpair_t *qp;
	nvme_cqe_t *cqe;
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&cq->ncq_mutex));

	cqe = &cq->ncq_cq[cq->ncq_head];

	/* Check phase tag of CQE. Hardware inverts it for new entries. */
	if (cqe->cqe_sf.sf_p == cq->ncq_phase)
		return (NULL);

	qp = nvme->n_ioq[cqe->cqe_sqid];

	mutex_enter(&qp->nq_mutex);
	cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
	mutex_exit(&qp->nq_mutex);

	ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));

	qp->nq_sqhead = cqe->cqe_sqhd;

	cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;

	/* Toggle phase on wrap-around. */
	if (cq->ncq_head == 0)
		cq->ncq_phase = cq->ncq_phase ? 0 : 1;

	return (cmd);
}
/*
 * Process all completed commands on the io completion queue.
 */
static uint_t
nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;
	uint_t completed = 0;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
		    TQ_NOSLEEP, &cmd->nc_tqent);

		completed++;
	}

	if (completed > 0) {
		/*
		 * Update the completion queue head doorbell.
		 */
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (completed);
}

static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
	nvme_cq_t *cq = qp->nq_cq;
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (cmd);
}

static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (cmd->nc_xfer != NULL)
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid opcode in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid field in cmd %p",
			    (void *)cmd);
		return (EIO);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid NS/format in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error. The spec (v1.0, section 4.5.1.2) says
		 * detailed error information is returned as async event,
		 * so we pretty much ignore the error here and handle it
		 * in the async event handler.
		 */
		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_REQUEST:
		/*
		 * Command Abort Requested. This normally happens only when a
		 * command times out.
		 */
		/* TODO: post ereport or change blkdev to handle this? */
		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
		return (ECANCELED);

	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
		/* Command Aborted due to Power Loss Notification */
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
		cmd->nc_nvme->n_dead = B_TRUE;
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
		/* Command Aborted due to SQ Deletion */
		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
		/* Capacity Exceeded */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
		/* Namespace Not Ready */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_SPC_INV_CQ:
		/* Completion Queue Invalid */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_QID:
		/* Invalid Queue Identifier */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
		/* Max Queue Size Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
		return (EINVAL);

	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
		/* Abort Command Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "abort command limit exceeded in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
		/* Async Event Request Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "async event request limit exceeded in cmd %p",
		    (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_INV_INT_VECT:
		/* Invalid Interrupt Vector */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
		/* Invalid Log Page */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FORMAT:
		/* Invalid Format */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_Q_DEL:
		/* Invalid Queue Deletion */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
		/* Conflicting Attributes */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_INV_PROT:
		/* Invalid Protection Information */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_READONLY:
		/* Write to Read Only Range */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_readonly);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EROFS);

	case NVME_CQE_SC_SPC_INV_FW_SLOT:
		/* Invalid Firmware Slot */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FW_IMG:
		/* Invalid Firmware Image */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_RESET:
		/* Conventional Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NSSR:
		/* NVMe Subsystem Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NEXT_RESET:
		/* Activation Requires Reset */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_MTFA:
		/* Activation Requires Maximum Time Violation */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EAGAIN);

	case NVME_CQE_SC_SPC_FW_PROHIBITED:
		/* Activation Prohibited */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_OVERLAP:
		/* Overlapping Firmware Ranges */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD);
		return (EINVAL);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	/*
	 * Take a shortcut if the controller is dead, or if
	 * command status indicates no error.
	 */
	if (cmd->nc_nvme->n_dead)
		return (EIO);

	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
		return (0);

	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
		return (nvme_check_generic_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
		return (nvme_check_specific_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
		return (nvme_check_integrity_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
		return (nvme_check_vendor_cmd_status(cmd));

	return (nvme_check_unknown_cmd_status(cmd));
}

static int
nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec)
{
	nvme_t *nvme = abort_cmd->nc_nvme;
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_abort_cmd_t ac = { 0 };
	int ret = 0;

	sema_p(&nvme->n_abort_sema);

	ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
	ac.b.ac_sqid = abort_cmd->nc_sqid;

	cmd->nc_sqid = 0;
	cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_cdw10 = ac.r;

	/*
	 * Send the ABORT to the hardware. The ABORT command will return
	 * _after_ the aborted command has completed (aborted or otherwise),
	 * but since we still hold the aborted command's mutex its callback
	 * hasn't been processed yet.
	 */
	nvme_admin_cmd(cmd, sec);
	sema_v(&nvme->n_abort_sema);

	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!ABORT failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		atomic_inc_32(&nvme->n_abort_failed);
	} else {
		dev_err(nvme->n_dip, CE_WARN,
		    "!ABORT of command %d/%d %ssuccessful",
		    abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid,
		    cmd->nc_cqe.cqe_dw0 & 1 ? "un" : "");
		if ((cmd->nc_cqe.cqe_dw0 & 1) == 0)
			atomic_inc_32(&nvme->n_cmd_aborted);
	}

	nvme_free_cmd(cmd);
	return (ret);
}

/*
 * nvme_wait_cmd -- wait for command completion or timeout
 *
 * In case of a serious error or a timeout of the abort command the hardware
 * will be declared dead and FMA will be notified.
 */
static void
nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
{
	clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
	nvme_t *nvme = cmd->nc_nvme;
	nvme_reg_csts_t csts;
	nvme_qpair_t *qp;

	ASSERT(mutex_owned(&cmd->nc_mutex));

	while (!cmd->nc_completed) {
		if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
			break;
	}

	if (cmd->nc_completed)
		return;

	/*
	 * The command timed out.
	 *
	 * Check controller for fatal status, any errors associated with the
	 * register or DMA handle, or for a double timeout (abort command
	 * timed out). If necessary log a warning and call FMA.
	 */
	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
	    "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
	    cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
	atomic_inc_32(&nvme->n_cmd_timeout);

	if (csts.b.csts_cfs ||
	    nvme_check_regs_hdl(nvme) ||
	    nvme_check_dma_hdl(cmd->nc_dma) ||
	    cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
		nvme->n_dead = B_TRUE;
	} else if (nvme_abort_cmd(cmd, sec) == 0) {
		/*
		 * If the abort succeeded the command should complete
		 * immediately with an appropriate status.
		 */
		while (!cmd->nc_completed)
			cv_wait(&cmd->nc_cv, &cmd->nc_mutex);

		return;
	}

	qp = nvme->n_ioq[cmd->nc_sqid];

	mutex_enter(&qp->nq_mutex);
	(void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
	mutex_exit(&qp->nq_mutex);

	/*
	 * As we don't know what the presumed dead hardware might still do
	 * with the DMA memory, we'll put the command on the lost commands
	 * list if it has any DMA memory.
	 */
	if (cmd->nc_dma != NULL) {
		mutex_enter(&nvme_lc_mutex);
		list_insert_head(&nvme_lost_cmds, cmd);
		mutex_exit(&nvme_lc_mutex);
	}
}

static void
nvme_wakeup_cmd(void *arg)
{
	nvme_cmd_t *cmd = arg;

	mutex_enter(&cmd->nc_mutex);
	cmd->nc_completed = B_TRUE;
	cv_signal(&cmd->nc_cv);
	mutex_exit(&cmd->nc_mutex);
}

static void
nvme_async_event_task(void *arg)
{
	nvme_cmd_t *cmd = arg;
	nvme_t *nvme = cmd->nc_nvme;
	nvme_error_log_entry_t *error_log = NULL;
	nvme_health_log_t *health_log = NULL;
	size_t logsize = 0;
	nvme_async_event_t event;

	/*
	 * Check for errors associated with the async request itself. The
	 * only command-specific error is "async event limit exceeded", which
	 * indicates a programming error in the driver and causes a panic in
	 * nvme_check_cmd_status().
	 *
	 * Other possible errors are various scenarios where the async request
	 * was aborted, or internal errors in the device. Internal errors are
	 * reported to FMA; the command aborts need no special handling here.
	 *
	 * Finally, at least qemu's NVMe emulation does not support async
	 * events, and will return NVME_CQE_SC_GEN_INV_OPC | DNR. When that
	 * happens we will avoid posting further async event requests.
	 */
	if (nvme_check_cmd_status(cmd) != 0) {
		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
		    "!async event request returned failure, sct = %x, "
		    "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
		    cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
		    cmd->nc_cqe.cqe_sf.sf_m);

		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
			cmd->nc_nvme->n_dead = B_TRUE;
			ddi_fm_service_impact(cmd->nc_nvme->n_dip,
			    DDI_SERVICE_LOST);
		}

		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
		    cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
			nvme->n_async_event_supported = B_FALSE;
		}

		nvme_free_cmd(cmd);
		return;
	}

	event.r = cmd->nc_cqe.cqe_dw0;

	/* Clear CQE and re-submit the async request. */
*/ 1750 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1751 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1752 1753 switch (event.b.ae_type) { 1754 case NVME_ASYNC_TYPE_ERROR: 1755 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1756 (void) nvme_get_logpage(nvme, B_FALSE, 1757 (void **)&error_log, &logsize, event.b.ae_logpage); 1758 } else { 1759 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1760 "async event reply: %d", event.b.ae_logpage); 1761 atomic_inc_32(&nvme->n_wrong_logpage); 1762 } 1763 1764 switch (event.b.ae_info) { 1765 case NVME_ASYNC_ERROR_INV_SQ: 1766 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1767 "invalid submission queue"); 1768 return; 1769 1770 case NVME_ASYNC_ERROR_INV_DBL: 1771 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1772 "invalid doorbell write value"); 1773 return; 1774 1775 case NVME_ASYNC_ERROR_DIAGFAIL: 1776 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1777 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1778 nvme->n_dead = B_TRUE; 1779 atomic_inc_32(&nvme->n_diagfail_event); 1780 break; 1781 1782 case NVME_ASYNC_ERROR_PERSISTENT: 1783 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1784 "device error"); 1785 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1786 nvme->n_dead = B_TRUE; 1787 atomic_inc_32(&nvme->n_persistent_event); 1788 break; 1789 1790 case NVME_ASYNC_ERROR_TRANSIENT: 1791 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1792 "device error"); 1793 /* TODO: send ereport */ 1794 atomic_inc_32(&nvme->n_transient_event); 1795 break; 1796 1797 case NVME_ASYNC_ERROR_FW_LOAD: 1798 dev_err(nvme->n_dip, CE_WARN, 1799 "!firmware image load error"); 1800 atomic_inc_32(&nvme->n_fw_load_event); 1801 break; 1802 } 1803 break; 1804 1805 case NVME_ASYNC_TYPE_HEALTH: 1806 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1807 (void) nvme_get_logpage(nvme, B_FALSE, 1808 (void **)&health_log, &logsize, event.b.ae_logpage, 1809 -1); 1810 } else { 1811 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1812 "async event reply: %d", event.b.ae_logpage); 1813 atomic_inc_32(&nvme->n_wrong_logpage); 1814 } 1815 1816 switch (event.b.ae_info) { 1817 case NVME_ASYNC_HEALTH_RELIABILITY: 1818 dev_err(nvme->n_dip, CE_WARN, 1819 "!device reliability compromised"); 1820 /* TODO: send ereport */ 1821 atomic_inc_32(&nvme->n_reliability_event); 1822 break; 1823 1824 case NVME_ASYNC_HEALTH_TEMPERATURE: 1825 dev_err(nvme->n_dip, CE_WARN, 1826 "!temperature above threshold"); 1827 /* TODO: send ereport */ 1828 atomic_inc_32(&nvme->n_temperature_event); 1829 break; 1830 1831 case NVME_ASYNC_HEALTH_SPARE: 1832 dev_err(nvme->n_dip, CE_WARN, 1833 "!spare space below threshold"); 1834 /* TODO: send ereport */ 1835 atomic_inc_32(&nvme->n_spare_event); 1836 break; 1837 } 1838 break; 1839 1840 case NVME_ASYNC_TYPE_VENDOR: 1841 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 1842 "received, info = %x, logpage = %x", event.b.ae_info, 1843 event.b.ae_logpage); 1844 atomic_inc_32(&nvme->n_vendor_event); 1845 break; 1846 1847 default: 1848 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 1849 "type = %x, info = %x, logpage = %x", event.b.ae_type, 1850 event.b.ae_info, event.b.ae_logpage); 1851 atomic_inc_32(&nvme->n_unknown_event); 1852 break; 1853 } 1854 1855 if (error_log) 1856 kmem_free(error_log, logsize); 1857 1858 if (health_log) 1859 kmem_free(health_log, logsize); 1860 } 1861 1862 static void 1863 nvme_admin_cmd(nvme_cmd_t *cmd, int sec) 1864 { 1865 mutex_enter(&cmd->nc_mutex); 1866 
nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd); 1867 nvme_wait_cmd(cmd, sec); 1868 mutex_exit(&cmd->nc_mutex); 1869 } 1870 1871 static void 1872 nvme_async_event(nvme_t *nvme) 1873 { 1874 nvme_cmd_t *cmd; 1875 1876 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1877 cmd->nc_sqid = 0; 1878 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 1879 cmd->nc_callback = nvme_async_event_task; 1880 cmd->nc_dontpanic = B_TRUE; 1881 1882 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1883 } 1884 1885 static int 1886 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf, 1887 boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses) 1888 { 1889 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1890 nvme_format_nvm_t format_nvm = { 0 }; 1891 int ret; 1892 1893 format_nvm.b.fm_lbaf = lbaf & 0xf; 1894 format_nvm.b.fm_ms = ms ? 1 : 0; 1895 format_nvm.b.fm_pi = pi & 0x7; 1896 format_nvm.b.fm_pil = pil ? 1 : 0; 1897 format_nvm.b.fm_ses = ses & 0x7; 1898 1899 cmd->nc_sqid = 0; 1900 cmd->nc_callback = nvme_wakeup_cmd; 1901 cmd->nc_sqe.sqe_nsid = nsid; 1902 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 1903 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 1904 1905 /* 1906 * Some devices like Samsung SM951 don't allow formatting of all 1907 * namespaces in one command. Handle that gracefully. 1908 */ 1909 if (nsid == (uint32_t)-1) 1910 cmd->nc_dontpanic = B_TRUE; 1911 /* 1912 * If this format request was initiated by the user, then don't allow a 1913 * programmer error to panic the system. 1914 */ 1915 if (user) 1916 cmd->nc_dontpanic = B_TRUE; 1917 1918 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 1919 1920 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 1921 dev_err(nvme->n_dip, CE_WARN, 1922 "!FORMAT failed with sct = %x, sc = %x", 1923 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1924 } 1925 1926 nvme_free_cmd(cmd); 1927 return (ret); 1928 } 1929 1930 static int 1931 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 1932 uint8_t logpage, ...) 1933 { 1934 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1935 nvme_getlogpage_t getlogpage = { 0 }; 1936 va_list ap; 1937 int ret; 1938 1939 va_start(ap, logpage); 1940 1941 cmd->nc_sqid = 0; 1942 cmd->nc_callback = nvme_wakeup_cmd; 1943 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 1944 1945 if (user) 1946 cmd->nc_dontpanic = B_TRUE; 1947 1948 getlogpage.b.lp_lid = logpage; 1949 1950 switch (logpage) { 1951 case NVME_LOGPAGE_ERROR: 1952 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1953 /* 1954 * The GET LOG PAGE command can use at most 2 pages to return 1955 * data, PRP lists are not supported. 
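 * For example (illustrative numbers): with the default 4k page size this
 * caps an error log transfer at 8k; as an NVMe 1.2 error log entry is 64
 * bytes, at most 128 entries can be fetched by a single command.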
1956 */ 1957 *bufsize = MIN(2 * nvme->n_pagesize, 1958 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t)); 1959 break; 1960 1961 case NVME_LOGPAGE_HEALTH: 1962 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 1963 *bufsize = sizeof (nvme_health_log_t); 1964 break; 1965 1966 case NVME_LOGPAGE_FWSLOT: 1967 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1968 *bufsize = sizeof (nvme_fwslot_log_t); 1969 break; 1970 1971 default: 1972 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d", 1973 logpage); 1974 atomic_inc_32(&nvme->n_unknown_logpage); 1975 ret = EINVAL; 1976 goto fail; 1977 } 1978 1979 va_end(ap); 1980 1981 getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1; 1982 1983 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 1984 1985 if (nvme_zalloc_dma(nvme, *bufsize, 1986 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1987 dev_err(nvme->n_dip, CE_WARN, 1988 "!nvme_zalloc_dma failed for GET LOG PAGE"); 1989 ret = ENOMEM; 1990 goto fail; 1991 } 1992 1993 if (cmd->nc_dma->nd_ncookie > 2) { 1994 dev_err(nvme->n_dip, CE_WARN, 1995 "!too many DMA cookies for GET LOG PAGE"); 1996 atomic_inc_32(&nvme->n_too_many_cookies); 1997 ret = ENOMEM; 1998 goto fail; 1999 } 2000 2001 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 2002 if (cmd->nc_dma->nd_ncookie > 1) { 2003 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2004 &cmd->nc_dma->nd_cookie); 2005 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2006 cmd->nc_dma->nd_cookie.dmac_laddress; 2007 } 2008 2009 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2010 2011 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2012 dev_err(nvme->n_dip, CE_WARN, 2013 "!GET LOG PAGE failed with sct = %x, sc = %x", 2014 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2015 goto fail; 2016 } 2017 2018 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2019 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2020 2021 fail: 2022 nvme_free_cmd(cmd); 2023 2024 return (ret); 2025 } 2026 2027 static int 2028 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, void **buf) 2029 { 2030 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2031 int ret; 2032 2033 if (buf == NULL) 2034 return (EINVAL); 2035 2036 cmd->nc_sqid = 0; 2037 cmd->nc_callback = nvme_wakeup_cmd; 2038 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 2039 cmd->nc_sqe.sqe_nsid = nsid; 2040 cmd->nc_sqe.sqe_cdw10 = nsid ? 
NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;
2041 
2042 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
2043 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2044 dev_err(nvme->n_dip, CE_WARN,
2045 "!nvme_zalloc_dma failed for IDENTIFY");
2046 ret = ENOMEM;
2047 goto fail;
2048 }
2049 
2050 if (cmd->nc_dma->nd_ncookie > 2) {
2051 dev_err(nvme->n_dip, CE_WARN,
2052 "!too many DMA cookies for IDENTIFY");
2053 atomic_inc_32(&nvme->n_too_many_cookies);
2054 ret = ENOMEM;
2055 goto fail;
2056 }
2057 
2058 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
2059 if (cmd->nc_dma->nd_ncookie > 1) {
2060 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2061 &cmd->nc_dma->nd_cookie);
2062 cmd->nc_sqe.sqe_dptr.d_prp[1] =
2063 cmd->nc_dma->nd_cookie.dmac_laddress;
2064 }
2065 
2066 if (user)
2067 cmd->nc_dontpanic = B_TRUE;
2068 
2069 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2070 
2071 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2072 dev_err(nvme->n_dip, CE_WARN,
2073 "!IDENTIFY failed with sct = %x, sc = %x",
2074 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2075 goto fail;
2076 }
2077 
2078 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
2079 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
2080 
2081 fail:
2082 nvme_free_cmd(cmd);
2083 
2084 return (ret);
2085 }
2086 
2087 static int
2088 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2089 uint32_t val, uint32_t *res)
2090 {
2091 _NOTE(ARGUNUSED(nsid));
2092 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2093 int ret = EINVAL;
2094 
2095 ASSERT(res != NULL);
2096 
2097 cmd->nc_sqid = 0;
2098 cmd->nc_callback = nvme_wakeup_cmd;
2099 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
2100 cmd->nc_sqe.sqe_cdw10 = feature;
2101 cmd->nc_sqe.sqe_cdw11 = val;
2102 
2103 if (user)
2104 cmd->nc_dontpanic = B_TRUE;
2105 
2106 switch (feature) {
2107 case NVME_FEAT_WRITE_CACHE:
2108 if (!nvme->n_write_cache_present)
2109 goto fail;
2110 break;
2111 
2112 case NVME_FEAT_NQUEUES:
2113 break;
2114 
2115 default:
2116 goto fail;
2117 }
2118 
2119 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2120 
2121 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2122 dev_err(nvme->n_dip, CE_WARN,
2123 "!SET FEATURES %d failed with sct = %x, sc = %x",
2124 feature, cmd->nc_cqe.cqe_sf.sf_sct,
2125 cmd->nc_cqe.cqe_sf.sf_sc);
2126 goto fail;
2127 }
2128 
2129 *res = cmd->nc_cqe.cqe_dw0;
2130 
2131 fail:
2132 nvme_free_cmd(cmd);
2133 return (ret);
2134 }
2135 
2136 static int
2137 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2138 uint32_t *res, void **buf, size_t *bufsize)
2139 {
2140 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2141 int ret = EINVAL;
2142 
2143 ASSERT(res != NULL);
2144 
2145 if (bufsize != NULL)
2146 *bufsize = 0;
2147 
2148 cmd->nc_sqid = 0;
2149 cmd->nc_callback = nvme_wakeup_cmd;
2150 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES;
2151 cmd->nc_sqe.sqe_cdw10 = feature;
2152 cmd->nc_sqe.sqe_cdw11 = *res;
2153 
2154 /*
2155 * For some of the optional features there doesn't seem to be a way of
2156 * detecting whether they are supported other than simply trying them.
2157 * An unsupported feature will fail with an "Invalid Field in Command"
2158 * error, which is normally considered a programming error. Set the
2159 * nc_dontpanic flag to override the panic in nvme_check_generic_cmd_status().
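 * In effect the driver probes for support: if the controller rejects the
 * command with NVME_CQE_SC_GEN_INV_FLD, the corresponding n_*_supported
 * flag is cleared in the status check below (see NVME_FEAT_LBA_RANGE and
 * NVME_FEAT_PROGRESS).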
2160 */ 2161 switch (feature) { 2162 case NVME_FEAT_ARBITRATION: 2163 case NVME_FEAT_POWER_MGMT: 2164 case NVME_FEAT_TEMPERATURE: 2165 case NVME_FEAT_ERROR: 2166 case NVME_FEAT_NQUEUES: 2167 case NVME_FEAT_INTR_COAL: 2168 case NVME_FEAT_INTR_VECT: 2169 case NVME_FEAT_WRITE_ATOM: 2170 case NVME_FEAT_ASYNC_EVENT: 2171 break; 2172 2173 case NVME_FEAT_WRITE_CACHE: 2174 if (!nvme->n_write_cache_present) 2175 goto fail; 2176 break; 2177 2178 case NVME_FEAT_LBA_RANGE: 2179 if (!nvme->n_lba_range_supported) 2180 goto fail; 2181 2182 cmd->nc_dontpanic = B_TRUE; 2183 cmd->nc_sqe.sqe_nsid = nsid; 2184 ASSERT(bufsize != NULL); 2185 *bufsize = NVME_LBA_RANGE_BUFSIZE; 2186 break; 2187 2188 case NVME_FEAT_AUTO_PST: 2189 if (!nvme->n_auto_pst_supported) 2190 goto fail; 2191 2192 ASSERT(bufsize != NULL); 2193 *bufsize = NVME_AUTO_PST_BUFSIZE; 2194 break; 2195 2196 case NVME_FEAT_PROGRESS: 2197 if (!nvme->n_progress_supported) 2198 goto fail; 2199 2200 cmd->nc_dontpanic = B_TRUE; 2201 break; 2202 2203 default: 2204 goto fail; 2205 } 2206 2207 if (user) 2208 cmd->nc_dontpanic = B_TRUE; 2209 2210 if (bufsize != NULL && *bufsize != 0) { 2211 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ, 2212 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2213 dev_err(nvme->n_dip, CE_WARN, 2214 "!nvme_zalloc_dma failed for GET FEATURES"); 2215 ret = ENOMEM; 2216 goto fail; 2217 } 2218 2219 if (cmd->nc_dma->nd_ncookie > 2) { 2220 dev_err(nvme->n_dip, CE_WARN, 2221 "!too many DMA cookies for GET FEATURES"); 2222 atomic_inc_32(&nvme->n_too_many_cookies); 2223 ret = ENOMEM; 2224 goto fail; 2225 } 2226 2227 cmd->nc_sqe.sqe_dptr.d_prp[0] = 2228 cmd->nc_dma->nd_cookie.dmac_laddress; 2229 if (cmd->nc_dma->nd_ncookie > 1) { 2230 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2231 &cmd->nc_dma->nd_cookie); 2232 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2233 cmd->nc_dma->nd_cookie.dmac_laddress; 2234 } 2235 } 2236 2237 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2238 2239 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2240 boolean_t known = B_TRUE; 2241 2242 /* Check if this is unsupported optional feature */ 2243 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2244 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) { 2245 switch (feature) { 2246 case NVME_FEAT_LBA_RANGE: 2247 nvme->n_lba_range_supported = B_FALSE; 2248 break; 2249 case NVME_FEAT_PROGRESS: 2250 nvme->n_progress_supported = B_FALSE; 2251 break; 2252 default: 2253 known = B_FALSE; 2254 break; 2255 } 2256 } else { 2257 known = B_FALSE; 2258 } 2259 2260 /* Report the error otherwise */ 2261 if (!known) { 2262 dev_err(nvme->n_dip, CE_WARN, 2263 "!GET FEATURES %d failed with sct = %x, sc = %x", 2264 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2265 cmd->nc_cqe.cqe_sf.sf_sc); 2266 } 2267 2268 goto fail; 2269 } 2270 2271 if (bufsize != NULL && *bufsize != 0) { 2272 ASSERT(buf != NULL); 2273 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2274 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2275 } 2276 2277 *res = cmd->nc_cqe.cqe_dw0; 2278 2279 fail: 2280 nvme_free_cmd(cmd); 2281 return (ret); 2282 } 2283 2284 static int 2285 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 2286 { 2287 nvme_write_cache_t nwc = { 0 }; 2288 2289 if (enable) 2290 nwc.b.wc_wce = 1; 2291 2292 return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE, 2293 nwc.r, &nwc.r)); 2294 } 2295 2296 static int 2297 nvme_set_nqueues(nvme_t *nvme) 2298 { 2299 nvme_nqueues_t nq = { 0 }; 2300 int ret; 2301 2302 /* 2303 * The default is to allocate one completion queue per vector. 
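 * A worked example (illustrative): with 8 interrupt vectors and neither
 * tunable set, the driver requests 8 submission and 8 completion queues;
 * if the SET FEATURES (NQUEUES) reply grants only 4 of each, both counts
 * are reduced to 4 by the MIN() logic below.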
2304 */
2305 if (nvme->n_completion_queues == -1)
2306 nvme->n_completion_queues = nvme->n_intr_cnt;
2307 
2308 /*
2309 * There is no point in having more completion queues than
2310 * interrupt vectors.
2311 */
2312 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2313 nvme->n_intr_cnt);
2314 
2315 /*
2316 * The default is to use one submission queue per completion queue.
2317 */
2318 if (nvme->n_submission_queues == -1)
2319 nvme->n_submission_queues = nvme->n_completion_queues;
2320 
2321 /*
2322 * There is no point in having more completion queues than
2323 * submission queues.
2324 */
2325 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2326 nvme->n_submission_queues);
2327 
2328 ASSERT(nvme->n_submission_queues > 0);
2329 ASSERT(nvme->n_completion_queues > 0);
2330 
2331 nq.b.nq_nsq = nvme->n_submission_queues - 1;
2332 nq.b.nq_ncq = nvme->n_completion_queues - 1;
2333 
2334 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
2335 &nq.r);
2336 
2337 if (ret == 0) {
2338 /*
2339 * Never use more than the requested number of queues.
2340 */
2341 nvme->n_submission_queues = MIN(nvme->n_submission_queues,
2342 nq.b.nq_nsq + 1);
2343 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2344 nq.b.nq_ncq + 1);
2345 }
2346 
2347 return (ret);
2348 }
2349 
2350 static int
2351 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
2352 {
2353 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2354 nvme_create_queue_dw10_t dw10 = { 0 };
2355 nvme_create_cq_dw11_t c_dw11 = { 0 };
2356 int ret;
2357 
2358 dw10.b.q_qid = cq->ncq_id;
2359 dw10.b.q_qsize = cq->ncq_nentry - 1;
2360 
2361 c_dw11.b.cq_pc = 1;
2362 c_dw11.b.cq_ien = 1;
2363 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
2364 
2365 cmd->nc_sqid = 0;
2366 cmd->nc_callback = nvme_wakeup_cmd;
2367 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
2368 cmd->nc_sqe.sqe_cdw10 = dw10.r;
2369 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
2370 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
2371 
2372 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2373 
2374 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2375 dev_err(nvme->n_dip, CE_WARN,
2376 "!CREATE CQUEUE failed with sct = %x, sc = %x",
2377 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2378 }
2379 
2380 nvme_free_cmd(cmd);
2381 
2382 return (ret);
2383 }
2384 
2385 static int
2386 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
2387 {
2388 nvme_cq_t *cq = qp->nq_cq;
2389 nvme_cmd_t *cmd;
2390 nvme_create_queue_dw10_t dw10 = { 0 };
2391 nvme_create_sq_dw11_t s_dw11 = { 0 };
2392 int ret;
2393 
2394 /*
2395 * It is possible to have more qpairs than completion queues,
2396 * and when the idx > ncq_id, that completion queue is shared
2397 * and has already been created.
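 * For example (illustrative, assuming the round-robin cq assignment done
 * when the qpairs are allocated): with 2 completion queues and 4 qpairs,
 * qpairs 1 and 2 create cq 1 and cq 2, while qpairs 3 and 4 see
 * idx > ncq_id and reuse them.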
2398 */ 2399 if (idx <= cq->ncq_id && 2400 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 2401 return (DDI_FAILURE); 2402 2403 dw10.b.q_qid = idx; 2404 dw10.b.q_qsize = qp->nq_nentry - 1; 2405 2406 s_dw11.b.sq_pc = 1; 2407 s_dw11.b.sq_cqid = cq->ncq_id; 2408 2409 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2410 cmd->nc_sqid = 0; 2411 cmd->nc_callback = nvme_wakeup_cmd; 2412 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 2413 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2414 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 2415 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 2416 2417 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2418 2419 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2420 dev_err(nvme->n_dip, CE_WARN, 2421 "!CREATE SQUEUE failed with sct = %x, sc = %x", 2422 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2423 } 2424 2425 nvme_free_cmd(cmd); 2426 2427 return (ret); 2428 } 2429 2430 static boolean_t 2431 nvme_reset(nvme_t *nvme, boolean_t quiesce) 2432 { 2433 nvme_reg_csts_t csts; 2434 int i; 2435 2436 nvme_put32(nvme, NVME_REG_CC, 0); 2437 2438 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2439 if (csts.b.csts_rdy == 1) { 2440 nvme_put32(nvme, NVME_REG_CC, 0); 2441 for (i = 0; i != nvme->n_timeout * 10; i++) { 2442 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2443 if (csts.b.csts_rdy == 0) 2444 break; 2445 2446 if (quiesce) 2447 drv_usecwait(50000); 2448 else 2449 delay(drv_usectohz(50000)); 2450 } 2451 } 2452 2453 nvme_put32(nvme, NVME_REG_AQA, 0); 2454 nvme_put32(nvme, NVME_REG_ASQ, 0); 2455 nvme_put32(nvme, NVME_REG_ACQ, 0); 2456 2457 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2458 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE); 2459 } 2460 2461 static void 2462 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) 2463 { 2464 nvme_reg_cc_t cc; 2465 nvme_reg_csts_t csts; 2466 int i; 2467 2468 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT); 2469 2470 cc.r = nvme_get32(nvme, NVME_REG_CC); 2471 cc.b.cc_shn = mode & 0x3; 2472 nvme_put32(nvme, NVME_REG_CC, cc.r); 2473 2474 for (i = 0; i != 10; i++) { 2475 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2476 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 2477 break; 2478 2479 if (quiesce) 2480 drv_usecwait(100000); 2481 else 2482 delay(drv_usectohz(100000)); 2483 } 2484 } 2485 2486 2487 static void 2488 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 2489 { 2490 /* 2491 * Section 7.7 of the spec describes how to get a unique ID for 2492 * the controller: the vendor ID, the model name and the serial 2493 * number shall be unique when combined. 2494 * 2495 * If a namespace has no EUI64 we use the above and add the hex 2496 * namespace ID to get a unique ID for the namespace. 
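 * For example (hypothetical values): vendor ID 0x8086, model "Acme NVMe",
 * serial "S123" and namespace 1 yield the devid "8086-Acme NVMe-S123-1",
 * per the kmem_asprintf() format string below.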
2497 */ 2498 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2499 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 2500 2501 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2502 bcopy(nvme->n_idctl->id_serial, serial, 2503 sizeof (nvme->n_idctl->id_serial)); 2504 2505 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2506 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 2507 2508 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X", 2509 nvme->n_idctl->id_vid, model, serial, nsid); 2510 } 2511 2512 static int 2513 nvme_init_ns(nvme_t *nvme, int nsid) 2514 { 2515 nvme_namespace_t *ns = &nvme->n_ns[nsid - 1]; 2516 nvme_identify_nsid_t *idns; 2517 boolean_t was_ignored; 2518 int last_rp; 2519 2520 ns->ns_nvme = nvme; 2521 2522 if (nvme_identify(nvme, B_FALSE, nsid, (void **)&idns) != 0) { 2523 dev_err(nvme->n_dip, CE_WARN, 2524 "!failed to identify namespace %d", nsid); 2525 return (DDI_FAILURE); 2526 } 2527 2528 ns->ns_idns = idns; 2529 ns->ns_id = nsid; 2530 ns->ns_block_count = idns->id_nsize; 2531 ns->ns_block_size = 2532 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 2533 ns->ns_best_block_size = ns->ns_block_size; 2534 2535 /* 2536 * Get the EUI64 if present. Use it for devid and device node names. 2537 */ 2538 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2539 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 2540 2541 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 2542 if (*(uint64_t *)ns->ns_eui64 != 0) { 2543 uint8_t *eui64 = ns->ns_eui64; 2544 2545 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), 2546 "%02x%02x%02x%02x%02x%02x%02x%02x", 2547 eui64[0], eui64[1], eui64[2], eui64[3], 2548 eui64[4], eui64[5], eui64[6], eui64[7]); 2549 } else { 2550 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d", 2551 ns->ns_id); 2552 2553 nvme_prepare_devid(nvme, ns->ns_id); 2554 } 2555 2556 /* 2557 * Find the LBA format with no metadata and the best relative 2558 * performance. A value of 3 means "degraded", 0 is best. 2559 */ 2560 last_rp = 3; 2561 for (int j = 0; j <= idns->id_nlbaf; j++) { 2562 if (idns->id_lbaf[j].lbaf_lbads == 0) 2563 break; 2564 if (idns->id_lbaf[j].lbaf_ms != 0) 2565 continue; 2566 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 2567 continue; 2568 last_rp = idns->id_lbaf[j].lbaf_rp; 2569 ns->ns_best_block_size = 2570 1 << idns->id_lbaf[j].lbaf_lbads; 2571 } 2572 2573 if (ns->ns_best_block_size < nvme->n_min_block_size) 2574 ns->ns_best_block_size = nvme->n_min_block_size; 2575 2576 was_ignored = ns->ns_ignore; 2577 2578 /* 2579 * We currently don't support namespaces that use either: 2580 * - protection information 2581 * - illegal block size (< 512) 2582 */ 2583 if (idns->id_dps.dp_pinfo) { 2584 dev_err(nvme->n_dip, CE_WARN, 2585 "!ignoring namespace %d, unsupported feature: " 2586 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 2587 ns->ns_ignore = B_TRUE; 2588 } else if (ns->ns_block_size < 512) { 2589 dev_err(nvme->n_dip, CE_WARN, 2590 "!ignoring namespace %d, unsupported block size %"PRIu64, 2591 nsid, (uint64_t)ns->ns_block_size); 2592 ns->ns_ignore = B_TRUE; 2593 } else { 2594 ns->ns_ignore = B_FALSE; 2595 } 2596 2597 /* 2598 * Keep a count of namespaces which are attachable. 2599 * See comments in nvme_bd_driveinfo() to understand its effect. 2600 */ 2601 if (was_ignored) { 2602 /* 2603 * Previously ignored, but now not. Count it. 2604 */ 2605 if (!ns->ns_ignore) 2606 nvme->n_namespaces_attachable++; 2607 } else { 2608 /* 2609 * Wasn't ignored previously, but now needs to be. 2610 * Discount it. 
2611 */ 2612 if (ns->ns_ignore) 2613 nvme->n_namespaces_attachable--; 2614 } 2615 2616 return (DDI_SUCCESS); 2617 } 2618 2619 static int 2620 nvme_init(nvme_t *nvme) 2621 { 2622 nvme_reg_cc_t cc = { 0 }; 2623 nvme_reg_aqa_t aqa = { 0 }; 2624 nvme_reg_asq_t asq = { 0 }; 2625 nvme_reg_acq_t acq = { 0 }; 2626 nvme_reg_cap_t cap; 2627 nvme_reg_vs_t vs; 2628 nvme_reg_csts_t csts; 2629 int i = 0; 2630 uint16_t nqueues; 2631 uint_t tq_threads; 2632 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2633 char *vendor, *product; 2634 2635 /* Check controller version */ 2636 vs.r = nvme_get32(nvme, NVME_REG_VS); 2637 nvme->n_version.v_major = vs.b.vs_mjr; 2638 nvme->n_version.v_minor = vs.b.vs_mnr; 2639 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 2640 nvme->n_version.v_major, nvme->n_version.v_minor); 2641 2642 if (nvme->n_version.v_major > nvme_version_major) { 2643 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 2644 nvme_version_major); 2645 if (nvme->n_strict_version) 2646 goto fail; 2647 } 2648 2649 /* retrieve controller configuration */ 2650 cap.r = nvme_get64(nvme, NVME_REG_CAP); 2651 2652 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 2653 dev_err(nvme->n_dip, CE_WARN, 2654 "!NVM command set not supported by hardware"); 2655 goto fail; 2656 } 2657 2658 nvme->n_nssr_supported = cap.b.cap_nssrs; 2659 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 2660 nvme->n_timeout = cap.b.cap_to; 2661 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 2662 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 2663 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 2664 2665 /* 2666 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 2667 * the base page size of 4k (1<<12), so add 12 here to get the real 2668 * page size value. 2669 */ 2670 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 2671 cap.b.cap_mpsmax + 12); 2672 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 2673 2674 /* 2675 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 2676 */ 2677 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 2678 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2679 2680 /* 2681 * Set up PRP DMA to transfer 1 page-aligned page at a time. 2682 * Maxxfer may be increased after we identified the controller limits. 2683 */ 2684 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 2685 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2686 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 2687 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 2688 2689 /* 2690 * Reset controller if it's still in ready state. 2691 */ 2692 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 2693 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 2694 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2695 nvme->n_dead = B_TRUE; 2696 goto fail; 2697 } 2698 2699 /* 2700 * Create the cq array with one completion queue to be assigned 2701 * to the admin queue pair and a limited number of taskqs (4). 2702 */ 2703 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) != 2704 DDI_SUCCESS) { 2705 dev_err(nvme->n_dip, CE_WARN, 2706 "!failed to pre-allocate admin completion queue"); 2707 goto fail; 2708 } 2709 /* 2710 * Create the admin queue pair. 
2711 */ 2712 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 2713 != DDI_SUCCESS) { 2714 dev_err(nvme->n_dip, CE_WARN, 2715 "!unable to allocate admin qpair"); 2716 goto fail; 2717 } 2718 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 2719 nvme->n_ioq[0] = nvme->n_adminq; 2720 2721 nvme->n_progress |= NVME_ADMIN_QUEUE; 2722 2723 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2724 "admin-queue-len", nvme->n_admin_queue_len); 2725 2726 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 2727 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 2728 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 2729 2730 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 2731 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 2732 2733 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 2734 nvme_put64(nvme, NVME_REG_ASQ, asq); 2735 nvme_put64(nvme, NVME_REG_ACQ, acq); 2736 2737 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 2738 cc.b.cc_css = 0; /* use NVM command set */ 2739 cc.b.cc_mps = nvme->n_pageshift - 12; 2740 cc.b.cc_shn = 0; /* no shutdown in progress */ 2741 cc.b.cc_en = 1; /* enable controller */ 2742 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 2743 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 2744 2745 nvme_put32(nvme, NVME_REG_CC, cc.r); 2746 2747 /* 2748 * Wait for the controller to become ready. 2749 */ 2750 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2751 if (csts.b.csts_rdy == 0) { 2752 for (i = 0; i != nvme->n_timeout * 10; i++) { 2753 delay(drv_usectohz(50000)); 2754 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2755 2756 if (csts.b.csts_cfs == 1) { 2757 dev_err(nvme->n_dip, CE_WARN, 2758 "!controller fatal status at init"); 2759 ddi_fm_service_impact(nvme->n_dip, 2760 DDI_SERVICE_LOST); 2761 nvme->n_dead = B_TRUE; 2762 goto fail; 2763 } 2764 2765 if (csts.b.csts_rdy == 1) 2766 break; 2767 } 2768 } 2769 2770 if (csts.b.csts_rdy == 0) { 2771 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 2772 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2773 nvme->n_dead = B_TRUE; 2774 goto fail; 2775 } 2776 2777 /* 2778 * Assume an abort command limit of 1. We'll destroy and re-init 2779 * that later when we know the true abort command limit. 2780 */ 2781 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 2782 2783 /* 2784 * Setup initial interrupt for admin queue. 2785 */ 2786 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 2787 != DDI_SUCCESS) && 2788 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 2789 != DDI_SUCCESS) && 2790 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 2791 != DDI_SUCCESS)) { 2792 dev_err(nvme->n_dip, CE_WARN, 2793 "!failed to setup initial interrupt"); 2794 goto fail; 2795 } 2796 2797 /* 2798 * Post an asynchronous event command to catch errors. 2799 * We assume the asynchronous events are supported as required by 2800 * specification (Figure 40 in section 5 of NVMe 1.2). 2801 * However, since at least qemu does not follow the specification, 2802 * we need a mechanism to protect ourselves. 
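 * That mechanism is n_async_event_supported: it starts out B_TRUE and is
 * cleared by nvme_async_event_task() if the controller fails the request
 * with NVME_CQE_SC_GEN_INV_OPC and DNR set, after which no further async
 * event requests are posted.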
2803 */ 2804 nvme->n_async_event_supported = B_TRUE; 2805 nvme_async_event(nvme); 2806 2807 /* 2808 * Identify Controller 2809 */ 2810 if (nvme_identify(nvme, B_FALSE, 0, (void **)&nvme->n_idctl) != 0) { 2811 dev_err(nvme->n_dip, CE_WARN, 2812 "!failed to identify controller"); 2813 goto fail; 2814 } 2815 2816 /* 2817 * Get Vendor & Product ID 2818 */ 2819 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2820 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2821 sata_split_model(model, &vendor, &product); 2822 2823 if (vendor == NULL) 2824 nvme->n_vendor = strdup("NVMe"); 2825 else 2826 nvme->n_vendor = strdup(vendor); 2827 2828 nvme->n_product = strdup(product); 2829 2830 /* 2831 * Get controller limits. 2832 */ 2833 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 2834 MIN(nvme->n_admin_queue_len / 10, 2835 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 2836 2837 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2838 "async-event-limit", nvme->n_async_event_limit); 2839 2840 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 2841 2842 /* 2843 * Reinitialize the semaphore with the true abort command limit 2844 * supported by the hardware. It's not necessary to disable interrupts 2845 * as only command aborts use the semaphore, and no commands are 2846 * executed or aborted while we're here. 2847 */ 2848 sema_destroy(&nvme->n_abort_sema); 2849 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 2850 SEMA_DRIVER, NULL); 2851 2852 nvme->n_progress |= NVME_CTRL_LIMITS; 2853 2854 if (nvme->n_idctl->id_mdts == 0) 2855 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 2856 else 2857 nvme->n_max_data_transfer_size = 2858 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 2859 2860 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 2861 2862 /* 2863 * Limit n_max_data_transfer_size to what we can handle in one PRP. 2864 * Chained PRPs are currently unsupported. 2865 * 2866 * This is a no-op on hardware which doesn't support a transfer size 2867 * big enough to require chained PRPs. 2868 */ 2869 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 2870 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 2871 2872 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 2873 2874 /* 2875 * Make sure the minimum/maximum queue entry sizes are not 2876 * larger/smaller than the default. 2877 */ 2878 2879 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 2880 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 2881 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 2882 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 2883 goto fail; 2884 2885 /* 2886 * Check for the presence of a Volatile Write Cache. If present, 2887 * enable or disable based on the value of the property 2888 * volatile-write-cache-enable (default is enabled). 2889 */ 2890 nvme->n_write_cache_present = 2891 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 2892 2893 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2894 "volatile-write-cache-present", 2895 nvme->n_write_cache_present ? 1 : 0); 2896 2897 if (!nvme->n_write_cache_present) { 2898 nvme->n_write_cache_enabled = B_FALSE; 2899 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 2900 != 0) { 2901 dev_err(nvme->n_dip, CE_WARN, 2902 "!failed to %sable volatile write cache", 2903 nvme->n_write_cache_enabled ? 
"en" : "dis"); 2904 /* 2905 * Assume the cache is (still) enabled. 2906 */ 2907 nvme->n_write_cache_enabled = B_TRUE; 2908 } 2909 2910 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2911 "volatile-write-cache-enable", 2912 nvme->n_write_cache_enabled ? 1 : 0); 2913 2914 /* 2915 * Assume LBA Range Type feature is supported. If it isn't this 2916 * will be set to B_FALSE by nvme_get_features(). 2917 */ 2918 nvme->n_lba_range_supported = B_TRUE; 2919 2920 /* 2921 * Check support for Autonomous Power State Transition. 2922 */ 2923 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2924 nvme->n_auto_pst_supported = 2925 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE; 2926 2927 /* 2928 * Assume Software Progress Marker feature is supported. If it isn't 2929 * this will be set to B_FALSE by nvme_get_features(). 2930 */ 2931 nvme->n_progress_supported = B_TRUE; 2932 2933 /* 2934 * Identify Namespaces 2935 */ 2936 nvme->n_namespace_count = nvme->n_idctl->id_nn; 2937 2938 if (nvme->n_namespace_count == 0) { 2939 dev_err(nvme->n_dip, CE_WARN, 2940 "!controllers without namespaces are not supported"); 2941 goto fail; 2942 } 2943 2944 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 2945 dev_err(nvme->n_dip, CE_WARN, 2946 "!too many namespaces: %d, limiting to %d\n", 2947 nvme->n_namespace_count, NVME_MINOR_MAX); 2948 nvme->n_namespace_count = NVME_MINOR_MAX; 2949 } 2950 2951 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 2952 nvme->n_namespace_count, KM_SLEEP); 2953 2954 for (i = 0; i != nvme->n_namespace_count; i++) { 2955 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER, 2956 NULL); 2957 nvme->n_ns[i].ns_ignore = B_TRUE; 2958 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS) 2959 goto fail; 2960 } 2961 2962 /* 2963 * Try to set up MSI/MSI-X interrupts. 2964 */ 2965 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 2966 != 0) { 2967 nvme_release_interrupts(nvme); 2968 2969 nqueues = MIN(UINT16_MAX, ncpus); 2970 2971 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 2972 nqueues) != DDI_SUCCESS) && 2973 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 2974 nqueues) != DDI_SUCCESS)) { 2975 dev_err(nvme->n_dip, CE_WARN, 2976 "!failed to setup MSI/MSI-X interrupts"); 2977 goto fail; 2978 } 2979 } 2980 2981 /* 2982 * Create I/O queue pairs. 2983 */ 2984 2985 if (nvme_set_nqueues(nvme) != 0) { 2986 dev_err(nvme->n_dip, CE_WARN, 2987 "!failed to set number of I/O queues to %d", 2988 nvme->n_intr_cnt); 2989 goto fail; 2990 } 2991 2992 /* 2993 * Reallocate I/O queue array 2994 */ 2995 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 2996 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 2997 (nvme->n_submission_queues + 1), KM_SLEEP); 2998 nvme->n_ioq[0] = nvme->n_adminq; 2999 3000 /* 3001 * There should always be at least as many submission queues 3002 * as completion queues. 3003 */ 3004 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues); 3005 3006 nvme->n_ioq_count = nvme->n_submission_queues; 3007 3008 nvme->n_io_squeue_len = 3009 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries); 3010 3011 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len", 3012 nvme->n_io_squeue_len); 3013 3014 /* 3015 * Pre-allocate completion queues. 3016 * When there are the same number of submission and completion 3017 * queues there is no value in having a larger completion 3018 * queue length. 
3019 */
3020 if (nvme->n_submission_queues == nvme->n_completion_queues)
3021 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3022 nvme->n_io_squeue_len);
3023 
3024 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3025 nvme->n_max_queue_entries);
3026 
3027 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
3028 nvme->n_io_cqueue_len);
3029 
3030 /*
3031 * Assign an equal share of the taskq threads to each completion
3032 * queue, capping the total number of threads at the number
3033 * of CPUs.
3034 */
3035 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
3036 
3037 /*
3038 * If the calculation above yields zero, fall back to at least
3039 * one thread per completion queue.
3040 */
3041 tq_threads = MAX(1, tq_threads);
3042 
3043 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
3044 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
3045 dev_err(nvme->n_dip, CE_WARN,
3046 "!failed to pre-allocate completion queues");
3047 goto fail;
3048 }
3049 
3050 /*
3051 * If we use fewer completion queues than interrupt vectors, return
3052 * some of the interrupt vectors back to the system.
3053 */
3054 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
3055 nvme_release_interrupts(nvme);
3056 
3057 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
3058 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
3059 dev_err(nvme->n_dip, CE_WARN,
3060 "!failed to reduce number of interrupts");
3061 goto fail;
3062 }
3063 }
3064 
3065 /*
3066 * Alloc & register I/O queue pairs
3067 */
3068 
3069 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
3070 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
3071 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
3072 dev_err(nvme->n_dip, CE_WARN,
3073 "!unable to allocate I/O qpair %d", i);
3074 goto fail;
3075 }
3076 
3077 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
3078 dev_err(nvme->n_dip, CE_WARN,
3079 "!unable to create I/O qpair %d", i);
3080 goto fail;
3081 }
3082 }
3083 
3084 /*
3085 * Post more asynchronous event commands to reduce event reporting
3086 * latency as suggested by the spec.
3087 */
3088 if (nvme->n_async_event_supported) {
3089 for (i = 1; i != nvme->n_async_event_limit; i++)
3090 nvme_async_event(nvme);
3091 }
3092 
3093 return (DDI_SUCCESS);
3094 
3095 fail:
3096 (void) nvme_reset(nvme, B_FALSE);
3097 return (DDI_FAILURE);
3098 }
3099 
3100 static uint_t
3101 nvme_intr(caddr_t arg1, caddr_t arg2)
3102 {
3103 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3104 nvme_t *nvme = (nvme_t *)arg1;
3105 int inum = (int)(uintptr_t)arg2;
3106 int ccnt = 0;
3107 int qnum;
3108 
3109 if (inum >= nvme->n_intr_cnt)
3110 return (DDI_INTR_UNCLAIMED);
3111 
3112 if (nvme->n_dead)
3113 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
3114 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
3115 
3116 /*
3117 * The interrupt vector a queue uses is calculated as queue_idx %
3118 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
3119 * in steps of n_intr_cnt to process all queues using this vector.
3120 */
3121 for (qnum = inum;
3122 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
3123 qnum += nvme->n_intr_cnt) {
3124 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
3125 }
3126 
3127 return (ccnt > 0 ?
DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
3128 }
3129 
3130 static void
3131 nvme_release_interrupts(nvme_t *nvme)
3132 {
3133 int i;
3134 
3135 for (i = 0; i < nvme->n_intr_cnt; i++) {
3136 if (nvme->n_inth[i] == NULL)
3137 break;
3138 
3139 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
3140 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
3141 else
3142 (void) ddi_intr_disable(nvme->n_inth[i]);
3143 
3144 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
3145 (void) ddi_intr_free(nvme->n_inth[i]);
3146 }
3147 
3148 kmem_free(nvme->n_inth, nvme->n_inth_sz);
3149 nvme->n_inth = NULL;
3150 nvme->n_inth_sz = 0;
3151 
3152 nvme->n_progress &= ~NVME_INTERRUPTS;
3153 }
3154 
3155 static int
3156 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
3157 {
3158 int nintrs, navail, count;
3159 int ret;
3160 int i;
3161 
3162 if (nvme->n_intr_types == 0) {
3163 ret = ddi_intr_get_supported_types(nvme->n_dip,
3164 &nvme->n_intr_types);
3165 if (ret != DDI_SUCCESS) {
3166 dev_err(nvme->n_dip, CE_WARN,
3167 "!%s: ddi_intr_get_supported_types failed",
3168 __func__);
3169 return (ret);
3170 }
3171 #ifdef __x86
3172 if (get_hwenv() == HW_VMWARE)
3173 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
3174 #endif
3175 }
3176 
3177 if ((nvme->n_intr_types & intr_type) == 0)
3178 return (DDI_FAILURE);
3179 
3180 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
3181 if (ret != DDI_SUCCESS) {
3182 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
3183 __func__);
3184 return (ret);
3185 }
3186 
3187 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
3188 if (ret != DDI_SUCCESS) {
3189 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
3190 __func__);
3191 return (ret);
3192 }
3193 
3194 /* We want at most one interrupt per queue pair.
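 * Clamping navail to nqpairs before ddi_intr_alloc() ensures we never
 * allocate vectors that no queue would use.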
*/ 3195 if (navail > nqpairs) 3196 navail = nqpairs; 3197 3198 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 3199 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 3200 3201 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 3202 &count, 0); 3203 if (ret != DDI_SUCCESS) { 3204 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 3205 __func__); 3206 goto fail; 3207 } 3208 3209 nvme->n_intr_cnt = count; 3210 3211 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 3212 if (ret != DDI_SUCCESS) { 3213 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 3214 __func__); 3215 goto fail; 3216 } 3217 3218 for (i = 0; i < count; i++) { 3219 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 3220 (void *)nvme, (void *)(uintptr_t)i); 3221 if (ret != DDI_SUCCESS) { 3222 dev_err(nvme->n_dip, CE_WARN, 3223 "!%s: ddi_intr_add_handler failed", __func__); 3224 goto fail; 3225 } 3226 } 3227 3228 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 3229 3230 for (i = 0; i < count; i++) { 3231 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3232 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 3233 else 3234 ret = ddi_intr_enable(nvme->n_inth[i]); 3235 3236 if (ret != DDI_SUCCESS) { 3237 dev_err(nvme->n_dip, CE_WARN, 3238 "!%s: enabling interrupt %d failed", __func__, i); 3239 goto fail; 3240 } 3241 } 3242 3243 nvme->n_intr_type = intr_type; 3244 3245 nvme->n_progress |= NVME_INTERRUPTS; 3246 3247 return (DDI_SUCCESS); 3248 3249 fail: 3250 nvme_release_interrupts(nvme); 3251 3252 return (ret); 3253 } 3254 3255 static int 3256 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 3257 { 3258 _NOTE(ARGUNUSED(arg)); 3259 3260 pci_ereport_post(dip, fm_error, NULL); 3261 return (fm_error->fme_status); 3262 } 3263 3264 static int 3265 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3266 { 3267 nvme_t *nvme; 3268 int instance; 3269 int nregs; 3270 off_t regsize; 3271 int i; 3272 char name[32]; 3273 3274 if (cmd != DDI_ATTACH) 3275 return (DDI_FAILURE); 3276 3277 instance = ddi_get_instance(dip); 3278 3279 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 3280 return (DDI_FAILURE); 3281 3282 nvme = ddi_get_soft_state(nvme_state, instance); 3283 ddi_set_driver_private(dip, nvme); 3284 nvme->n_dip = dip; 3285 3286 mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL); 3287 3288 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3289 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 3290 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 3291 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 3292 B_TRUE : B_FALSE; 3293 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3294 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 3295 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3296 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 3297 /* 3298 * Double up the default for completion queues in case of 3299 * queue sharing. 3300 */ 3301 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3302 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 3303 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3304 DDI_PROP_DONTPASS, "async-event-limit", 3305 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 3306 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3307 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 
3308 B_TRUE : B_FALSE; 3309 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3310 DDI_PROP_DONTPASS, "min-phys-block-size", 3311 NVME_DEFAULT_MIN_BLOCK_SIZE); 3312 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3313 DDI_PROP_DONTPASS, "max-submission-queues", -1); 3314 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3315 DDI_PROP_DONTPASS, "max-completion-queues", -1); 3316 3317 if (!ISP2(nvme->n_min_block_size) || 3318 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 3319 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 3320 "using default %d", ISP2(nvme->n_min_block_size) ? 3321 "too low" : "not a power of 2", 3322 NVME_DEFAULT_MIN_BLOCK_SIZE); 3323 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 3324 } 3325 3326 if (nvme->n_submission_queues != -1 && 3327 (nvme->n_submission_queues < 1 || 3328 nvme->n_submission_queues > UINT16_MAX)) { 3329 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not " 3330 "valid. Must be [1..%d]", nvme->n_submission_queues, 3331 UINT16_MAX); 3332 nvme->n_submission_queues = -1; 3333 } 3334 3335 if (nvme->n_completion_queues != -1 && 3336 (nvme->n_completion_queues < 1 || 3337 nvme->n_completion_queues > UINT16_MAX)) { 3338 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not " 3339 "valid. Must be [1..%d]", nvme->n_completion_queues, 3340 UINT16_MAX); 3341 nvme->n_completion_queues = -1; 3342 } 3343 3344 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 3345 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 3346 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 3347 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 3348 3349 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 3350 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 3351 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 3352 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 3353 3354 if (nvme->n_async_event_limit < 1) 3355 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 3356 3357 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 3358 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 3359 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 3360 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 3361 3362 /* 3363 * Setup FMA support. 3364 */ 3365 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 3366 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 3367 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3368 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3369 3370 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 3371 3372 if (nvme->n_fm_cap) { 3373 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 3374 nvme->n_reg_acc_attr.devacc_attr_access = 3375 DDI_FLAGERR_ACC; 3376 3377 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 3378 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3379 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3380 } 3381 3382 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3383 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3384 pci_ereport_setup(dip); 3385 3386 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3387 ddi_fm_handler_register(dip, nvme_fm_errcb, 3388 (void *)nvme); 3389 } 3390 3391 nvme->n_progress |= NVME_FMA_INIT; 3392 3393 /* 3394 * The spec defines several register sets. Only the controller 3395 * registers (set 1) are currently used. 
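 * (In DDI register numbering for PCI devices, set 0 names the config
 * space, so the controller registers mapped by BAR 0/1 show up as
 * register set 1; hence the nregs < 2 check below.)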
3396 */ 3397 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 3398 nregs < 2 || 3399 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 3400 goto fail; 3401 3402 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 3403 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 3404 dev_err(dip, CE_WARN, "!failed to map regset 1"); 3405 goto fail; 3406 } 3407 3408 nvme->n_progress |= NVME_REGS_MAPPED; 3409 3410 /* 3411 * Create PRP DMA cache 3412 */ 3413 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 3414 ddi_driver_name(dip), ddi_get_instance(dip)); 3415 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 3416 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 3417 NULL, (void *)nvme, NULL, 0); 3418 3419 if (nvme_init(nvme) != DDI_SUCCESS) 3420 goto fail; 3421 3422 /* 3423 * Initialize the driver with the UFM subsystem 3424 */ 3425 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 3426 &nvme->n_ufmh, nvme) != 0) { 3427 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 3428 goto fail; 3429 } 3430 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 3431 ddi_ufm_update(nvme->n_ufmh); 3432 nvme->n_progress |= NVME_UFM_INIT; 3433 3434 /* 3435 * Attach the blkdev driver for each namespace. 3436 */ 3437 for (i = 0; i != nvme->n_namespace_count; i++) { 3438 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name, 3439 S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1), 3440 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 3441 dev_err(dip, CE_WARN, 3442 "!failed to create minor node for namespace %d", i); 3443 goto fail; 3444 } 3445 3446 if (nvme->n_ns[i].ns_ignore) 3447 continue; 3448 3449 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], 3450 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP); 3451 3452 if (nvme->n_ns[i].ns_bd_hdl == NULL) { 3453 dev_err(dip, CE_WARN, 3454 "!failed to get blkdev handle for namespace %d", i); 3455 goto fail; 3456 } 3457 3458 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) 3459 != DDI_SUCCESS) { 3460 dev_err(dip, CE_WARN, 3461 "!failed to attach blkdev handle for namespace %d", 3462 i); 3463 goto fail; 3464 } 3465 } 3466 3467 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 3468 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) 3469 != DDI_SUCCESS) { 3470 dev_err(dip, CE_WARN, "nvme_attach: " 3471 "cannot create devctl minor node"); 3472 goto fail; 3473 } 3474 3475 return (DDI_SUCCESS); 3476 3477 fail: 3478 /* attach successful anyway so that FMA can retire the device */ 3479 if (nvme->n_dead) 3480 return (DDI_SUCCESS); 3481 3482 (void) nvme_detach(dip, DDI_DETACH); 3483 3484 return (DDI_FAILURE); 3485 } 3486 3487 static int 3488 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 3489 { 3490 int instance, i; 3491 nvme_t *nvme; 3492 3493 if (cmd != DDI_DETACH) 3494 return (DDI_FAILURE); 3495 3496 instance = ddi_get_instance(dip); 3497 3498 nvme = ddi_get_soft_state(nvme_state, instance); 3499 3500 if (nvme == NULL) 3501 return (DDI_FAILURE); 3502 3503 ddi_remove_minor_node(dip, "devctl"); 3504 mutex_destroy(&nvme->n_minor.nm_mutex); 3505 3506 if (nvme->n_ns) { 3507 for (i = 0; i != nvme->n_namespace_count; i++) { 3508 ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name); 3509 mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex); 3510 3511 if (nvme->n_ns[i].ns_bd_hdl) { 3512 (void) bd_detach_handle( 3513 nvme->n_ns[i].ns_bd_hdl); 3514 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); 3515 } 3516 3517 if (nvme->n_ns[i].ns_idns) 3518 kmem_free(nvme->n_ns[i].ns_idns, 3519 sizeof 
(nvme_identify_nsid_t)); 3520 if (nvme->n_ns[i].ns_devid) 3521 strfree(nvme->n_ns[i].ns_devid); 3522 } 3523 3524 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 3525 nvme->n_namespace_count); 3526 } 3527 if (nvme->n_progress & NVME_UFM_INIT) { 3528 ddi_ufm_fini(nvme->n_ufmh); 3529 mutex_destroy(&nvme->n_fwslot_mutex); 3530 } 3531 3532 if (nvme->n_progress & NVME_INTERRUPTS) 3533 nvme_release_interrupts(nvme); 3534 3535 for (i = 0; i < nvme->n_cq_count; i++) { 3536 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL) 3537 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq); 3538 } 3539 3540 if (nvme->n_ioq_count > 0) { 3541 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3542 if (nvme->n_ioq[i] != NULL) { 3543 /* TODO: send destroy queue commands */ 3544 nvme_free_qpair(nvme->n_ioq[i]); 3545 } 3546 } 3547 3548 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 3549 (nvme->n_ioq_count + 1)); 3550 } 3551 3552 if (nvme->n_prp_cache != NULL) { 3553 kmem_cache_destroy(nvme->n_prp_cache); 3554 } 3555 3556 if (nvme->n_progress & NVME_REGS_MAPPED) { 3557 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 3558 (void) nvme_reset(nvme, B_FALSE); 3559 } 3560 3561 if (nvme->n_progress & NVME_CTRL_LIMITS) 3562 sema_destroy(&nvme->n_abort_sema); 3563 3564 if (nvme->n_progress & NVME_ADMIN_QUEUE) 3565 nvme_free_qpair(nvme->n_adminq); 3566 3567 if (nvme->n_cq_count > 0) { 3568 nvme_destroy_cq_array(nvme, 0); 3569 nvme->n_cq = NULL; 3570 nvme->n_cq_count = 0; 3571 } 3572 3573 if (nvme->n_idctl) 3574 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 3575 3576 if (nvme->n_progress & NVME_REGS_MAPPED) 3577 ddi_regs_map_free(&nvme->n_regh); 3578 3579 if (nvme->n_progress & NVME_FMA_INIT) { 3580 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3581 ddi_fm_handler_unregister(nvme->n_dip); 3582 3583 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3584 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3585 pci_ereport_teardown(nvme->n_dip); 3586 3587 ddi_fm_fini(nvme->n_dip); 3588 } 3589 3590 if (nvme->n_vendor != NULL) 3591 strfree(nvme->n_vendor); 3592 3593 if (nvme->n_product != NULL) 3594 strfree(nvme->n_product); 3595 3596 ddi_soft_state_free(nvme_state, instance); 3597 3598 return (DDI_SUCCESS); 3599 } 3600 3601 static int 3602 nvme_quiesce(dev_info_t *dip) 3603 { 3604 int instance; 3605 nvme_t *nvme; 3606 3607 instance = ddi_get_instance(dip); 3608 3609 nvme = ddi_get_soft_state(nvme_state, instance); 3610 3611 if (nvme == NULL) 3612 return (DDI_FAILURE); 3613 3614 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE); 3615 3616 (void) nvme_reset(nvme, B_TRUE); 3617 3618 return (DDI_FAILURE); 3619 } 3620 3621 static int 3622 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer) 3623 { 3624 nvme_t *nvme = cmd->nc_nvme; 3625 int nprp_page, nprp; 3626 uint64_t *prp; 3627 3628 if (xfer->x_ndmac == 0) 3629 return (DDI_FAILURE); 3630 3631 cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress; 3632 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 3633 3634 if (xfer->x_ndmac == 1) { 3635 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 3636 return (DDI_SUCCESS); 3637 } else if (xfer->x_ndmac == 2) { 3638 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress; 3639 return (DDI_SUCCESS); 3640 } 3641 3642 xfer->x_ndmac--; 3643 3644 nprp_page = nvme->n_pagesize / sizeof (uint64_t); 3645 ASSERT(nprp_page > 0); 3646 nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page; 3647 3648 /* 3649 * We currently don't support chained PRPs and set up our DMA 3650 * attributes to reflect that. If we still get an I/O request 3651 * that needs a chained PRP something is very wrong. 
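 * A worked example (illustrative, 4k pages): one PRP list page holds
 * 4096 / 8 = 512 entries, so the largest unchained transfer is
 * 512 * 4k = 2M; nvme_init() clamps n_max_data_transfer_size to exactly
 * this product.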
3652 */
3653 VERIFY(nprp == 1);
3654 
3655 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
3656 bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);
3657 
3658 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;
3659 
3660 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3661 for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
3662 xfer->x_ndmac > 0;
3663 prp++, xfer->x_ndmac--) {
3664 *prp = xfer->x_dmac.dmac_laddress;
3665 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
3666 }
3667 
3668 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
3669 DDI_DMA_SYNC_FORDEV);
3670 return (DDI_SUCCESS);
3671 }
3672 
3673 static nvme_cmd_t *
3674 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
3675 {
3676 nvme_t *nvme = ns->ns_nvme;
3677 nvme_cmd_t *cmd;
3678 
3679 /*
3680 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
3681 */
3682 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ?
3683 KM_NOSLEEP : KM_SLEEP);
3684 
3685 if (cmd == NULL)
3686 return (NULL);
3687 
3688 cmd->nc_sqe.sqe_opc = opc;
3689 cmd->nc_callback = nvme_bd_xfer_done;
3690 cmd->nc_xfer = xfer;
3691 
3692 switch (opc) {
3693 case NVME_OPC_NVM_WRITE:
3694 case NVME_OPC_NVM_READ:
3695 VERIFY(xfer->x_nblks <= 0x10000);
3696 
3697 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3698 
3699 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
3700 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
3701 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
3702 
3703 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS)
3704 goto fail;
3705 break;
3706 
3707 case NVME_OPC_NVM_FLUSH:
3708 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3709 break;
3710 
3711 default:
3712 goto fail;
3713 }
3714 
3715 return (cmd);
3716 
3717 fail:
3718 nvme_free_cmd(cmd);
3719 return (NULL);
3720 }
3721 
3722 static void
3723 nvme_bd_xfer_done(void *arg)
3724 {
3725 nvme_cmd_t *cmd = arg;
3726 bd_xfer_t *xfer = cmd->nc_xfer;
3727 int error = 0;
3728 
3729 error = nvme_check_cmd_status(cmd);
3730 nvme_free_cmd(cmd);
3731 
3732 bd_xfer_done(xfer, error);
3733 }
3734 
3735 static void
3736 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
3737 {
3738 nvme_namespace_t *ns = arg;
3739 nvme_t *nvme = ns->ns_nvme;
3740 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
3741 
3742 /*
3743 * Set the blkdev qcount to the number of submission queues.
3744 * It will then create one waitq/runq pair for each submission
3745 * queue and spread I/O requests across the queues.
3746 */
3747 drive->d_qcount = nvme->n_ioq_count;
3748 
3749 /*
3750 * I/O activity to individual namespaces is distributed across
3751 * each of the d_qcount blkdev queues (which has been set to
3752 * the number of nvme submission queues). d_qsize is the number
3753 * of submitted and not completed I/Os within each queue that blkdev
3754 * will allow before it starts holding them in the waitq.
3755 *
3756 * Each namespace will create a child blkdev instance; for each one
3757 * we try to set the d_qsize so that each namespace gets an
3758 * equal portion of the submission queue.
3759 *
3760 * If, after the nvme drive is instantiated, n_namespaces_attachable
3761 * changes because another namespace is attached, that namespace will
3762 * calculate a different d_qsize. It may even be that the sum of the
3763 * d_qsizes is now beyond the submission queue size. Should that be
3764 * the case and the I/O rate is such that blkdev attempts to submit
3765 * more I/Os than the size of the submission queue, the excess I/Os
3766 * will be held behind the semaphore nq_sema.
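 * Example (illustrative): an io-squeue-len of 1024 and 4 attachable
 * namespaces gives each child blkdev a d_qsize of 256.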
	drive->d_qsize = nvme->n_io_squeue_len / ns_count;

	/*
	 * Don't let the queue size drop below the minimum, though.
	 */
	drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);

	/*
	 * d_maxxfer is not set, which means the value is taken from the DMA
	 * attributes specified to bd_alloc_handle.
	 */

	drive->d_removable = B_FALSE;
	drive->d_hotpluggable = B_FALSE;

	bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
	drive->d_target = ns->ns_id;
	drive->d_lun = 0;

	drive->d_model = nvme->n_idctl->id_model;
	drive->d_model_len = sizeof (nvme->n_idctl->id_model);
	drive->d_vendor = nvme->n_vendor;
	drive->d_vendor_len = strlen(nvme->n_vendor);
	drive->d_product = nvme->n_product;
	drive->d_product_len = strlen(nvme->n_product);
	drive->d_serial = nvme->n_idctl->id_serial;
	drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
	drive->d_revision = nvme->n_idctl->id_fwrev;
	drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
}

static int
nvme_bd_mediainfo(void *arg, bd_media_t *media)
{
	nvme_namespace_t *ns = arg;

	media->m_nblks = ns->ns_block_count;
	media->m_blksize = ns->ns_block_size;
	media->m_readonly = B_FALSE;
	media->m_solidstate = B_TRUE;

	media->m_pblksize = ns->ns_best_block_size;

	return (0);
}

static int
nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
{
	nvme_t *nvme = ns->ns_nvme;
	nvme_cmd_t *cmd;
	nvme_qpair_t *ioq;
	boolean_t poll;
	int ret;

	if (nvme->n_dead)
		return (EIO);

	cmd = nvme_create_nvm_cmd(ns, opc, xfer);
	if (cmd == NULL)
		return (ENOMEM);

	cmd->nc_sqid = xfer->x_qnum + 1;
	ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
	ioq = nvme->n_ioq[cmd->nc_sqid];

	/*
	 * Get the polling flag before submitting the command. The command may
	 * complete immediately after it was submitted, which means we must
	 * treat both cmd and xfer as if they have been freed already.
	 */
	poll = (xfer->x_flags & BD_XFER_POLL) != 0;

	ret = nvme_submit_io_cmd(ioq, cmd);

	if (ret != 0)
		return (ret);

	if (!poll)
		return (0);

	do {
		cmd = nvme_retrieve_cmd(nvme, ioq);
		if (cmd != NULL)
			cmd->nc_callback(cmd);
		else
			drv_usecwait(10);
	} while (ioq->nq_active_cmds != 0);

	return (0);
}

static int
nvme_bd_read(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
}

static int
nvme_bd_write(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
}

static int
nvme_bd_sync(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	if (ns->ns_nvme->n_dead)
		return (EIO);

	/*
	 * If the volatile write cache is not present or not enabled the FLUSH
	 * command is a no-op, so we can take a shortcut here.
	 */
	if (!ns->ns_nvme->n_write_cache_present) {
		bd_xfer_done(xfer, ENOTSUP);
		return (0);
	}

	if (!ns->ns_nvme->n_write_cache_enabled) {
		bd_xfer_done(xfer, 0);
		return (0);
	}

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
}

static int
nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
{
	nvme_namespace_t *ns = arg;

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	if (*(uint64_t *)ns->ns_eui64 != 0) {
		return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN,
		    sizeof (ns->ns_eui64), ns->ns_eui64, devid));
	} else {
		return (ddi_devid_init(devinfo, DEVID_ENCAP,
		    strlen(ns->ns_devid), ns->ns_devid, devid));
	}
}

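/*
 * The character device minor number encodes both the controller instance and
 * the namespace ID; NVME_MINOR_INST() and NVME_MINOR_NSID() extract the
 * respective parts below. An nsid of 0 refers to the controller minor node
 * rather than a namespace.
 */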
static int
nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cred_p));
#endif
	minor_t minor = getminor(*devp);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	nvme_minor_state_t *nm;
	int rv = 0;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	if (nvme->n_dead)
		return (EIO);

	nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;

	mutex_enter(&nm->nm_mutex);
	if (nm->nm_oexcl) {
		rv = EBUSY;
		goto out;
	}

	if (flag & FEXCL) {
		if (nm->nm_ocnt != 0) {
			rv = EBUSY;
			goto out;
		}
		nm->nm_oexcl = B_TRUE;
	}

	nm->nm_ocnt++;

out:
	mutex_exit(&nm->nm_mutex);
	return (rv);
}

static int
nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cred_p));
	_NOTE(ARGUNUSED(flag));
#endif
	minor_t minor = getminor(dev);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	nvme_minor_state_t *nm;

	if (otyp != OTYP_CHR)
		return (ENXIO);

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;

	mutex_enter(&nm->nm_mutex);
	if (nm->nm_oexcl)
		nm->nm_oexcl = B_FALSE;

	ASSERT(nm->nm_ocnt > 0);
	nm->nm_ocnt--;
	mutex_exit(&nm->nm_mutex);

	return (0);
}

static int
nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	int rv = 0;
	void *idctl;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < NVME_IDENTIFY_BUFSIZE)
		return (EINVAL);

	if ((rv = nvme_identify(nvme, B_TRUE, nsid, (void **)&idctl)) != 0)
		return (rv);

	if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode)
	    != 0)
		rv = EFAULT;

	kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);

	return (rv);
}

/*
 * Execute commands on behalf of the various ioctls.
 */
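/*
 * For example, nvme_ioctl_firmware_download() below builds an
 * NVME_OPC_FW_IMAGE_LOAD submission queue entry and calls
 *
 *	nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen, FWRITE,
 *	    NULL, nvme_admin_cmd_timeout);
 *
 * to transfer one chunk of the firmware image to the device.
 */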
static int
nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr,
    uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout)
{
	nvme_cmd_t *cmd;
	nvme_qpair_t *ioq;
	int rv = 0;

	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	if (is_admin) {
		cmd->nc_sqid = 0;
		ioq = nvme->n_adminq;
	} else {
		cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
		ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
		ioq = nvme->n_ioq[cmd->nc_sqid];
	}

	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe = *sqe;

	if ((rwk & (FREAD | FWRITE)) != 0) {
		if (data_addr == NULL) {
			rv = EINVAL;
			goto free_cmd;
		}

		/*
		 * Because we use PRPs and haven't implemented PRP
		 * lists here, the maximum data size is restricted to
		 * 2 pages.
		 */
		if (data_len > 2 * nvme->n_pagesize) {
			dev_err(nvme->n_dip, CE_WARN, "!Data size %u is too "
			    "large for nvme_ioc_cmd(). Limit is 2 pages "
			    "(%u bytes)", data_len, 2 * nvme->n_pagesize);

			rv = EINVAL;
			goto free_cmd;
		}

		if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ,
		    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!nvme_zalloc_dma failed for nvme_ioc_cmd()");

			rv = ENOMEM;
			goto free_cmd;
		}

		if (cmd->nc_dma->nd_ncookie > 2) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!too many DMA cookies for nvme_ioc_cmd()");
			atomic_inc_32(&nvme->n_too_many_cookies);

			rv = E2BIG;
			goto free_cmd;
		}

		cmd->nc_sqe.sqe_dptr.d_prp[0] =
		    cmd->nc_dma->nd_cookie.dmac_laddress;

		if (cmd->nc_dma->nd_ncookie > 1) {
			ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
			    &cmd->nc_dma->nd_cookie);
			cmd->nc_sqe.sqe_dptr.d_prp[1] =
			    cmd->nc_dma->nd_cookie.dmac_laddress;
		}

		if ((rwk & FWRITE) != 0) {
			if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp,
			    data_len, rwk & FKIOCTL) != 0) {
				rv = EFAULT;
				goto free_cmd;
			}
		}
	}

	if (is_admin) {
		nvme_admin_cmd(cmd, timeout);
	} else {
		mutex_enter(&cmd->nc_mutex);

		rv = nvme_submit_io_cmd(ioq, cmd);

		if (rv == EAGAIN) {
			mutex_exit(&cmd->nc_mutex);
			dev_err(cmd->nc_nvme->n_dip, CE_WARN,
			    "!nvme_ioc_cmd() failed, I/O Q full");
			goto free_cmd;
		}

		nvme_wait_cmd(cmd, timeout);

		mutex_exit(&cmd->nc_mutex);
	}

	if (cqe != NULL)
		*cqe = cmd->nc_cqe;

	if ((rv = nvme_check_cmd_status(cmd)) != 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_ioc_cmd() failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);

		goto free_cmd;
	}

	if ((rwk & FREAD) != 0) {
		if (ddi_copyout(cmd->nc_dma->nd_memp,
		    data_addr, data_len, rwk & FKIOCTL) != 0)
			rv = EFAULT;
	}

free_cmd:
	nvme_free_cmd(cmd);

	return (rv);
}

static int
nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nsid, cred_p));
	int rv = 0;
	nvme_reg_cap_t cap = { 0 };
	nvme_capabilities_t nc;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < sizeof (nc))
		return (EINVAL);

	cap.r = nvme_get64(nvme, NVME_REG_CAP);

	/*
	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
	 * specify the base page size of 4k (1<<12), so add 12 here to
	 * get the real page size value.
	 */
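	/*
	 * For example, cap_mpsmin == 0 yields mpsmin = 1 << 12 = 4096,
	 * and cap_mpsmax == 4 yields mpsmax = 1 << 16 = 65536.
	 */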
	nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax);
	nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin);

	if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0)
		rv = EFAULT;

	return (rv);
}

static int
nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	void *log = NULL;
	size_t bufsize = 0;
	int rv = 0;

	if ((mode & FREAD) == 0)
		return (EPERM);

	switch (nioc->n_arg) {
	case NVME_LOGPAGE_ERROR:
		if (nsid != 0)
			return (EINVAL);
		break;
	case NVME_LOGPAGE_HEALTH:
		if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0)
			return (EINVAL);

		if (nsid == 0)
			nsid = (uint32_t)-1;

		break;
	case NVME_LOGPAGE_FWSLOT:
		if (nsid != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid)
	    != DDI_SUCCESS)
		return (EIO);

	if (nioc->n_len < bufsize) {
		kmem_free(log, bufsize);
		return (EINVAL);
	}

	if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0)
		rv = EFAULT;

	nioc->n_len = bufsize;
	kmem_free(log, bufsize);

	return (rv);
}

static int
nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	void *buf = NULL;
	size_t bufsize = 0;
	uint32_t res = 0;
	uint8_t feature;
	int rv = 0;

	if ((mode & FREAD) == 0)
		return (EPERM);

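	/*
	 * n_arg packs the feature identifier into the upper 32 bits and an
	 * optional feature-specific argument into the lower 32 bits; e.g.
	 * for NVME_FEAT_INTR_VECT the lower dword selects the vector.
	 */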
	if ((nioc->n_arg >> 32) > 0xff)
		return (EINVAL);

	feature = (uint8_t)(nioc->n_arg >> 32);

	switch (feature) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_TEMPERATURE:
	case NVME_FEAT_ERROR:
	case NVME_FEAT_NQUEUES:
	case NVME_FEAT_INTR_COAL:
	case NVME_FEAT_WRITE_ATOM:
	case NVME_FEAT_ASYNC_EVENT:
	case NVME_FEAT_PROGRESS:
		if (nsid != 0)
			return (EINVAL);
		break;

	case NVME_FEAT_INTR_VECT:
		if (nsid != 0)
			return (EINVAL);

		res = nioc->n_arg & 0xffffffffUL;
		if (res >= nvme->n_intr_cnt)
			return (EINVAL);
		break;

	case NVME_FEAT_LBA_RANGE:
		if (nvme->n_lba_range_supported == B_FALSE)
			return (EINVAL);

		if (nsid == 0 ||
		    nsid > nvme->n_namespace_count)
			return (EINVAL);

		break;

	case NVME_FEAT_WRITE_CACHE:
		if (nsid != 0)
			return (EINVAL);

		if (!nvme->n_write_cache_present)
			return (EINVAL);

		break;

	case NVME_FEAT_AUTO_PST:
		if (nsid != 0)
			return (EINVAL);

		if (!nvme->n_auto_pst_supported)
			return (EINVAL);

		break;

	default:
		return (EINVAL);
	}

	rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf,
	    &bufsize);
	if (rv != 0)
		return (rv);

	if (nioc->n_len < bufsize) {
		kmem_free(buf, bufsize);
		return (EINVAL);
	}

	if (buf && ddi_copyout(buf, (void *)nioc->n_buf, bufsize, mode) != 0)
		rv = EFAULT;

	kmem_free(buf, bufsize);
	nioc->n_arg = res;
	nioc->n_len = bufsize;

	return (rv);
}

static int
nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nsid, cred_p));

	if ((mode & FREAD) == 0)
		return (EPERM);

	nioc->n_arg = nvme->n_intr_cnt;
	return (0);
}

static int
nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nsid, cred_p));
	int rv = 0;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < sizeof (nvme->n_version))
		return (ENOMEM);

	if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf,
	    sizeof (nvme->n_version), mode) != 0)
		rv = EFAULT;

	return (rv);
}

static int
nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	nvme_format_nvm_t frmt = { 0 };
	int c_nsid = nsid != 0 ? nsid - 1 : 0;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

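	/*
	 * The lower 32 bits of n_arg carry the requested format settings,
	 * which are decoded through the nvme_format_nvm_t union below.
	 */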
	frmt.r = nioc->n_arg & 0xffffffff;

	/*
	 * Check whether the FORMAT NVM command is supported.
	 */
	if (nvme->n_idctl->id_oacs.oa_format == 0)
		return (EINVAL);

	/*
	 * Don't allow format or secure erase of individual namespace if that
	 * would cause a format or secure erase of all namespaces.
	 */
	if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0)
		return (EINVAL);

	if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE &&
	    nvme->n_idctl->id_fna.fn_sec_erase != 0)
		return (EINVAL);

	/*
	 * Don't allow formatting with Protection Information.
	 */
	if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0)
		return (EINVAL);

	/*
	 * Don't allow formatting using an illegal LBA format, or any LBA
	 * format that uses metadata.
	 */
	if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf ||
	    nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0)
		return (EINVAL);

	/*
	 * Don't allow formatting using an illegal Secure Erase setting.
	 */
	if (frmt.b.fm_ses > NVME_FRMT_MAX_SES ||
	    (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO &&
	    nvme->n_idctl->id_fna.fn_crypt_erase == 0))
		return (EINVAL);

	if (nsid == 0)
		nsid = (uint32_t)-1;

	return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0,
	    B_FALSE, frmt.b.fm_ses));
}

static int
nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nioc));
	int rv = 0;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid == 0)
		return (EINVAL);

	rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl);
	if (rv != DDI_SUCCESS)
		rv = EBUSY;

	return (rv);
}

static int
nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nioc));
	nvme_identify_nsid_t *idns;
	int rv = 0;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid == 0)
		return (EINVAL);

	/*
	 * Identify the namespace again, then free the old identify data.
	 */
	idns = nvme->n_ns[nsid - 1].ns_idns;
	if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
		return (EIO);

	kmem_free(idns, sizeof (nvme_identify_nsid_t));

	rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl);
	if (rv != DDI_SUCCESS)
		rv = EBUSY;

	return (rv);
}

static void
nvme_ufm_update(nvme_t *nvme)
{
	mutex_enter(&nvme->n_fwslot_mutex);
	ddi_ufm_update(nvme->n_ufmh);
	if (nvme->n_fwslot != NULL) {
		kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
		nvme->n_fwslot = NULL;
	}
	mutex_exit(&nvme->n_fwslot_mutex);
}

static int
nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	int rv = 0;
	size_t len, copylen;
	offset_t offset;
	uintptr_t buf;
	nvme_sqe_t sqe = {
	    .sqe_opc	= NVME_OPC_FW_IMAGE_LOAD
	};

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid != 0)
		return (EINVAL);

	/*
	 * The length (in n_len) is restricted to the number of DWORDs
	 * expressible in 32 bits.
	 */
	if (nioc->n_len > NVME_FW_OFFSETB_MAX)
		return (EINVAL);

	/* Confirm that both offset and length are a multiple of DWORD bytes */
	if ((nioc->n_len & NVME_DWORD_MASK) != 0 ||
	    (nioc->n_arg & NVME_DWORD_MASK) != 0)
		return (EINVAL);

	len = nioc->n_len;
	offset = nioc->n_arg;
	buf = (uintptr_t)nioc->n_buf;
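	/*
	 * For example, with a 4 KiB page size each loop iteration moves at
	 * most 8 KiB of the image: cdw10 carries the chunk length in DWORDs
	 * minus one, cdw11 the current offset in DWORDs.
	 */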
	while (len > 0 && rv == 0) {
		/*
		 * nvme_ioc_cmd() does not use SGLs or PRP lists.
		 * It is limited to 2 PRPs per NVM command, so limit
		 * the size of the data to 2 pages.
		 */
		copylen = MIN(2 * nvme->n_pagesize, len);

		sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
		sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);

		rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen,
		    FWRITE, NULL, nvme_admin_cmd_timeout);

		buf += copylen;
		offset += copylen;
		len -= copylen;
	}

	/*
	 * Let the DDI UFM subsystem know that the firmware information for
	 * this device has changed.
	 */
	nvme_ufm_update(nvme);

	return (rv);
}

static int
nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
	uint32_t slot = nioc->n_arg & 0xffffffff;
	uint32_t action = nioc->n_arg >> 32;
	nvme_cqe_t cqe = { 0 };
	nvme_sqe_t sqe = {
	    .sqe_opc	= NVME_OPC_FW_ACTIVATE
	};
	int timeout;
	int rv;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid != 0)
		return (EINVAL);

	/* Validate that the slot is in range. */
	if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX)
		return (EINVAL);

	switch (action) {
	case NVME_FWC_SAVE:
	case NVME_FWC_SAVE_ACTIVATE:
		timeout = nvme_commit_save_cmd_timeout;
		break;
	case NVME_FWC_ACTIVATE:
	case NVME_FWC_ACTIVATE_IMMED:
		timeout = nvme_admin_cmd_timeout;
		break;
	default:
		return (EINVAL);
	}

	fc_dw10.b.fc_slot = slot;
	fc_dw10.b.fc_action = action;
	sqe.sqe_cdw10 = fc_dw10.r;

	rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout);

	nioc->n_arg = ((uint64_t)cqe.cqe_sf.sf_sct << 16) | cqe.cqe_sf.sf_sc;

	/*
	 * Let the DDI UFM subsystem know that the firmware information for
	 * this device has changed.
	 */
	nvme_ufm_update(nvme);

	return (rv);
}

static int
nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(rval_p));
#endif
	minor_t minor = getminor(dev);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	int rv = 0;
	nvme_ioctl_t nioc;

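	/*
	 * The handler table is indexed by NVME_IOC_CMD(cmd) and must match
	 * the numbering of the NVME_IOC_* commands; slot 0 is unused.
	 */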
	int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
		NULL,
		nvme_ioctl_identify,
		nvme_ioctl_identify,
		nvme_ioctl_capabilities,
		nvme_ioctl_get_logpage,
		nvme_ioctl_get_features,
		nvme_ioctl_intr_cnt,
		nvme_ioctl_version,
		nvme_ioctl_format,
		nvme_ioctl_detach,
		nvme_ioctl_attach,
		nvme_ioctl_firmware_download,
		nvme_ioctl_firmware_commit
	};

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	if (IS_DEVCTL(cmd))
		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;
		if (ddi_copyin((void *)arg, &nioc32, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		nioc.n_len = nioc32.n_len;
		nioc.n_buf = nioc32.n_buf;
		nioc.n_arg = nioc32.n_arg;
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyin((void *)arg, &nioc, sizeof (nvme_ioctl_t), mode)
		    != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	if (nvme->n_dead && cmd != NVME_IOC_DETACH)
		return (EIO);

	if (cmd == NVME_IOC_IDENTIFY_CTRL) {
		/*
		 * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl
		 * and attachment point nodes.
		 */
		nsid = 0;
	} else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) {
		/*
		 * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node, it
		 * will always return identify data for namespace 1.
		 */
		nsid = 1;
	}

	if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
		rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
		    cred_p);
	else
		rv = EINVAL;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;

		nioc32.n_len = (size32_t)nioc.n_len;
		nioc32.n_buf = (uintptr32_t)nioc.n_buf;
		nioc32.n_arg = nioc.n_arg;

		if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t),
		    mode) != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	return (rv);
}

/*
 * DDI UFM Callbacks
 */
static int
nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    ddi_ufm_image_t *img)
{
	nvme_t *nvme = arg;

	if (imgno != 0)
		return (EINVAL);

	ddi_ufm_image_set_desc(img, "Firmware");
	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);

	return (0);
}

/*
 * Fill out firmware slot information for the requested slot. The firmware
 * slot information is gathered by requesting the Firmware Slot Information log
 * page. The format of the page is described in section 5.10.1.3 of the NVMe
 * specification.
 *
 * We lazily cache the log page on the first call and then invalidate the cache
 * data after a successful firmware download or firmware commit command.
 * The cached data is protected by a mutex as the state can change
 * asynchronously to this callback.
 */
static int
nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    uint_t slotno, ddi_ufm_slot_t *slot)
{
	nvme_t *nvme = arg;
	void *log = NULL;
	size_t bufsize;
	ddi_ufm_attr_t attr = 0;
	char fw_ver[NVME_FWVER_SZ + 1];
	int ret;

	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
		return (EINVAL);

	mutex_enter(&nvme->n_fwslot_mutex);
	if (nvme->n_fwslot == NULL) {
		ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize,
		    NVME_LOGPAGE_FWSLOT, 0);
		if (ret != DDI_SUCCESS ||
		    bufsize != sizeof (nvme_fwslot_log_t)) {
			if (log != NULL)
				kmem_free(log, bufsize);
			mutex_exit(&nvme->n_fwslot_mutex);
			return (EIO);
		}
		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
	}

	/*
	 * NVMe numbers firmware slots starting at 1.
	 */
	if (slotno == (nvme->n_fwslot->fw_afi - 1))
		attr |= DDI_UFM_ATTR_ACTIVE;

	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
		attr |= DDI_UFM_ATTR_WRITEABLE;

	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
		attr |= DDI_UFM_ATTR_EMPTY;
	} else {
		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
		    NVME_FWVER_SZ);
		fw_ver[NVME_FWVER_SZ] = '\0';
		ddi_ufm_slot_set_version(slot, fw_ver);
	}
	mutex_exit(&nvme->n_fwslot_mutex);

	ddi_ufm_slot_set_attrs(slot, attr);

	return (0);
}

static int
nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}