/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2019 Unix Software Ltd.
 * Copyright 2026 Oxide Computer Company.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */

#ifndef _NVME_VAR_H
#define	_NVME_VAR_H

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/blkdev.h>
#include <sys/taskq_impl.h>
#include <sys/list.h>
#include <sys/ddi_ufm.h>
#include <nvme_common.h>

/*
 * NVMe driver state
 */

#ifdef __cplusplus
extern "C" {
#endif

#define	NVME_MODULE_NAME		"nvme"

/*
 * Bit flags recording which per-controller initialization steps have
 * completed. NOTE(review): presumably consulted during detach/cleanup so
 * that only the steps actually performed are torn down — confirm against
 * the attach/detach paths in the driver proper.
 */
typedef enum {
	NVME_PCI_CONFIG			= 1 << 0,
	NVME_FMA_INIT			= 1 << 1,
	NVME_REGS_MAPPED		= 1 << 2,
	NVME_ADMIN_QUEUE		= 1 << 3,
	NVME_CTRL_LIMITS		= 1 << 4,
	NVME_INTERRUPTS			= 1 << 5,
	NVME_UFM_INIT			= 1 << 6,
	NVME_MUTEX_INIT			= 1 << 7,
	NVME_MGMT_INIT			= 1 << 8,
	NVME_STAT_INIT			= 1 << 9,
	NVME_NS_INIT			= 1 << 10
} nvme_progress_t;

/* Per-namespace initialization progress flags (see nvme_progress_t above). */
typedef enum {
	NVME_NS_LOCK	= 1 << 0,
	/*
	 * This flag indicates whether or not we've created a minor node for
	 * this namespace. We limit the number of minor nodes that we actually
	 * create in the file system due to minor node constraints. The
	 * controller minors are preferred to the namespace minors, so the lack
	 * of such a minor is considered a non-fatal condition. Minor nodes are
	 * removed all in one go right now when we detach, so this currently
	 * serves as an internal signifier.
	 */
	NVME_NS_MINOR	= 1 << 1
} nvme_ns_progress_t;

/* Per-device behavioral quirks, keyed off hardware identity at attach. */
typedef enum {
	/*
	 * The controller fails to properly process commands on the admin queue
	 * if the first one has CID 0. Subsequent use of CID 0 doesn't present
	 * a problem.
	 */
	NVME_QUIRK_START_CID	= 1 << 0,
} nvme_quirk_t;

/* Queue sizing and event limits; defaults may be overridden by tunables. */
#define	NVME_MIN_ADMIN_QUEUE_LEN	16
#define	NVME_MIN_IO_QUEUE_LEN		16
#define	NVME_DEFAULT_ADMIN_QUEUE_LEN	256
#define	NVME_DEFAULT_IO_QUEUE_LEN	1024
#define	NVME_DEFAULT_ASYNC_EVENT_LIMIT	10
#define	NVME_MIN_ASYNC_EVENT_LIMIT	1
#define	NVME_DEFAULT_MIN_BLOCK_SIZE	512

/* Forward declarations so the structures below can reference one another. */
typedef struct nvme nvme_t;
typedef struct nvme_namespace nvme_namespace_t;
typedef struct nvme_minor nvme_minor_t;
typedef struct nvme_lock nvme_lock_t;
typedef struct nvme_minor_lock_info nvme_minor_lock_info_t;
typedef struct nvme_dma nvme_dma_t;
typedef struct nvme_cmd nvme_cmd_t;
typedef struct nvme_cq nvme_cq_t;
typedef struct nvme_qpair nvme_qpair_t;
typedef struct nvme_task_arg nvme_task_arg_t;
typedef struct nvme_device_stat nvme_device_stat_t;
typedef struct nvme_admin_stat nvme_admin_stat_t;

/*
 * These states represent the minor's perspective. That is, of a minor's
 * namespace and controller lock, where is it?
 */
typedef enum {
	NVME_LOCK_STATE_UNLOCKED	= 0,
	NVME_LOCK_STATE_BLOCKED,
	NVME_LOCK_STATE_ACQUIRED
} nvme_minor_lock_state_t;

/*
 * Tracks a single minor's relationship to one lock (either the controller
 * lock or a namespace lock). Linked onto the owning nvme_lock_t's lists via
 * nli_node.
 */
struct nvme_minor_lock_info {
	list_node_t nli_node;
	nvme_lock_t *nli_lock;
	nvme_minor_lock_state_t nli_state;
	nvme_lock_level_t nli_curlevel;
	/*
	 * While the minor points back to itself and the nvme_t should always
	 * point to the current controller, the namespace should only point to
	 * one if this is a particular namespace lock. The former two are
	 * initialized at minor initialization time.
	 */
	nvme_minor_t *nli_minor;
	nvme_t *nli_nvme;
	nvme_namespace_t *nli_ns;
	/*
	 * This is the common ioctl information that should be filled in when
	 * we're being woken up for any reason other than an interrupted
	 * signal. This should only be set while blocking.
	 */
	nvme_ioctl_common_t *nli_ioc;
	/*
	 * The following are provided for debugging purposes. In particular,
	 * information like the kthread_t and related that performed this
	 * should be considered suspect as it represents who took the
	 * operation, not who performed the operation (unless we're actively
	 * blocking).
	 */
	hrtime_t nli_last_change;
	uintptr_t nli_acq_kthread;
	pid_t nli_acq_pid;
};

/*
 * Per-open-instance state for a controller or namespace minor node.
 */
struct nvme_minor {
	/*
	 * The following three fields are set when this is created.
	 */
	id_t nm_minor;
	nvme_t *nm_ctrl;
	nvme_namespace_t *nm_ns;
	/*
	 * This link is used to index this minor on the global list of active
	 * open-related minors. This is only manipulated under the
	 * nvme_open_minors_mutex.
	 */
	avl_node_t nm_avl;
	/*
	 * Information related to locking. Note, there is no pointer to a
	 * locked controller as the only one can be the one specified here.
	 * This data is protected by the controller's n_minor_mutex.
	 */
	kcondvar_t nm_cv;
	nvme_minor_lock_info_t nm_ctrl_lock;
	nvme_minor_lock_info_t nm_ns_lock;
};

/*
 * A single reader/writer lock instance (one per controller plus one per
 * namespace). Holders and waiters are tracked as nvme_minor_lock_info_t
 * entries on the lists below.
 */
struct nvme_lock {
	nvme_minor_lock_info_t *nl_writer;
	list_t nl_readers;
	list_t nl_pend_readers;
	list_t nl_pend_writers;
	/*
	 * The following are stats to indicate how often certain locking
	 * activities have occurred for debugging purposes.
	 */
	uint32_t nl_nwrite_locks;
	uint32_t nl_nread_locks;
	uint32_t nl_npend_writes;
	uint32_t nl_npend_reads;
	uint32_t nl_nnonblock;
	uint32_t nl_nsignals;
	uint32_t nl_nsig_unlock;
	uint32_t nl_nsig_blocks;
	uint32_t nl_nsig_acq;
};

/*
 * A DDI DMA allocation: handles, the (single) cookie in use, the kernel
 * mapping, and whether the memory came from a kmem cache (nd_cached).
 */
struct nvme_dma {
	ddi_dma_handle_t nd_dmah;	/* DMA handle */
	ddi_acc_handle_t nd_acch;	/* access handle for the mapping */
	ddi_dma_cookie_t nd_cookie;
	uint_t nd_ncookie;		/* cookie count from the bind */
	caddr_t nd_memp;		/* kernel virtual address */
	size_t nd_len;			/* length of the allocation */
	boolean_t nd_cached;		/* allocated from a kmem cache? */
};

/* Lifecycle states of a command; see nvme_cmd_t below. */
typedef enum {
	NVME_CMD_ALLOCATED = 0,
	NVME_CMD_SUBMITTED,
	NVME_CMD_QUEUED,
	NVME_CMD_COMPLETED,
	NVME_CMD_LOST
} nvme_cmd_state_t;

typedef enum {
	NVME_CMD_F_DONTPANIC	= 1 << 0,
	NVME_CMD_F_USELOCK	= 1 << 1,
} nvme_cmd_flag_t;

/*
 * This command structure is shared between admin and I/O commands. When used
 * for an admin command, nc_mutex and nc_cv are used to synchronise access to
 * various fields, and to signal command completion. NVME_CMD_F_USELOCK in
 * nc_flags indicates whether the lock and CV are in use. For I/O commands,
 * these are neither initialised nor used.
 */
struct nvme_cmd {
	struct list_node nc_list;

	nvme_sqe_t nc_sqe;		/* submission queue entry */
	nvme_cqe_t nc_cqe;		/* completion queue entry */

	void (*nc_callback)(void *);	/* completion callback */
	bd_xfer_t *nc_xfer;		/* associated blkdev transfer, if any */

	uint32_t nc_timeout;
	nvme_cmd_flag_t nc_flags;
	nvme_cmd_state_t nc_state;	/* Protected by nc_mutex iff F_USELOCK */
	uint16_t nc_sqid;		/* submission queue this was issued on */

	hrtime_t nc_submit_ts;
	hrtime_t nc_queue_ts;

	nvme_dma_t *nc_dma;
	nvme_dma_t *nc_prp;		/* DMA for PRP lists */

	kmutex_t nc_mutex;
	kcondvar_t nc_cv;

	taskq_ent_t nc_tqent;
	nvme_t *nc_nvme;		/* back-pointer to owning controller */
};

/*
 * A completion queue. May be shared by multiple submission queues
 * (nvme_qpair_t's point at an nvme_cq_t via nq_cq).
 */
struct nvme_cq {
	size_t ncq_nentry;
	uint16_t ncq_id;

	nvme_dma_t *ncq_dma;
	nvme_cqe_t *ncq_cq;
	uint_t ncq_head;
	uintptr_t ncq_hdbl;	/* head doorbell register offset */
	int ncq_phase;

	taskq_t *ncq_cmd_taskq;

	kmutex_t ncq_mutex;
};

/*
 * A queue pair: a submission queue plus a pointer to its (possibly shared)
 * completion queue, along with the table of commands in flight.
 */
struct nvme_qpair {
	size_t nq_nentry;

	/* submission fields */
	nvme_dma_t *nq_sqdma;
	nvme_sqe_t *nq_sq;
	uint_t nq_sqhead;
	uint_t nq_sqtail;
	uintptr_t nq_sqtdbl;	/* tail doorbell register offset */

	/* completion */
	nvme_cq_t *nq_cq;

	/* shared structures for completion and submission */
	nvme_cmd_t **nq_cmd;	/* active command array */
	uint16_t nq_next_cmd;	/* next potential empty queue slot */
	uint_t nq_active_cmds;	/* number of active cmds */
	uint32_t nq_active_timeout; /* sum of the timeouts of active cmds */

	kmutex_t nq_mutex;	/* protects shared state */
	ksema_t nq_sema;	/* semaphore to ensure q always has >= 1 empty slot */
};

/*
 * Lock used to serialize namespace management operations; nml_bd_own
 * apparently records the blkdev owner — see nvme_mgmt lock users for the
 * exact semantics.
 */
typedef struct nvme_mgmt_lock {
	kmutex_t nml_lock;
	kcondvar_t nml_cv;
	uintptr_t nml_bd_own;
} nvme_mgmt_lock_t;

/*
 * Named kstats for per-device error accounting, exported via n_device_kstat.
 */
struct nvme_device_stat {
	/* Errors detected by driver */
	kstat_named_t nds_dma_bind_err;
	kstat_named_t nds_abort_timeout;
	kstat_named_t nds_abort_failed;
	kstat_named_t nds_abort_successful;
	kstat_named_t nds_abort_unsuccessful;
	kstat_named_t nds_cmd_timeout;
	kstat_named_t nds_wrong_logpage;
	kstat_named_t nds_unknown_logpage;
	kstat_named_t nds_too_many_cookies;
	kstat_named_t nds_unknown_cid;

	/* Errors detected by hardware */
	kstat_named_t nds_inv_cmd_err;
	kstat_named_t nds_inv_field_err;
	kstat_named_t nds_inv_nsfmt_err;
	kstat_named_t nds_data_xfr_err;
	kstat_named_t nds_internal_err;
	kstat_named_t nds_abort_rq_err;
	kstat_named_t nds_abort_pwrloss_err;
	kstat_named_t nds_abort_sq_del;
	kstat_named_t nds_nvm_cap_exc;
	kstat_named_t nds_nvm_ns_notrdy;
	kstat_named_t nds_nvm_ns_formatting;
	kstat_named_t nds_inv_cq_err;
	kstat_named_t nds_inv_qid_err;
	kstat_named_t nds_max_qsz_exc;
	kstat_named_t nds_inv_int_vect;
	kstat_named_t nds_inv_log_page;
	kstat_named_t nds_inv_format;
	kstat_named_t nds_inv_q_del;
	kstat_named_t nds_cnfl_attr;
	kstat_named_t nds_inv_prot;
	kstat_named_t nds_readonly;
	kstat_named_t nds_inv_fwslot;
	kstat_named_t nds_inv_fwimg;
	kstat_named_t nds_fwact_creset;
	kstat_named_t nds_fwact_nssr;
	kstat_named_t nds_fwact_reset;
	kstat_named_t nds_fwact_mtfa;
	kstat_named_t nds_fwact_prohibited;
	kstat_named_t nds_fw_overlap;
	kstat_named_t nds_inv_cmdseq_err;
	kstat_named_t nds_ns_attached;
	kstat_named_t nds_ns_priv;
	kstat_named_t nds_ns_not_attached;
	kstat_named_t nds_inc_ctrl_list;
	kstat_named_t nds_ana_attach;
	kstat_named_t nds_ns_attach_lim;

	/* Errors reported by asynchronous events */
	kstat_named_t nds_diagfail_event;
	kstat_named_t nds_persistent_event;
	kstat_named_t nds_transient_event;
	kstat_named_t nds_fw_load_event;
	kstat_named_t nds_reliability_event;
	kstat_named_t nds_temperature_event;
	kstat_named_t nds_spare_event;
	kstat_named_t nds_vendor_event;
	kstat_named_t nds_notice_event;
	kstat_named_t nds_unknown_event;
};

/*
 * Indices into each 3-element admin-command kstat array below:
 * count, average, and maximum respectively.
 */
#define	NAS_CNT	0
#define	NAS_AVG	1
#define	NAS_MAX	2

/* Per-admin-command-type statistics, exported via n_admin_kstat. */
struct nvme_admin_stat {
	kstat_named_t nas_getlogpage[3];
	kstat_named_t nas_identify[3];
	kstat_named_t nas_abort[3];
	kstat_named_t nas_fwactivate[3];
	kstat_named_t nas_fwimgload[3];
	kstat_named_t nas_nsformat[3];
	kstat_named_t nas_vendor[3];
	kstat_named_t nas_other[3];
};

/*
 * Per-controller soft state. One of these exists for each attached NVMe
 * controller instance.
 */
struct nvme {
	dev_info_t *n_dip;
	nvme_progress_t n_progress;	/* completed init steps */
	nvme_quirk_t n_quirks;		/* hardware quirks in effect */

	caddr_t n_regs;			/* mapped controller registers */
	ddi_acc_handle_t n_regh;

	kmem_cache_t *n_cmd_cache;	/* cache of nvme_cmd_t */
	kmem_cache_t *n_prp_cache;	/* cache of PRP list DMA buffers */

	/* Interrupt handling state */
	size_t n_inth_sz;
	ddi_intr_handle_t *n_inth;
	int n_intr_cnt;
	uint_t n_intr_pri;
	int n_intr_cap;
	int n_intr_type;
	int n_intr_types;

	/* PCI identity, read from config space */
	ddi_acc_handle_t n_pcicfg_handle;
	uint16_t n_vendor_id;
	uint16_t n_device_id;
	uint16_t n_subsystem_vendor_id;
	uint16_t n_subsystem_device_id;
	uint8_t n_revision_id;

	char *n_product;
	char *n_vendor;

	nvme_version_t n_version;
	boolean_t n_dead;		/* controller declared unusable */
	nvme_ioctl_errno_t n_dead_status;
	taskq_ent_t n_dead_tqent;
	boolean_t n_strict_version;
	boolean_t n_ignore_unknown_vendor_status;
	uint32_t n_admin_queue_len;
	uint32_t n_io_squeue_len;
	uint32_t n_io_cqueue_len;
	uint16_t n_async_event_limit;
	uint_t n_min_block_size;
	uint16_t n_abort_command_limit;
	uint64_t n_max_data_transfer_size;
	boolean_t n_write_cache_present;
	boolean_t n_write_cache_enabled;
	int n_error_log_len;
	boolean_t n_async_event_supported;
	int n_submission_queues_supported;
	int n_completion_queues_supported;
	int n_submission_queues;
	int n_completion_queues;

	int n_nssr_supported;
	int n_doorbell_stride;
	int n_timeout;
	int n_arbitration_mechanisms;
	int n_cont_queues_reqd;
	int n_max_queue_entries;
	int n_pageshift;
	int n_pagesize;

	uint32_t n_namespace_count;
	uint_t n_namespaces_attachable;
	uint_t n_ioq_count;
	uint_t n_cq_count;

	/*
	 * This is cached identify controller and common namespace data that
	 * exists in the system. This generally can be used in the kernel;
	 * however, we have to be careful about what we use here because these
	 * values are not refreshed after attach. Therefore these are good for
	 * answering the question what does the controller support or what is
	 * in the common namespace information, but not otherwise. That means
	 * you shouldn't use this to try to answer how much capacity is still
	 * in the controller because this information is just cached.
	 */
	nvme_identify_ctrl_t *n_idctl;
	nvme_identify_nsid_t *n_idcomns;

	/* Pointer to the admin queue, which is always queue 0 in n_ioq. */
	nvme_qpair_t *n_adminq;
	/*
	 * All command queues, including the admin queue.
	 * Its length is: n_ioq_count + 1.
	 */
	nvme_qpair_t **n_ioq;
	nvme_cq_t **n_cq;

	nvme_namespace_t *n_ns;		/* array of namespace state */

	ddi_dma_attr_t n_queue_dma_attr;
	ddi_dma_attr_t n_prp_dma_attr;
	ddi_dma_attr_t n_sgl_dma_attr;
	ddi_device_acc_attr_t n_reg_acc_attr;
	ddi_iblock_cookie_t n_fm_ibc;
	int n_fm_cap;

	ksema_t n_abort_sema;

	/* protects namespace management operations */
	nvme_mgmt_lock_t n_mgmt;

	/*
	 * This lock protects the minor node locking state across the
	 * controller and all related namespaces.
	 */
	kmutex_t n_minor_mutex;
	nvme_lock_t n_lock;

	kstat_t *n_device_kstat;
	nvme_device_stat_t n_device_stat;

	kstat_t *n_admin_kstat;
	kmutex_t n_admin_stat_mutex;
	nvme_admin_stat_t n_admin_stat;

	/* hot removal NDI event handling */
	ddi_eventcookie_t n_rm_cookie;
	ddi_callback_id_t n_ev_rm_cb_id;

	/* DDI UFM handle */
	ddi_ufm_handle_t *n_ufmh;
	/* Cached Firmware Slot Information log page */
	nvme_fwslot_log_t *n_fwslot;
	/* Lock protecting the cached firmware slot info */
	kmutex_t n_fwslot_mutex;
};

/*
 * Per-namespace soft state, one per namespace on the controller.
 */
struct nvme_namespace {
	nvme_t *ns_nvme;		/* back-pointer to the controller */
	nvme_ns_progress_t ns_progress;
	uint8_t ns_eui64[8];		/* EUI-64 identifier, if any */
	uint8_t ns_nguid[16];		/* NGUID identifier, if any */
	char ns_name[11];

	bd_handle_t ns_bd_hdl;		/* blkdev handle */

	uint32_t ns_id;
	size_t ns_block_count;
	size_t ns_block_size;
	size_t ns_best_block_size;
	nvme_ns_state_t ns_state;

	nvme_identify_nsid_t *ns_idns;	/* cached identify namespace data */

	/*
	 * Namespace lock, see the theory statement for more information.
	 */
	nvme_lock_t ns_lock;

	/*
	 * If a namespace has neither NGUID nor EUI64, we create a devid in
	 * nvme_prepare_devid().
	 */
	char *ns_devid;
};

/* Argument bundle for taskq-dispatched command processing. */
struct nvme_task_arg {
	nvme_t *nt_nvme;
	nvme_cmd_t *nt_cmd;
};

typedef enum {
	/*
	 * This indicates that there is no exclusive access required for this
	 * operation. However, this operation will fail if someone attempts to
	 * perform this operation and someone else holds a write lock.
	 */
	NVME_IOCTL_EXCL_NONE	= 0,
	/*
	 * This indicates that a write lock is required to perform the
	 * operation.
	 */
	NVME_IOCTL_EXCL_WRITE,
	/*
	 * This indicates that a write lock over the controller is required to
	 * perform the operation. An example of this is creating a namespace
	 * because it operates on the controller as a whole.
	 */
	NVME_IOCTL_EXCL_CTRL,
	/*
	 * This indicates that the exclusive check should be skipped. The only
	 * case this should be used in is the lock and unlock ioctls as they
	 * should be able to proceed even when the controller is being used
	 * exclusively.
	 */
	NVME_IOCTL_EXCL_SKIP
} nvme_ioctl_excl_t;

/*
 * This structure represents the set of checks that we apply to ioctl's using
 * the nvme_ioctl_common_t structure as part of validation.
 */
typedef struct nvme_ioctl_check {
	/*
	 * This indicates whether or not the command in question allows a
	 * namespace to be specified at all. If this is false, a namespace
	 * minor cannot be used and a controller minor must leave the nsid set
	 * to zero.
	 */
	boolean_t nck_ns_ok;
	/*
	 * This indicates that a minor node corresponding to a namespace is
	 * allowed to issue this.
	 */
	boolean_t nck_ns_minor_ok;
	/*
	 * This indicates that the controller should be skipped from all of the
	 * following processing behavior. That is, it's allowed to specify
	 * whatever it wants in the nsid field, regardless if it is valid or
	 * not. This is required for some of the Identify Command options that
	 * list endpoints. This should generally not be used and the driver
	 * should still validate the nuance here.
	 */
	boolean_t nck_skip_ctrl;
	/*
	 * This indicates that if we're on the controller's minor and we don't
	 * have an explicit namespace ID (i.e. 0), should the namespace be
	 * rewritten to be the broadcast namespace.
	 */
	boolean_t nck_ctrl_rewrite;
	/*
	 * This indicates whether or not the broadcast NSID is acceptable for
	 * the controller node.
	 */
	boolean_t nck_bcast_ok;

	/*
	 * This indicates to the lock checking code what kind of exclusive
	 * access is required. This check occurs after any namespace rewriting
	 * has occurred. When looking at exclusivity, a broadcast namespace or
	 * namespace 0 indicate that the controller is the target, otherwise
	 * the target namespace will be checked for a write lock.
	 */
	nvme_ioctl_excl_t nck_excl;
} nvme_ioctl_check_t;

/*
 * Constants
 */
extern uint_t nvme_vendor_specific_admin_cmd_max_timeout;
extern uint32_t nvme_vendor_specific_admin_cmd_size;

/*
 * Common functions.
 */
extern nvme_namespace_t *nvme_nsid2ns(nvme_t *, uint32_t);
extern boolean_t nvme_ioctl_error(nvme_ioctl_common_t *, nvme_ioctl_errno_t,
    uint32_t, uint32_t);
extern boolean_t nvme_ctrl_atleast(nvme_t *, const nvme_version_t *);
extern void nvme_ioctl_success(nvme_ioctl_common_t *);

/*
 * Validation related functions and kernel tunable limits.
 */
extern boolean_t nvme_validate_logpage(nvme_t *, nvme_ioctl_get_logpage_t *);
extern boolean_t nvme_validate_identify(nvme_t *, nvme_ioctl_identify_t *,
    boolean_t);
extern boolean_t nvme_validate_get_feature(nvme_t *,
    nvme_ioctl_get_feature_t *);
extern boolean_t nvme_validate_vuc(nvme_t *, nvme_ioctl_passthru_t *);
extern boolean_t nvme_validate_format(nvme_t *, nvme_ioctl_format_t *);
extern boolean_t nvme_validate_fw_load(nvme_t *, nvme_ioctl_fw_load_t *);
extern boolean_t nvme_validate_fw_commit(nvme_t *, nvme_ioctl_fw_commit_t *);
extern boolean_t nvme_validate_ctrl_attach_detach_ns(nvme_t *,
    nvme_ioctl_common_t *);
extern boolean_t nvme_validate_ns_delete(nvme_t *, nvme_ioctl_common_t *);
extern boolean_t nvme_validate_ns_create(nvme_t *, nvme_ioctl_ns_create_t *);

/*
 * Locking functions
 */
extern void nvme_rwlock(nvme_minor_t *, nvme_ioctl_lock_t *);
extern void nvme_rwunlock(nvme_minor_lock_info_t *, nvme_lock_t *);
extern void nvme_rwlock_ctrl_dead(void *);
extern void nvme_lock_init(nvme_lock_t *);
extern void nvme_lock_fini(nvme_lock_t *);

/*
 * Statistics functions
 */
extern boolean_t nvme_stat_init(nvme_t *);
extern void nvme_stat_cleanup(nvme_t *);
extern void nvme_admin_stat_cmd(nvme_t *, nvme_cmd_t *);

#ifdef __cplusplus
}
#endif

#endif /* _NVME_VAR_H */