/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_H__
#define __NVME_H__

#ifdef _KERNEL
#include <sys/types.h>
#endif

#include <sys/param.h>

#define	NVME_PASSTHROUGH_CMD	_IOWR('n', 0, struct nvme_pt_command)
#define	NVME_RESET_CONTROLLER	_IO('n', 1)

#define	NVME_IO_TEST		_IOWR('n', 100, struct nvme_io_test)
#define	NVME_BIO_TEST		_IOWR('n', 101, struct nvme_io_test)
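
/*
 * Illustrative sketch (not part of this header's API): the ioctls above are
 * issued from userland against the controller's character device.  The
 * device path used here (/dev/nvme0) is an assumption for the example.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	if (fd < 0)
 *		err(1, "open");
 *	if (ioctl(fd, NVME_RESET_CONTROLLER) < 0)
 *		err(1, "NVME_RESET_CONTROLLER");
 */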

/*
 * Use to mark a command to apply to all namespaces, or to retrieve global
 * log pages.
 */
#define	NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

#define	NVME_MAX_XFER_SIZE		MAXPHYS

union cap_lo_register {
	uint32_t	raw;
	struct {
		/** maximum queue entries supported */
		uint32_t mqes		: 16;

		/** contiguous queues required */
		uint32_t cqr		: 1;

		/** arbitration mechanism supported */
		uint32_t ams		: 2;

		uint32_t reserved1	: 5;

		/** timeout */
		uint32_t to		: 8;
	} bits __packed;
} __packed;

union cap_hi_register {
	uint32_t	raw;
	struct {
		/** doorbell stride */
		uint32_t dstrd		: 4;

		uint32_t reserved3	: 1;

		/** command sets supported */
		uint32_t css_nvm	: 1;

		uint32_t css_reserved	: 3;
		uint32_t reserved2	: 7;

		/** memory page size minimum */
		uint32_t mpsmin		: 4;

		/** memory page size maximum */
		uint32_t mpsmax		: 4;

		uint32_t reserved1	: 8;
	} bits __packed;
} __packed;

union cc_register {
	uint32_t	raw;
	struct {
		/** enable */
		uint32_t en		: 1;

		uint32_t reserved1	: 3;

		/** i/o command set selected */
		uint32_t css		: 3;

		/** memory page size */
		uint32_t mps		: 4;

		/** arbitration mechanism selected */
		uint32_t ams		: 3;

		/** shutdown notification */
		uint32_t shn		: 2;

		/** i/o submission queue entry size */
		uint32_t iosqes		: 4;

		/** i/o completion queue entry size */
		uint32_t iocqes		: 4;

		uint32_t reserved2	: 8;
	} bits __packed;
} __packed;

enum shn_value {
	NVME_SHN_NORMAL		= 0x1,
	NVME_SHN_ABRUPT		= 0x2,
};

union csts_register {
	uint32_t	raw;
	struct {
		/** ready */
		uint32_t rdy		: 1;

		/** controller fatal status */
		uint32_t cfs		: 1;

		/** shutdown status */
		uint32_t shst		: 2;

		uint32_t reserved1	: 28;
	} bits __packed;
} __packed;

enum shst_value {
	NVME_SHST_NORMAL	= 0x0,
	NVME_SHST_OCCURRING	= 0x1,
	NVME_SHST_COMPLETE	= 0x2,
};

union aqa_register {
	uint32_t	raw;
	struct {
		/** admin submission queue size */
		uint32_t asqs		: 12;

		uint32_t reserved1	: 4;

		/** admin completion queue size */
		uint32_t acqs		: 12;

		uint32_t reserved2	: 4;
	} bits __packed;
} __packed;

struct nvme_registers
{
	/** controller capabilities */
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;

	uint32_t	vs;	/* version */
	uint32_t	intms;	/* interrupt mask set */
	uint32_t	intmc;	/* interrupt mask clear */

	/** controller configuration */
	union cc_register	cc;

	uint32_t	reserved1;
	uint32_t	csts;	/* controller status */
	uint32_t	reserved2;

	/** admin queue attributes */
	union aqa_register	aqa;

	uint64_t	asq;	/* admin submission queue base addr */
	uint64_t	acq;	/* admin completion queue base addr */
	uint32_t	reserved3[0x3f2];

	struct {
	    uint32_t	sq_tdbl;	/* submission queue tail doorbell */
	    uint32_t	cq_hdbl;	/* completion queue head doorbell */
	} doorbell[1] __packed;
} __packed;
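
/*
 * Illustrative sketch (not part of the driver API): the doorbell[] array
 * above describes the register layout only for a doorbell stride of 0.
 * Per the NVMe specification, the doorbell registers begin at offset 0x1000
 * and each doorbell occupies (4 << CAP.DSTRD) bytes, so the offsets for
 * queue pair `qid' can be computed as:
 *
 *	uint32_t stride = 4 << regs->cap_hi.bits.dstrd;
 *	uint32_t sq_tdbl_off = offsetof(struct nvme_registers, doorbell) +
 *	    (2 * qid) * stride;
 *	uint32_t cq_hdbl_off = offsetof(struct nvme_registers, doorbell) +
 *	    (2 * qid + 1) * stride;
 */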

struct nvme_command
{
	/* dword 0 */
	uint16_t opc	:  8;	/* opcode */
	uint16_t fuse	:  2;	/* fused operation */
	uint16_t rsvd1	:  6;
	uint16_t cid;		/* command identifier */

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t rsvd2;
	uint32_t rsvd3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 */
	uint64_t prp1;		/* prp entry 1 */

	/* dword 8-9 */
	uint64_t prp2;		/* prp entry 2 */

	/* dword 10-15 */
	uint32_t cdw10;		/* command-specific */
	uint32_t cdw11;		/* command-specific */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
} __packed;

struct nvme_status {

	uint16_t p	:  1;	/* phase tag */
	uint16_t sc	:  8;	/* status code */
	uint16_t sct	:  3;	/* status code type */
	uint16_t rsvd2	:  2;
	uint16_t m	:  1;	/* more */
	uint16_t dnr	:  1;	/* do not retry */
} __packed;

struct nvme_completion {

	/* dword 0 */
	uint32_t		cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t		rsvd1;

	/* dword 2 */
	uint16_t		sqhd;	/* submission queue head pointer */
	uint16_t		sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t		cid;	/* command identifier */
	struct nvme_status	status;
} __packed;

struct nvme_dsm_range {

	uint32_t attributes;
	uint32_t length;
	uint64_t starting_lba;
} __packed;
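
/*
 * Illustrative sketch (based on the NVMe Dataset Management range format,
 * not something this header mandates): each range entry gives a starting
 * LBA and a length in logical blocks; the attributes field carries the
 * per-range context attributes and may be left zero.  A single-range
 * deallocate payload for nvme_ns_cmd_deallocate() (declared below for
 * kernel consumers) could be filled in as:
 *
 *	struct nvme_dsm_range range;
 *
 *	memset(&range, 0, sizeof(range));
 *	range.starting_lba = lba;
 *	range.length = nblocks;
 */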

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	/* 0x3-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	/* 0x0d-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	/* 0x0C-0x7F - reserved */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	/* 0x81-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};

enum nvme_dsm_attribute {
	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
};

enum nvme_activate_action {
	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
	NVME_AA_REPLACE_ACTIVATE		= 0x1,
	NVME_AA_ACTIVATE			= 0x2,
};

#define	NVME_SERIAL_NUMBER_LENGTH	20
#define	NVME_MODEL_NUMBER_LENGTH	40
#define	NVME_FIRMWARE_REVISION_LENGTH	8
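
/*
 * Illustrative sketch (the CNS encoding comes from the NVMe specification,
 * not from this header): an Identify admin command is built by setting the
 * opcode and cdw10, with the data buffer described by the PRP entries.  A
 * CNS (cdw10) value of 1 selects the controller data structure described by
 * struct nvme_controller_data below; 0 selects the namespace data structure
 * for the namespace given in nsid.
 *
 *	struct nvme_command cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opc = NVME_OPC_IDENTIFY;
 *	cmd.cdw10 = 1;
 */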

struct nvme_controller_data {

	/* bytes 0-255: controller capabilities and features */

	/** pci vendor id */
	uint16_t		vid;

	/** pci subsystem vendor id */
	uint16_t		ssvid;

	/** serial number */
	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];

	/** model number */
	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];

	/** firmware revision */
	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];

	/** recommended arbitration burst */
	uint8_t			rab;

	/** ieee oui identifier */
	uint8_t			ieee[3];

	/** multi-interface capabilities */
	uint8_t			mic;

	/** maximum data transfer size */
	uint8_t			mdts;

	uint8_t			reserved1[178];

	/* bytes 256-511: admin command set attributes */

	/** optional admin command support */
	struct {
		/* supports security send/receive commands */
		uint16_t	security  : 1;

		/* supports format nvm command */
		uint16_t	format    : 1;

		/* supports firmware activate/download commands */
		uint16_t	firmware  : 1;

		uint16_t	oacs_rsvd : 13;
	} __packed oacs;

	/** abort command limit */
	uint8_t			acl;

	/** asynchronous event request limit */
	uint8_t			aerl;

	/** firmware updates */
	struct {
		/* first slot is read-only */
		uint8_t		slot1_ro  : 1;

		/* number of firmware slots */
		uint8_t		num_slots : 3;

		uint8_t		frmw_rsvd : 4;
	} __packed frmw;

	/** log page attributes */
	struct {
		/* per namespace smart/health log page */
		uint8_t		ns_smart : 1;

		uint8_t		lpa_rsvd : 7;
	} __packed lpa;

	/** error log page entries */
	uint8_t			elpe;

	/** number of power states supported */
	uint8_t			npss;

	/** admin vendor specific command configuration */
	struct {
		/* admin vendor specific commands use spec format */
		uint8_t		spec_format : 1;

		uint8_t		avscc_rsvd  : 7;
	} __packed avscc;

	uint8_t			reserved2[247];

	/* bytes 512-703: nvm command set attributes */

	/** submission queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed sqes;

	/** completion queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed cqes;

	uint8_t			reserved3[2];

	/** number of namespaces */
	uint32_t		nn;

	/** optional nvm command support */
	struct {
		uint16_t	compare   : 1;
		uint16_t	write_unc : 1;
		uint16_t	dsm       : 1;
		uint16_t	reserved  : 13;
	} __packed oncs;

	/** fused operation support */
	uint16_t		fuses;

	/** format nvm attributes */
	uint8_t			fna;

	/** volatile write cache */
	struct {
		uint8_t		present  : 1;
		uint8_t		reserved : 7;
	} __packed vwc;

	/* TODO: flesh out remaining nvm command set attributes */
	uint8_t			reserved4[178];

	/* bytes 704-2047: i/o command set attributes */
	uint8_t			reserved5[1344];

	/* bytes 2048-3071: power state descriptors */
	uint8_t			reserved6[1024];

	/* bytes 3072-4095: vendor specific */
	uint8_t			reserved7[1024];
} __packed __aligned(4);

struct nvme_namespace_data {

	/** namespace size */
	uint64_t		nsze;

	/** namespace capacity */
	uint64_t		ncap;

	/** namespace utilization */
	uint64_t		nuse;

	/** namespace features */
	struct {
		/** thin provisioning */
		uint8_t		thin_prov : 1;
		uint8_t		reserved1 : 7;
	} __packed nsfeat;

	/** number of lba formats */
	uint8_t			nlbaf;

	/** formatted lba size */
	struct {
		uint8_t		format    : 4;
		uint8_t		extended  : 1;
		uint8_t		reserved2 : 3;
	} __packed flbas;

	/** metadata capabilities */
	struct {
		/* metadata can be transferred as part of data prp list */
		uint8_t		extended  : 1;

		/* metadata can be transferred with separate metadata pointer */
		uint8_t		pointer   : 1;

		uint8_t		reserved3 : 6;
	} __packed mc;

	/** end-to-end data protection capabilities */
	struct {
		/* protection information type 1 */
		uint8_t		pit1     : 1;

		/* protection information type 2 */
		uint8_t		pit2     : 1;

		/* protection information type 3 */
		uint8_t		pit3     : 1;

		/* first eight bytes of metadata */
		uint8_t		md_start : 1;

		/* last eight bytes of metadata */
		uint8_t		md_end   : 1;
	} __packed dpc;

	/** end-to-end data protection type settings */
	struct {
		/* protection information type */
		uint8_t		pit       : 3;

		/* 1 == protection info transferred at start of metadata */
		/* 0 == protection info transferred at end of metadata */
		uint8_t		md_start  : 1;

		uint8_t		reserved4 : 4;
	} __packed dps;

	uint8_t			reserved5[98];

	/** lba format support */
	struct {
		/** metadata size */
		uint32_t	ms	  : 16;

		/** lba data size */
		uint32_t	lbads	  : 8;

		/** relative performance */
		uint32_t	rp	  : 2;

		uint32_t	reserved6 : 6;
	} __packed lbaf[16];

	uint8_t			reserved6[192];

	uint8_t			vendor_specific[3712];
} __packed __aligned(4);
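
/*
 * Illustrative sketch (the log2 encoding of lbads comes from the NVMe
 * specification, not from this header): the active LBA format is selected
 * by flbas.format, and lbads is the base-2 logarithm of the LBA data size,
 * so the sector size and total namespace size can be derived as:
 *
 *	const struct nvme_namespace_data *nsdata;
 *	uint32_t sector_size;
 *	uint64_t size_in_bytes;
 *
 *	sector_size = 1 << nsdata->lbaf[nsdata->flbas.format].lbads;
 *	size_in_bytes = nsdata->nsze * sector_size;
 */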

enum nvme_log_page {

	/* 0x00 - reserved */
	NVME_LOG_ERROR			= 0x01,
	NVME_LOG_HEALTH_INFORMATION	= 0x02,
	NVME_LOG_FIRMWARE_SLOT		= 0x03,
	/* 0x04-0x7F - reserved */
	/* 0x80-0xBF - I/O command set specific */
	/* 0xC0-0xFF - vendor specific */
};

struct nvme_error_information_entry {

	uint64_t		error_count;
	uint16_t		sqid;
	uint16_t		cid;
	struct nvme_status	status;
	uint16_t		error_location;
	uint64_t		lba;
	uint32_t		nsid;
	uint8_t			vendor_specific;
	uint8_t			reserved[35];
} __packed __aligned(4);

union nvme_critical_warning_state {

	uint8_t		raw;

	struct {
		uint8_t	available_spare		: 1;
		uint8_t	temperature		: 1;
		uint8_t	device_reliability	: 1;
		uint8_t	read_only		: 1;
		uint8_t	volatile_memory_backup	: 1;
		uint8_t	reserved		: 3;
	} __packed bits;
} __packed;

struct nvme_health_information_page {

	union nvme_critical_warning_state	critical_warning;

	uint16_t		temperature;
	uint8_t			available_spare;
	uint8_t			available_spare_threshold;
	uint8_t			percentage_used;

	uint8_t			reserved[26];

	/*
	 * Note that the following are 128-bit values, but are
	 * defined as an array of 2 64-bit values.
	 */
	/* Data Units Read is always in 512-byte units. */
	uint64_t		data_units_read[2];
	/* Data Units Written is always in 512-byte units. */
	uint64_t		data_units_written[2];
	/* For NVM command set, this includes Compare commands. */
	uint64_t		host_read_commands[2];
	uint64_t		host_write_commands[2];
	/* Controller Busy Time is reported in minutes. */
	uint64_t		controller_busy_time[2];
	uint64_t		power_cycles[2];
	uint64_t		power_on_hours[2];
	uint64_t		unsafe_shutdowns[2];
	uint64_t		media_errors[2];
	uint64_t		num_error_info_log_entries[2];

	uint8_t			reserved2[320];
} __packed __aligned(4);

struct nvme_firmware_page {

	struct {
		uint8_t	slot		: 3;	/* slot for current FW */
		uint8_t	reserved	: 5;
	} __packed afi;

	uint8_t			reserved[7];
	uint64_t		revision[7];	/* revisions for 7 slots */
	uint8_t			reserved2[448];
} __packed __aligned(4);

#define	NVME_TEST_MAX_THREADS	128

struct nvme_io_test {

	enum nvme_nvm_opcode	opc;
	uint32_t		size;
	uint32_t		time;	/* in seconds */
	uint32_t		num_threads;
	uint32_t		flags;
	uint32_t		io_completed[NVME_TEST_MAX_THREADS];
};

enum nvme_io_test_flags {

	/*
	 * Specifies whether dev_refthread/dev_relthread should be
	 * called during NVME_BIO_TEST.  Ignored for other test
	 * types.
	 */
	NVME_TEST_FLAG_REFTHREAD =	0x1,
};
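
/*
 * Illustrative sketch (not part of the driver API; the namespace device
 * path used here is an assumption for the example): the I/O test ioctls
 * take a struct nvme_io_test describing the workload and return per-thread
 * completion counts in io_completed[].
 *
 *	struct nvme_io_test io_test;
 *	int fd;
 *
 *	memset(&io_test, 0, sizeof(io_test));
 *	io_test.opc = NVME_OPC_READ;
 *	io_test.size = 4096;
 *	io_test.time = 30;
 *	io_test.num_threads = 4;
 *
 *	fd = open("/dev/nvme0ns1", O_RDWR);
 *	if (fd >= 0)
 *		(void)ioctl(fd, NVME_IO_TEST, &io_test);
 */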

struct nvme_pt_command {

	/*
	 * cmd is used to specify a passthrough command to a controller or
	 * namespace.
	 *
	 * The following fields from cmd may be specified by the caller:
	 *	* opc  (opcode)
	 *	* nsid (namespace id) - for admin commands only
	 *	* cdw10-cdw15
	 *
	 * Remaining fields must be set to 0 by the caller.
	 */
	struct nvme_command	cmd;

	/*
	 * cpl returns completion status for the passthrough command
	 * specified by cmd.
	 *
	 * The following fields will be filled out by the driver, for
	 * consumption by the caller:
	 *	* cdw0
	 *	* status (except for phase)
	 *
	 * Remaining fields will be set to 0 by the driver.
	 */
	struct nvme_completion	cpl;

	/* buf is the data buffer associated with this passthrough command. */
	void *			buf;

	/*
	 * len is the length of the data buffer associated with this
	 * passthrough command.
	 */
	uint32_t		len;

	/*
	 * is_read = 1 if the passthrough command will read data into the
	 * supplied buffer from the controller.
	 *
	 * is_read = 0 if the passthrough command will write data from the
	 * supplied buffer to the controller.
	 */
	uint32_t		is_read;

	/*
	 * driver_lock is used by the driver only.  It must be set to 0
	 * by the caller.
	 */
	struct mtx *		driver_lock;
};

#define nvme_completion_is_error(cpl)					\
	((cpl)->status.sc != 0 || (cpl)->status.sct != 0)

void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);
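
/*
 * Illustrative sketch (not part of the driver API; the controller device
 * path and the CNS value of 1 for Identify Controller are assumptions drawn
 * from the NVMe specification): a passthrough admin command is issued by
 * filling in the caller-owned fields of struct nvme_pt_command and invoking
 * NVME_PASSTHROUGH_CMD on the controller device, then checking the returned
 * completion with nvme_completion_is_error().
 *
 *	struct nvme_pt_command pt;
 *	struct nvme_controller_data cdata;
 *	int fd;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_IDENTIFY;
 *	pt.cmd.cdw10 = 1;
 *	pt.buf = &cdata;
 *	pt.len = sizeof(cdata);
 *	pt.is_read = 1;
 *
 *	fd = open("/dev/nvme0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) == 0 &&
 *	    !nvme_completion_is_error(&pt.cpl))
 *		printf("model: %.40s\n", (const char *)cdata.mn);
 */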

#ifdef _KERNEL

struct bio;

struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
				     uint32_t, void *, uint32_t);
typedef void (*nvme_cons_fail_fn_t)(void *);

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
				   struct nvme_pt_command *pt,
				   uint32_t nsid, int is_user_buffer,
				   int is_admin_cmd);

/* Admin functions */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
				    uint8_t log_page, uint32_t nsid,
				    void *payload, uint32_t payload_size,
				    nvme_cb_fn_t cb_fn, void *cb_arg);

/* NVM I/O functions */
int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			 void *cb_arg);
int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
			     nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
			       void *cb_arg);
int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
			  void *cb_arg);

/* Registration functions */
struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
					       nvme_cons_ctrlr_fn_t ctrlr_fn,
					       nvme_cons_async_fn_t async_fn,
					       nvme_cons_fail_fn_t  fail_fn);
void	nvme_unregister_consumer(struct nvme_consumer *consumer);

/* Controller helper functions */
device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
const struct nvme_controller_data *
		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);

/* Namespace helper functions */
uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
		nvme_ns_get_data(struct nvme_namespace *ns);

int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
			    nvme_cb_fn_t cb_fn);

#endif /* _KERNEL */

#endif /* __NVME_H__ */