/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_H__
#define __NVME_H__

#ifdef _KERNEL
#include <sys/types.h>
#endif

#include <sys/param.h>

#define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
#define	NVME_RESET_CONTROLLER		_IO('n', 1)

#define	NVME_IO_TEST			_IOWR('n', 100, struct nvme_io_test)
#define	NVME_BIO_TEST			_IOWR('n', 101, struct nvme_io_test)
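/*
 * Example (illustrative sketch only, not part of this header): from userland,
 * the reset ioctl can be issued against a controller device node such as
 * /dev/nvme0 roughly as follows:
 *
 *	int fd = open("/dev/nvme0", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, NVME_RESET_CONTROLLER) < 0)
 *		err(1, "reset failed");
 */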
/*
 * Used to mark a command that applies to all namespaces, or to retrieve
 * global log pages.
 */
#define	NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

/* Cap transfers at 1MB; the driver explodes with larger sizes. */
#define	NVME_MAX_XFER_SIZE		(MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))

union cap_lo_register {
	uint32_t	raw;
	struct {
		/** maximum queue entries supported */
		uint32_t mqes		: 16;

		/** contiguous queues required */
		uint32_t cqr		: 1;

		/** arbitration mechanism supported */
		uint32_t ams		: 2;

		uint32_t reserved1	: 5;

		/** timeout */
		uint32_t to		: 8;
	} bits __packed;
} __packed;

union cap_hi_register {
	uint32_t	raw;
	struct {
		/** doorbell stride */
		uint32_t dstrd		: 4;

		uint32_t reserved3	: 1;

		/** command sets supported */
		uint32_t css_nvm	: 1;

		uint32_t css_reserved	: 3;
		uint32_t reserved2	: 7;

		/** memory page size minimum */
		uint32_t mpsmin		: 4;

		/** memory page size maximum */
		uint32_t mpsmax		: 4;

		uint32_t reserved1	: 8;
	} bits __packed;
} __packed;

union cc_register {
	uint32_t	raw;
	struct {
		/** enable */
		uint32_t en		: 1;

		uint32_t reserved1	: 3;

		/** i/o command set selected */
		uint32_t css		: 3;

		/** memory page size */
		uint32_t mps		: 4;

		/** arbitration mechanism selected */
		uint32_t ams		: 3;

		/** shutdown notification */
		uint32_t shn		: 2;

		/** i/o submission queue entry size */
		uint32_t iosqes		: 4;

		/** i/o completion queue entry size */
		uint32_t iocqes		: 4;

		uint32_t reserved2	: 8;
	} bits __packed;
} __packed;

enum shn_value {
	NVME_SHN_NORMAL		= 0x1,
	NVME_SHN_ABRUPT		= 0x2,
};

union csts_register {
	uint32_t	raw;
	struct {
		/** ready */
		uint32_t rdy		: 1;

		/** controller fatal status */
		uint32_t cfs		: 1;

		/** shutdown status */
		uint32_t shst		: 2;

		uint32_t reserved1	: 28;
	} bits __packed;
} __packed;

enum shst_value {
	NVME_SHST_NORMAL	= 0x0,
	NVME_SHST_OCCURRING	= 0x1,
	NVME_SHST_COMPLETE	= 0x2,
};

union aqa_register {
	uint32_t	raw;
	struct {
		/** admin submission queue size */
		uint32_t asqs		: 12;

		uint32_t reserved1	: 4;

		/** admin completion queue size */
		uint32_t acqs		: 12;

		uint32_t reserved2	: 4;
	} bits __packed;
} __packed;

struct nvme_registers
{
	/** controller capabilities */
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;

	uint32_t		vs;	/* version */
	uint32_t		intms;	/* interrupt mask set */
	uint32_t		intmc;	/* interrupt mask clear */

	/** controller configuration */
	union cc_register	cc;

	uint32_t		reserved1;

	/** controller status */
	union csts_register	csts;

	uint32_t		reserved2;

	/** admin queue attributes */
	union aqa_register	aqa;

	uint64_t		asq;	/* admin submission queue base addr */
	uint64_t		acq;	/* admin completion queue base addr */
	uint32_t		reserved3[0x3f2];

	struct {
		uint32_t	sq_tdbl;	/* submission queue tail doorbell */
		uint32_t	cq_hdbl;	/* completion queue head doorbell */
	} doorbell[1] __packed;
} __packed;
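/*
 * Illustrative helpers (an assumption, not part of the stock header): the
 * doorbell[] array above is laid out for a doorbell stride of 0.  With a
 * non-zero CAP.DSTRD, queue qid's doorbells live at the byte offsets
 * computed below, relative to the start of the register space.
 */
static inline uint64_t
nvme_sq_tdbl_off(uint32_t qid, uint32_t dstrd)
{

	return (0x1000 + (2 * (uint64_t)qid) * (4 << dstrd));
}

static inline uint64_t
nvme_cq_hdbl_off(uint32_t qid, uint32_t dstrd)
{

	return (0x1000 + (2 * (uint64_t)qid + 1) * (4 << dstrd));
}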
struct nvme_command
{
	/* dword 0 */
	uint16_t opc	:  8;	/* opcode */
	uint16_t fuse	:  2;	/* fused operation */
	uint16_t rsvd1	:  6;
	uint16_t cid;		/* command identifier */

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t rsvd2;
	uint32_t rsvd3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 */
	uint64_t prp1;		/* prp entry 1 */

	/* dword 8-9 */
	uint64_t prp2;		/* prp entry 2 */

	/* dword 10-15 */
	uint32_t cdw10;		/* command-specific */
	uint32_t cdw11;		/* command-specific */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
} __packed;

struct nvme_status {

	uint16_t p	:  1;	/* phase tag */
	uint16_t sc	:  8;	/* status code */
	uint16_t sct	:  3;	/* status code type */
	uint16_t rsvd2	:  2;
	uint16_t m	:  1;	/* more */
	uint16_t dnr	:  1;	/* do not retry */
} __packed;

struct nvme_completion {

	/* dword 0 */
	uint32_t		cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t		rsvd1;

	/* dword 2 */
	uint16_t		sqhd;	/* submission queue head pointer */
	uint16_t		sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t		cid;	/* command identifier */
	struct nvme_status	status;
} __packed;

struct nvme_dsm_range {

	uint32_t	attributes;
	uint32_t	length;
	uint64_t	starting_lba;
} __packed;
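/*
 * Example (illustrative sketch only, not from this header): a single-range
 * deallocate payload for a Dataset Management command might be filled in as
 * follows, where lba and nblocks are placeholders supplied by the caller, and
 * then handed to nvme_ns_cmd_deallocate() declared further below:
 *
 *	struct nvme_dsm_range r;
 *
 *	memset(&r, 0, sizeof(r));
 *	r.attributes = 0;		// no per-range context hints
 *	r.length = nblocks;		// in logical blocks
 *	r.starting_lba = lba;
 */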
/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	/* 0x3-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	NVME_OPC_NAMESPACE_MANAGEMENT		= 0x0d,
	/* 0x0e-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,
	NVME_OPC_NAMESPACE_ATTACHMENT		= 0x15,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	/* 0x0C-0x7F - reserved */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	/* 0x81-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};
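/*
 * Illustrative note (an assumption, not stated in this header): feature
 * specific arguments for Set/Get Features travel in cdw11.  For
 * NVME_FEAT_NUMBER_OF_QUEUES, for example, cdw11 packs the requested I/O
 * queue counts as zero-based values:
 *
 *	cdw11 = ((ncqr - 1) << 16) | ((nsqr - 1) & 0xffff);
 */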
enum nvme_dsm_attribute {
	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
};

enum nvme_activate_action {
	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
	NVME_AA_REPLACE_ACTIVATE		= 0x1,
	NVME_AA_ACTIVATE			= 0x2,
};

struct nvme_power_state {
	uint16_t	mp;			/* Maximum Power */
	uint8_t		ps_rsvd1;
	uint8_t		mps      : 1;		/* Max Power Scale */
	uint8_t		nops     : 1;		/* Non-Operational State */
	uint8_t		ps_rsvd2 : 6;
	uint32_t	enlat;			/* Entry Latency */
	uint32_t	exlat;			/* Exit Latency */
	uint8_t		rrt      : 5;		/* Relative Read Throughput */
	uint8_t		ps_rsvd3 : 3;
	uint8_t		rrl      : 5;		/* Relative Read Latency */
	uint8_t		ps_rsvd4 : 3;
	uint8_t		rwt      : 5;		/* Relative Write Throughput */
	uint8_t		ps_rsvd5 : 3;
	uint8_t		rwl      : 5;		/* Relative Write Latency */
	uint8_t		ps_rsvd6 : 3;
	uint16_t	idlp;			/* Idle Power */
	uint8_t		ps_rsvd7 : 6;
	uint8_t		ips      : 2;		/* Idle Power Scale */
	uint8_t		ps_rsvd8;
	uint16_t	actp;			/* Active Power */
	uint8_t		apw      : 3;		/* Active Power Workload */
	uint8_t		ps_rsvd9 : 3;
	uint8_t		aps      : 2;		/* Active Power Scale */
	uint8_t		ps_rsvd10[9];
} __packed;

#define NVME_SERIAL_NUMBER_LENGTH	20
#define NVME_MODEL_NUMBER_LENGTH	40
#define NVME_FIRMWARE_REVISION_LENGTH	8

struct nvme_controller_data {

	/* bytes 0-255: controller capabilities and features */

	/** pci vendor id */
	uint16_t		vid;

	/** pci subsystem vendor id */
	uint16_t		ssvid;

	/** serial number */
	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];

	/** model number */
	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];

	/** firmware revision */
	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];

	/** recommended arbitration burst */
	uint8_t			rab;

	/** ieee oui identifier */
	uint8_t			ieee[3];

	/** multi-interface capabilities */
	uint8_t			mic;

	/** maximum data transfer size */
	uint8_t			mdts;

	/** controller id */
	uint16_t		ctrlr_id;

	uint8_t			reserved1[176];

	/* bytes 256-511: admin command set attributes */

	/** optional admin command support */
	struct {
		/* supports security send/receive commands */
		uint16_t	security  : 1;

		/* supports format nvm command */
		uint16_t	format    : 1;

		/* supports firmware activate/download commands */
		uint16_t	firmware  : 1;

		/* supports namespace management commands */
		uint16_t	nsmgmt    : 1;

		uint16_t	oacs_rsvd : 12;
	} __packed oacs;

	/** abort command limit */
	uint8_t			acl;

	/** asynchronous event request limit */
	uint8_t			aerl;

	/** firmware updates */
	struct {
		/* first slot is read-only */
		uint8_t		slot1_ro  : 1;

		/* number of firmware slots */
		uint8_t		num_slots : 3;

		uint8_t		frmw_rsvd : 4;
	} __packed frmw;

	/** log page attributes */
	struct {
		/* per namespace smart/health log page */
		uint8_t		ns_smart : 1;

		uint8_t		lpa_rsvd : 7;
	} __packed lpa;

	/** error log page entries */
	uint8_t			elpe;

	/** number of power states supported */
	uint8_t			npss;

	/** admin vendor specific command configuration */
	struct {
		/* admin vendor specific commands use spec format */
		uint8_t		spec_format : 1;

		uint8_t		avscc_rsvd  : 7;
	} __packed avscc;

	uint8_t			reserved2[15];

	/** namespace capabilities */
	struct {
		/* if nsmgmt, report tnvmcap and unvmcap */
		uint8_t		tnvmcap[16];
		uint8_t		unvmcap[16];
	} __packed untncap;

	uint8_t			reserved3[200];

	/* bytes 512-703: nvm command set attributes */

	/** submission queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed sqes;

	/** completion queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed cqes;

	uint8_t			reserved4[2];

	/** number of namespaces */
	uint32_t		nn;

	/** optional nvm command support */
	struct {
		uint16_t	compare   : 1;
		uint16_t	write_unc : 1;
		uint16_t	dsm       : 1;
		uint16_t	reserved  : 13;
	} __packed oncs;

	/** fused operation support */
	uint16_t		fuses;

	/** format nvm attributes */
	uint8_t			fna;

	/** volatile write cache */
	struct {
		uint8_t		present  : 1;
		uint8_t		reserved : 7;
	} __packed vwc;

	/* TODO: flesh out remaining nvm command set attributes */
	uint8_t			reserved5[178];

	/* bytes 704-2047: i/o command set attributes */
	uint8_t			reserved6[1344];

	/* bytes 2048-3071: power state descriptors */
	struct nvme_power_state	power_state[32];

	/* bytes 3072-4095: vendor specific */
	uint8_t			vs[1024];
} __packed __aligned(4);
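/*
 * Illustrative helper (an assumption, not part of the stock header): mdts is
 * a power of two in units of the minimum memory page size advertised in
 * CAP.MPSMIN (itself 2^(12 + MPSMIN) bytes), so the largest data transfer a
 * controller accepts can be derived roughly as below.  An mdts of 0 means no
 * limit is imposed by the controller.
 */
static inline uint64_t
nvme_mdts_to_bytes(const struct nvme_controller_data *cdata, uint32_t mpsmin)
{

	if (cdata->mdts == 0)
		return (0);
	return ((uint64_t)1 << (cdata->mdts + 12 + mpsmin));
}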
struct nvme_namespace_data {

	/** namespace size */
	uint64_t		nsze;

	/** namespace capacity */
	uint64_t		ncap;

	/** namespace utilization */
	uint64_t		nuse;

	/** namespace features */
	struct {
		/** thin provisioning */
		uint8_t		thin_prov : 1;
		uint8_t		reserved1 : 7;
	} __packed nsfeat;

	/** number of lba formats */
	uint8_t			nlbaf;

	/** formatted lba size */
	struct {
		uint8_t		format    : 4;
		uint8_t		extended  : 1;
		uint8_t		reserved2 : 3;
	} __packed flbas;

	/** metadata capabilities */
	struct {
		/* metadata can be transferred as part of data prp list */
		uint8_t		extended  : 1;

		/* metadata can be transferred with separate metadata pointer */
		uint8_t		pointer   : 1;

		uint8_t		reserved3 : 6;
	} __packed mc;

	/** end-to-end data protection capabilities */
	struct {
		/* protection information type 1 */
		uint8_t		pit1     : 1;

		/* protection information type 2 */
		uint8_t		pit2     : 1;

		/* protection information type 3 */
		uint8_t		pit3     : 1;

		/* first eight bytes of metadata */
		uint8_t		md_start : 1;

		/* last eight bytes of metadata */
		uint8_t		md_end   : 1;
	} __packed dpc;

	/** end-to-end data protection type settings */
	struct {
		/* protection information type */
		uint8_t		pit       : 3;

		/* 1 == protection info transferred at start of metadata */
		/* 0 == protection info transferred at end of metadata */
		uint8_t		md_start  : 1;

		uint8_t		reserved4 : 4;
	} __packed dps;

	uint8_t			reserved5[98];

	/** lba format support */
	struct {
		/** metadata size */
		uint32_t	ms	  : 16;

		/** lba data size */
		uint32_t	lbads	  : 8;

		/** relative performance */
		uint32_t	rp	  : 2;

		uint32_t	reserved6 : 6;
	} __packed lbaf[16];

	uint8_t			reserved6[192];

	uint8_t			vendor_specific[3712];
} __packed __aligned(4);
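/*
 * Illustrative helper (an assumption, not part of the stock header): the
 * active LBA format is selected by flbas.format, and lbads holds the log2 of
 * the LBA data size, so a namespace's sector size in bytes follows as:
 */
static inline uint32_t
nvme_ns_data_sector_size(const struct nvme_namespace_data *nsdata)
{

	return (1 << nsdata->lbaf[nsdata->flbas.format].lbads);
}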
enum nvme_log_page {

	/* 0x00 - reserved */
	NVME_LOG_ERROR			= 0x01,
	NVME_LOG_HEALTH_INFORMATION	= 0x02,
	NVME_LOG_FIRMWARE_SLOT		= 0x03,
	NVME_LOG_CHANGED_NAMESPACE	= 0x04,
	NVME_LOG_COMMAND_EFFECT		= 0x05,
	/* 0x06-0x7F - reserved */
	/* 0x80-0xBF - I/O command set specific */
	NVME_LOG_RES_NOTIFICATION	= 0x80,
	/* 0xC0-0xFF - vendor specific */

	/*
	 * The following are Intel specific log pages, but they seem
	 * to be widely implemented.
	 */
	INTEL_LOG_READ_LAT_LOG		= 0xc1,
	INTEL_LOG_WRITE_LAT_LOG		= 0xc2,
	INTEL_LOG_TEMP_STATS		= 0xc5,
	INTEL_LOG_ADD_SMART		= 0xca,
	INTEL_LOG_DRIVE_MKT_NAME	= 0xdd,

	/*
	 * HGST log page, with lots of sub pages.
	 */
	HGST_INFO_LOG			= 0xc1,
};

struct nvme_error_information_entry {

	uint64_t		error_count;
	uint16_t		sqid;
	uint16_t		cid;
	struct nvme_status	status;
	uint16_t		error_location;
	uint64_t		lba;
	uint32_t		nsid;
	uint8_t			vendor_specific;
	uint8_t			reserved[35];
} __packed __aligned(4);

union nvme_critical_warning_state {

	uint8_t		raw;

	struct {
		uint8_t	available_spare		: 1;
		uint8_t	temperature		: 1;
		uint8_t	device_reliability	: 1;
		uint8_t	read_only		: 1;
		uint8_t	volatile_memory_backup	: 1;
		uint8_t	reserved		: 3;
	} __packed bits;
} __packed;

struct nvme_health_information_page {

	union nvme_critical_warning_state	critical_warning;

	uint16_t		temperature;
	uint8_t			available_spare;
	uint8_t			available_spare_threshold;
	uint8_t			percentage_used;

	uint8_t			reserved[26];

	/*
	 * Note that the following are 128-bit values, but are
	 * defined as an array of 2 64-bit values.
	 */
	/* Data Units Read is always in 512-byte units. */
	uint64_t		data_units_read[2];
	/* Data Units Written is always in 512-byte units. */
	uint64_t		data_units_written[2];
	/* For NVM command set, this includes Compare commands. */
	uint64_t		host_read_commands[2];
	uint64_t		host_write_commands[2];
	/* Controller Busy Time is reported in minutes. */
	uint64_t		controller_busy_time[2];
	uint64_t		power_cycles[2];
	uint64_t		power_on_hours[2];
	uint64_t		unsafe_shutdowns[2];
	uint64_t		media_errors[2];
	uint64_t		num_error_info_log_entries[2];
	uint32_t		warning_temp_time;
	uint32_t		error_temp_time;
	uint16_t		temp_sensor[8];

	uint8_t			reserved2[296];
} __packed __aligned(4);

struct nvme_firmware_page {

	struct {
		uint8_t	slot		: 3;	/* slot for current FW */
		uint8_t	reserved	: 5;
	} __packed afi;

	uint8_t			reserved[7];
	uint64_t		revision[7];	/* revisions for 7 slots */
	uint8_t			reserved2[448];
} __packed __aligned(4);

struct intel_log_temp_stats
{
	uint64_t	current;
	uint64_t	overtemp_flag_last;
	uint64_t	overtemp_flag_life;
	uint64_t	max_temp;
	uint64_t	min_temp;
	uint64_t	_rsvd[5];
	uint64_t	max_oper_temp;
	uint64_t	min_oper_temp;
	uint64_t	est_offset;
} __packed __aligned(4);

#define NVME_TEST_MAX_THREADS	128

struct nvme_io_test {

	enum nvme_nvm_opcode	opc;
	uint32_t		size;
	uint32_t		time;	/* in seconds */
	uint32_t		num_threads;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

enum nvme_io_test_flags {

	/*
	 * Specifies whether dev_refthread/dev_relthread should be
	 * called during NVME_BIO_TEST.  Ignored for other test
	 * types.
	 */
	NVME_TEST_FLAG_REFTHREAD	= 0x1,
};

struct nvme_pt_command {

	/*
	 * cmd is used to specify a passthrough command to a controller or
	 * namespace.
	 *
	 * The following fields from cmd may be specified by the caller:
	 *	* opc  (opcode)
	 *	* nsid (namespace id) - for admin commands only
	 *	* cdw10-cdw15
	 *
	 * Remaining fields must be set to 0 by the caller.
	 */
	struct nvme_command	cmd;

	/*
	 * cpl returns completion status for the passthrough command
	 * specified by cmd.
	 *
	 * The following fields will be filled out by the driver, for
	 * consumption by the caller:
	 *	* cdw0
	 *	* status (except for phase)
	 *
	 * Remaining fields will be set to 0 by the driver.
	 */
	struct nvme_completion	cpl;

	/* buf is the data buffer associated with this passthrough command. */
	void *			buf;

	/*
	 * len is the length of the data buffer associated with this
	 * passthrough command.
	 */
	uint32_t		len;

	/*
	 * is_read = 1 if the passthrough command will read data into the
	 * supplied buffer from the controller.
	 *
	 * is_read = 0 if the passthrough command will write data from the
	 * supplied buffer to the controller.
	 */
	uint32_t		is_read;

	/*
	 * driver_lock is used by the driver only.  It must be set to 0
	 * by the caller.
	 */
	struct mtx *		driver_lock;
};

#define nvme_completion_is_error(cpl)					\
	((cpl)->status.sc != 0 || (cpl)->status.sct != 0)
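/*
 * Example (illustrative sketch only, not part of this header): from userland,
 * an admin command such as Identify Controller could be issued through the
 * passthrough ioctl roughly as follows, with fd an open descriptor for a
 * controller device node such as /dev/nvme0:
 *
 *	struct nvme_pt_command		pt;
 *	struct nvme_controller_data	cdata;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_IDENTIFY;
 *	pt.cmd.cdw10 = 1;		// CNS = 1: identify controller
 *	pt.buf = &cdata;
 *	pt.len = sizeof(cdata);
 *	pt.is_read = 1;
 *
 *	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0 ||
 *	    nvme_completion_is_error(&pt.cpl))
 *		err(1, "identify failed");
 */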
void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);

#ifdef _KERNEL

struct bio;

struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
				     uint32_t, void *, uint32_t);
typedef void (*nvme_cons_fail_fn_t)(void *);

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
				   struct nvme_pt_command *pt,
				   uint32_t nsid, int is_user_buffer,
				   int is_admin_cmd);

/* Admin functions */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
				    uint8_t log_page, uint32_t nsid,
				    void *payload, uint32_t payload_size,
				    nvme_cb_fn_t cb_fn, void *cb_arg);

/* NVM I/O functions */
int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			 void *cb_arg);
int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
			     nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
			       void *cb_arg);
int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset,
		     size_t len);
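/*
 * Example (illustrative sketch only, not from this header): a kernel consumer
 * might issue an asynchronous read with a hypothetical completion callback
 * my_read_done() roughly as follows:
 *
 *	static void
 *	my_read_done(void *arg, const struct nvme_completion *cpl)
 *	{
 *		if (nvme_completion_is_error(cpl))
 *			printf("nvme read failed\n");
 *	}
 *
 *	error = nvme_ns_cmd_read(ns, buf, lba, lba_count, my_read_done, arg);
 */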
/* Registration functions */
struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
					       nvme_cons_ctrlr_fn_t ctrlr_fn,
					       nvme_cons_async_fn_t async_fn,
					       nvme_cons_fail_fn_t  fail_fn);
void		nvme_unregister_consumer(struct nvme_consumer *consumer);

/* Controller helper functions */
device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
const struct nvme_controller_data *
		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);

/* Namespace helper functions */
uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
		nvme_ns_get_data(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_stripesize(struct nvme_namespace *ns);

int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
			    nvme_cb_fn_t cb_fn);

/* Command building helper functions -- shared with CAM */
static inline
void	nvme_ns_flush_cmd(struct nvme_command *cmd, uint32_t nsid)
{

	cmd->opc = NVME_OPC_FLUSH;
	cmd->nsid = nsid;
}

static inline
void	nvme_ns_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint32_t nsid,
    uint64_t lba, uint32_t count)
{
	cmd->opc = rwcmd;
	cmd->nsid = nsid;
	cmd->cdw10 = lba & 0xffffffffu;
	cmd->cdw11 = lba >> 32;
	cmd->cdw12 = count - 1;
	cmd->cdw13 = 0;
	cmd->cdw14 = 0;
	cmd->cdw15 = 0;
}

static inline
void	nvme_ns_write_cmd(struct nvme_command *cmd, uint32_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
}

static inline
void	nvme_ns_read_cmd(struct nvme_command *cmd, uint32_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
}

static inline
void	nvme_ns_trim_cmd(struct nvme_command *cmd, uint32_t nsid,
    uint32_t num_ranges)
{
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = num_ranges - 1;
	cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
}

#endif /* _KERNEL */

#endif /* __NVME_H__ */