/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_H__
#define __NVME_H__

#ifdef _KERNEL
#include <sys/types.h>
#endif

#include <sys/param.h>
#include <sys/endian.h>

#define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
#define	NVME_RESET_CONTROLLER		_IO('n', 1)

#define	NVME_IO_TEST			_IOWR('n', 100, struct nvme_io_test)
#define	NVME_BIO_TEST			_IOWR('n', 101, struct nvme_io_test)

/*
 * Macros to deal with NVME revisions, as defined in the VS register
 */
#define	NVME_REV(x, y)			(((x) << 16) | ((y) << 8))
#define	NVME_MAJOR(r)			(((r) >> 16) & 0xffff)
#define	NVME_MINOR(r)			(((r) >> 8) & 0xff)

/*
 * Used to mark a command to apply to all namespaces, or to retrieve global
 * log pages.
 */
#define	NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

/* Cap nvme to 1MB transfers; the driver explodes with larger sizes */
#define	NVME_MAX_XFER_SIZE		(MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))
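
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * the VS register and the "ver" field of the identify controller data use
 * the same layout, so a raw value can be decomposed with NVME_MAJOR() and
 * NVME_MINOR(), or compared against NVME_REV().
 */
static inline int
nvme_example_is_rev_1_2_or_later(uint32_t vs)
{

	return (NVME_REV(NVME_MAJOR(vs), NVME_MINOR(vs)) >= NVME_REV(1, 2));
}
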
/* Register field definitions */
#define	NVME_CAP_LO_REG_MQES_SHIFT	(0)
#define	NVME_CAP_LO_REG_MQES_MASK	(0xFFFF)
#define	NVME_CAP_LO_REG_CQR_SHIFT	(16)
#define	NVME_CAP_LO_REG_CQR_MASK	(0x1)
#define	NVME_CAP_LO_REG_AMS_SHIFT	(17)
#define	NVME_CAP_LO_REG_AMS_MASK	(0x3)
#define	NVME_CAP_LO_REG_TO_SHIFT	(24)
#define	NVME_CAP_LO_REG_TO_MASK		(0xFF)

#define	NVME_CAP_HI_REG_DSTRD_SHIFT	(0)
#define	NVME_CAP_HI_REG_DSTRD_MASK	(0xF)
#define	NVME_CAP_HI_REG_CSS_NVM_SHIFT	(5)
#define	NVME_CAP_HI_REG_CSS_NVM_MASK	(0x1)
#define	NVME_CAP_HI_REG_MPSMIN_SHIFT	(16)
#define	NVME_CAP_HI_REG_MPSMIN_MASK	(0xF)
#define	NVME_CAP_HI_REG_MPSMAX_SHIFT	(20)
#define	NVME_CAP_HI_REG_MPSMAX_MASK	(0xF)

#define	NVME_CC_REG_EN_SHIFT		(0)
#define	NVME_CC_REG_EN_MASK		(0x1)
#define	NVME_CC_REG_CSS_SHIFT		(4)
#define	NVME_CC_REG_CSS_MASK		(0x7)
#define	NVME_CC_REG_MPS_SHIFT		(7)
#define	NVME_CC_REG_MPS_MASK		(0xF)
#define	NVME_CC_REG_AMS_SHIFT		(11)
#define	NVME_CC_REG_AMS_MASK		(0x7)
#define	NVME_CC_REG_SHN_SHIFT		(14)
#define	NVME_CC_REG_SHN_MASK		(0x3)
#define	NVME_CC_REG_IOSQES_SHIFT	(16)
#define	NVME_CC_REG_IOSQES_MASK		(0xF)
#define	NVME_CC_REG_IOCQES_SHIFT	(20)
#define	NVME_CC_REG_IOCQES_MASK		(0xF)

#define	NVME_CSTS_REG_RDY_SHIFT		(0)
#define	NVME_CSTS_REG_RDY_MASK		(0x1)
#define	NVME_CSTS_REG_CFS_SHIFT		(1)
#define	NVME_CSTS_REG_CFS_MASK		(0x1)
#define	NVME_CSTS_REG_SHST_SHIFT	(2)
#define	NVME_CSTS_REG_SHST_MASK		(0x3)

#define	NVME_CSTS_GET_SHST(csts)	(((csts) >> NVME_CSTS_REG_SHST_SHIFT) & NVME_CSTS_REG_SHST_MASK)

#define	NVME_AQA_REG_ASQS_SHIFT		(0)
#define	NVME_AQA_REG_ASQS_MASK		(0xFFF)
#define	NVME_AQA_REG_ACQS_SHIFT		(16)
#define	NVME_AQA_REG_ACQS_MASK		(0xFFF)

/* Command field definitions */

#define	NVME_CMD_OPC_SHIFT		(0)
#define	NVME_CMD_OPC_MASK		(0xFF)
#define	NVME_CMD_FUSE_SHIFT		(8)
#define	NVME_CMD_FUSE_MASK		(0x3)

#define	NVME_CMD_SET_OPC(opc)		(htole16(((opc) & NVME_CMD_OPC_MASK) << NVME_CMD_OPC_SHIFT))

#define	NVME_STATUS_P_SHIFT		(0)
#define	NVME_STATUS_P_MASK		(0x1)
#define	NVME_STATUS_SC_SHIFT		(1)
#define	NVME_STATUS_SC_MASK		(0xFF)
#define	NVME_STATUS_SCT_SHIFT		(9)
#define	NVME_STATUS_SCT_MASK		(0x7)
#define	NVME_STATUS_M_SHIFT		(14)
#define	NVME_STATUS_M_MASK		(0x1)
#define	NVME_STATUS_DNR_SHIFT		(15)
#define	NVME_STATUS_DNR_MASK		(0x1)

#define	NVME_STATUS_GET_P(st)		(((st) >> NVME_STATUS_P_SHIFT) & NVME_STATUS_P_MASK)
#define	NVME_STATUS_GET_SC(st)		(((st) >> NVME_STATUS_SC_SHIFT) & NVME_STATUS_SC_MASK)
#define	NVME_STATUS_GET_SCT(st)		(((st) >> NVME_STATUS_SCT_SHIFT) & NVME_STATUS_SCT_MASK)
#define	NVME_STATUS_GET_M(st)		(((st) >> NVME_STATUS_M_SHIFT) & NVME_STATUS_M_MASK)
#define	NVME_STATUS_GET_DNR(st)		(((st) >> NVME_STATUS_DNR_SHIFT) & NVME_STATUS_DNR_MASK)

#define	NVME_PWR_ST_MPS_SHIFT		(0)
#define	NVME_PWR_ST_MPS_MASK		(0x1)
#define	NVME_PWR_ST_NOPS_SHIFT		(1)
#define	NVME_PWR_ST_NOPS_MASK		(0x1)
#define	NVME_PWR_ST_RRT_SHIFT		(0)
#define	NVME_PWR_ST_RRT_MASK		(0x1F)
#define	NVME_PWR_ST_RRL_SHIFT		(0)
#define	NVME_PWR_ST_RRL_MASK		(0x1F)
#define	NVME_PWR_ST_RWT_SHIFT		(0)
#define	NVME_PWR_ST_RWT_MASK		(0x1F)
#define	NVME_PWR_ST_RWL_SHIFT		(0)
#define	NVME_PWR_ST_RWL_MASK		(0x1F)
#define	NVME_PWR_ST_IPS_SHIFT		(6)
#define	NVME_PWR_ST_IPS_MASK		(0x3)
#define	NVME_PWR_ST_APW_SHIFT		(0)
#define	NVME_PWR_ST_APW_MASK		(0x7)
#define	NVME_PWR_ST_APS_SHIFT		(6)
#define	NVME_PWR_ST_APS_MASK		(0x3)
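
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API) of
 * the SHIFT/MASK convention used above: extract CAP.MPSMIN from the high
 * dword of the capabilities register.  Per the NVMe spec the minimum
 * supported memory page size is 2^(12 + MPSMIN) bytes.
 */
static inline uint32_t
nvme_example_min_page_size(uint32_t cap_hi)
{
	uint32_t mpsmin;

	mpsmin = (cap_hi >> NVME_CAP_HI_REG_MPSMIN_SHIFT) &
	    NVME_CAP_HI_REG_MPSMIN_MASK;
	return (1 << (12 + mpsmin));
}
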
/** OACS - optional admin command support */
/* supports security send/receive commands */
#define	NVME_CTRLR_DATA_OACS_SECURITY_SHIFT	(0)
#define	NVME_CTRLR_DATA_OACS_SECURITY_MASK	(0x1)
/* supports format nvm command */
#define	NVME_CTRLR_DATA_OACS_FORMAT_SHIFT	(1)
#define	NVME_CTRLR_DATA_OACS_FORMAT_MASK	(0x1)
/* supports firmware activate/download commands */
#define	NVME_CTRLR_DATA_OACS_FIRMWARE_SHIFT	(2)
#define	NVME_CTRLR_DATA_OACS_FIRMWARE_MASK	(0x1)
/* supports namespace management commands */
#define	NVME_CTRLR_DATA_OACS_NSMGMT_SHIFT	(3)
#define	NVME_CTRLR_DATA_OACS_NSMGMT_MASK	(0x1)

/** firmware updates */
/* first slot is read-only */
#define	NVME_CTRLR_DATA_FRMW_SLOT1_RO_SHIFT	(0)
#define	NVME_CTRLR_DATA_FRMW_SLOT1_RO_MASK	(0x1)
/* number of firmware slots */
#define	NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT	(1)
#define	NVME_CTRLR_DATA_FRMW_NUM_SLOTS_MASK	(0x7)

/** log page attributes */
/* per namespace smart/health log page */
#define	NVME_CTRLR_DATA_LPA_NS_SMART_SHIFT	(0)
#define	NVME_CTRLR_DATA_LPA_NS_SMART_MASK	(0x1)

/** AVSCC - admin vendor specific command configuration */
/* admin vendor specific commands use spec format */
#define	NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_SHIFT	(0)
#define	NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_MASK	(0x1)

/** Autonomous Power State Transition Attributes */
/* Autonomous Power State Transitions supported */
#define	NVME_CTRLR_DATA_APSTA_APST_SUPP_SHIFT	(0)
#define	NVME_CTRLR_DATA_APSTA_APST_SUPP_MASK	(0x1)

/** submission queue entry size */
#define	NVME_CTRLR_DATA_SQES_MIN_SHIFT		(0)
#define	NVME_CTRLR_DATA_SQES_MIN_MASK		(0xF)
#define	NVME_CTRLR_DATA_SQES_MAX_SHIFT		(4)
#define	NVME_CTRLR_DATA_SQES_MAX_MASK		(0xF)

/** completion queue entry size */
#define	NVME_CTRLR_DATA_CQES_MIN_SHIFT		(0)
#define	NVME_CTRLR_DATA_CQES_MIN_MASK		(0xF)
#define	NVME_CTRLR_DATA_CQES_MAX_SHIFT		(4)
#define	NVME_CTRLR_DATA_CQES_MAX_MASK		(0xF)

/** optional nvm command support */
#define	NVME_CTRLR_DATA_ONCS_COMPARE_SHIFT	(0)
#define	NVME_CTRLR_DATA_ONCS_COMPARE_MASK	(0x1)
#define	NVME_CTRLR_DATA_ONCS_WRITE_UNC_SHIFT	(1)
#define	NVME_CTRLR_DATA_ONCS_WRITE_UNC_MASK	(0x1)
#define	NVME_CTRLR_DATA_ONCS_DSM_SHIFT		(2)
#define	NVME_CTRLR_DATA_ONCS_DSM_MASK		(0x1)

/** volatile write cache */
#define	NVME_CTRLR_DATA_VWC_PRESENT_SHIFT	(0)
#define	NVME_CTRLR_DATA_VWC_PRESENT_MASK	(0x1)

/** namespace features */
/* thin provisioning */
#define	NVME_NS_DATA_NSFEAT_THIN_PROV_SHIFT	(0)
#define	NVME_NS_DATA_NSFEAT_THIN_PROV_MASK	(0x1)

/** formatted lba size */
#define	NVME_NS_DATA_FLBAS_FORMAT_SHIFT		(0)
#define	NVME_NS_DATA_FLBAS_FORMAT_MASK		(0xF)
#define	NVME_NS_DATA_FLBAS_EXTENDED_SHIFT	(4)
#define	NVME_NS_DATA_FLBAS_EXTENDED_MASK	(0x1)

/** metadata capabilities */
/* metadata can be transferred as part of data prp list */
#define	NVME_NS_DATA_MC_EXTENDED_SHIFT		(0)
#define	NVME_NS_DATA_MC_EXTENDED_MASK		(0x1)
/* metadata can be transferred with separate metadata pointer */
#define	NVME_NS_DATA_MC_POINTER_SHIFT		(1)
#define	NVME_NS_DATA_MC_POINTER_MASK		(0x1)

/** end-to-end data protection capabilities */
/* protection information type 1 */
#define	NVME_NS_DATA_DPC_PIT1_SHIFT		(0)
#define	NVME_NS_DATA_DPC_PIT1_MASK		(0x1)
/* protection information type 2 */
#define	NVME_NS_DATA_DPC_PIT2_SHIFT		(1)
#define	NVME_NS_DATA_DPC_PIT2_MASK		(0x1)
/* protection information type 3 */
#define	NVME_NS_DATA_DPC_PIT3_SHIFT		(2)
#define	NVME_NS_DATA_DPC_PIT3_MASK		(0x1)
/* first eight bytes of metadata */
#define	NVME_NS_DATA_DPC_MD_START_SHIFT		(3)
#define	NVME_NS_DATA_DPC_MD_START_MASK		(0x1)
/* last eight bytes of metadata */
#define	NVME_NS_DATA_DPC_MD_END_SHIFT		(4)
#define	NVME_NS_DATA_DPC_MD_END_MASK		(0x1)

/** end-to-end data protection type settings */
/* protection information type */
#define	NVME_NS_DATA_DPS_PIT_SHIFT		(0)
#define	NVME_NS_DATA_DPS_PIT_MASK		(0x7)
/* 1 == protection info transferred at start of metadata */
/* 0 == protection info transferred at end of metadata */
#define	NVME_NS_DATA_DPS_MD_START_SHIFT		(3)
#define	NVME_NS_DATA_DPS_MD_START_MASK		(0x1)

/** lba format support */
/* metadata size */
#define	NVME_NS_DATA_LBAF_MS_SHIFT		(0)
#define	NVME_NS_DATA_LBAF_MS_MASK		(0xFFFF)
/* lba data size */
#define	NVME_NS_DATA_LBAF_LBADS_SHIFT		(16)
#define	NVME_NS_DATA_LBAF_LBADS_MASK		(0xFF)
/* relative performance */
#define	NVME_NS_DATA_LBAF_RP_SHIFT		(24)
#define	NVME_NS_DATA_LBAF_RP_MASK		(0x3)

enum nvme_critical_warning_state {
	NVME_CRIT_WARN_ST_AVAILABLE_SPARE	= 0x1,
	NVME_CRIT_WARN_ST_TEMPERATURE		= 0x2,
	NVME_CRIT_WARN_ST_DEVICE_RELIABILITY	= 0x4,
	NVME_CRIT_WARN_ST_READ_ONLY		= 0x8,
	NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP = 0x10,
};
#define	NVME_CRIT_WARN_ST_RESERVED_MASK		(0xE0)

/* slot for current FW */
#define	NVME_FIRMWARE_PAGE_AFI_SLOT_SHIFT	(0)
#define	NVME_FIRMWARE_PAGE_AFI_SLOT_MASK	(0x7)

/* CC register SHN field values */
enum shn_value {
	NVME_SHN_NORMAL		= 0x1,
	NVME_SHN_ABRUPT		= 0x2,
};

/* CSTS register SHST field values */
enum shst_value {
	NVME_SHST_NORMAL	= 0x0,
	NVME_SHST_OCCURRING	= 0x1,
	NVME_SHST_COMPLETE	= 0x2,
};

struct nvme_registers
{
	/** controller capabilities */
	uint32_t	cap_lo;
	uint32_t	cap_hi;

	uint32_t	vs;	/* version */
	uint32_t	intms;	/* interrupt mask set */
	uint32_t	intmc;	/* interrupt mask clear */

	/** controller configuration */
	uint32_t	cc;

	uint32_t	reserved1;

	/** controller status */
	uint32_t	csts;

	uint32_t	reserved2;

	/** admin queue attributes */
	uint32_t	aqa;

	uint64_t	asq;	/* admin submission queue base addr */
	uint64_t	acq;	/* admin completion queue base addr */
	uint32_t	reserved3[0x3f2];

	struct {
	    uint32_t	sq_tdbl;	/* submission queue tail doorbell */
	    uint32_t	cq_hdbl;	/* completion queue head doorbell */
	} doorbell[1] __packed;
} __packed;

_Static_assert(sizeof(struct nvme_registers) == 0x1008, "bad size for nvme_registers");
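
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * byte offset of a queue doorbell register as described in the NVMe spec,
 * derived from CAP.DSTRD.  The doorbell[] array above encodes the minimum
 * stride (DSTRD == 0); controllers may report a larger one.
 */
static inline uint32_t
nvme_example_doorbell_offset(uint32_t cap_hi, uint32_t qid, uint32_t is_cq)
{
	uint32_t dstrd;

	dstrd = (cap_hi >> NVME_CAP_HI_REG_DSTRD_SHIFT) &
	    NVME_CAP_HI_REG_DSTRD_MASK;
	return (0x1000 + ((2 * qid + is_cq) << (2 + dstrd)));
}
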
struct nvme_command
{
	/* dword 0 */
	uint16_t opc_fuse;	/* opcode, fused operation */
	uint16_t cid;		/* command identifier */

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t rsvd2;
	uint32_t rsvd3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 */
	uint64_t prp1;		/* prp entry 1 */

	/* dword 8-9 */
	uint64_t prp2;		/* prp entry 2 */

	/* dword 10-15 */
	uint32_t cdw10;		/* command-specific */
	uint32_t cdw11;		/* command-specific */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
} __packed;

_Static_assert(sizeof(struct nvme_command) == 16 * 4, "bad size for nvme_command");

struct nvme_completion {

	/* dword 0 */
	uint32_t	cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t	rsvd1;

	/* dword 2 */
	uint16_t	sqhd;	/* submission queue head pointer */
	uint16_t	sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t	cid;	/* command identifier */
	uint16_t	status;
} __packed;

_Static_assert(sizeof(struct nvme_completion) == 4 * 4, "bad size for nvme_completion");

struct nvme_dsm_range {

	uint32_t attributes;
	uint32_t length;
	uint64_t starting_lba;
} __packed;

_Static_assert(sizeof(struct nvme_dsm_range) == 16, "bad size for nvme_dsm_range");

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	/* 0x3-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
};
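
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * classifying a completion status word with the NVME_STATUS_GET_*
 * accessors.  A command failed if SC or SCT is non-zero; a set DNR bit
 * means the command should not be retried.  Assumes the status word has
 * already been converted to host byte order.
 */
static inline int
nvme_example_status_is_retryable(uint16_t status)
{

	if (NVME_STATUS_GET_SCT(status) == NVME_SCT_GENERIC &&
	    NVME_STATUS_GET_SC(status) == NVME_SC_SUCCESS)
		return (0);		/* success, nothing to retry */
	return (!NVME_STATUS_GET_DNR(status));
}
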
/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	NVME_OPC_NAMESPACE_MANAGEMENT		= 0x0d,
	/* 0x0e-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,
	NVME_OPC_NAMESPACE_ATTACHMENT		= 0x15,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C,
	NVME_FEAT_HOST_MEMORY_BUFFER		= 0x0D,
	NVME_FEAT_TIMESTAMP			= 0x0E,
	NVME_FEAT_KEEP_ALIVE_TIMER		= 0x0F,
	NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT	= 0x10,
	NVME_FEAT_NON_OP_POWER_STATE_CONFIG	= 0x11,
	/* 0x12-0x77 - reserved */
	/* 0x78-0x7f - NVMe Management Interface */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	/* 0x81-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};
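
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * cdw11 encoding for Set Features / NVME_FEAT_NUMBER_OF_QUEUES.  Per the
 * NVMe spec both counts are zero-based, with the number of submission
 * queues requested in bits 15:0 and completion queues in bits 31:16.
 */
static inline uint32_t
nvme_example_num_queues_cdw11(uint32_t num_sq, uint32_t num_cq)
{

	return (((num_cq - 1) << 16) | ((num_sq - 1) & 0xffff));
}
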
enum nvme_dsm_attribute {
	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
};

enum nvme_activate_action {
	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
	NVME_AA_REPLACE_ACTIVATE		= 0x1,
	NVME_AA_ACTIVATE			= 0x2,
};

struct nvme_power_state {
	/** Maximum Power */
	uint16_t	mp;		/* Maximum Power */
	uint8_t		ps_rsvd1;
	uint8_t		mps_nops;	/* Max Power Scale, Non-Operational State */

	uint32_t	enlat;		/* Entry Latency */
	uint32_t	exlat;		/* Exit Latency */

	uint8_t		rrt;		/* Relative Read Throughput */
	uint8_t		rrl;		/* Relative Read Latency */
	uint8_t		rwt;		/* Relative Write Throughput */
	uint8_t		rwl;		/* Relative Write Latency */

	uint16_t	idlp;		/* Idle Power */
	uint8_t		ips;		/* Idle Power Scale */
	uint8_t		ps_rsvd8;

	uint16_t	actp;		/* Active Power */
	uint8_t		apw_aps;	/* Active Power Workload, Active Power Scale */
	uint8_t		ps_rsvd10[9];
} __packed;

_Static_assert(sizeof(struct nvme_power_state) == 32, "bad size for nvme_power_state");

#define	NVME_SERIAL_NUMBER_LENGTH	20
#define	NVME_MODEL_NUMBER_LENGTH	40
#define	NVME_FIRMWARE_REVISION_LENGTH	8

struct nvme_controller_data {

	/* bytes 0-255: controller capabilities and features */

	/** pci vendor id */
	uint16_t		vid;

	/** pci subsystem vendor id */
	uint16_t		ssvid;

	/** serial number */
	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];

	/** model number */
	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];

	/** firmware revision */
	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];

	/** recommended arbitration burst */
	uint8_t			rab;

	/** ieee oui identifier */
	uint8_t			ieee[3];

	/** multi-interface capabilities */
	uint8_t			mic;

	/** maximum data transfer size */
	uint8_t			mdts;

	/** Controller ID */
	uint16_t		ctrlr_id;

	/** Version */
	uint32_t		ver;

	/** RTD3 Resume Latency */
	uint32_t		rtd3r;

	/** RTD3 Enter Latency */
	uint32_t		rtd3e;

	/** Optional Asynchronous Events Supported */
	uint32_t		oaes;	/* bitfield really */

	/** Controller Attributes */
	uint32_t		ctratt;	/* bitfield really */

	uint8_t			reserved1[12];

	/** FRU Globally Unique Identifier */
	uint8_t			fguid[16];

	uint8_t			reserved2[128];

	/* bytes 256-511: admin command set attributes */

	/** optional admin command support */
	uint16_t		oacs;

	/** abort command limit */
	uint8_t			acl;

	/** asynchronous event request limit */
	uint8_t			aerl;

	/** firmware updates */
	uint8_t			frmw;

	/** log page attributes */
	uint8_t			lpa;

	/** error log page entries */
	uint8_t			elpe;

	/** number of power states supported */
	uint8_t			npss;

	/** admin vendor specific command configuration */
	uint8_t			avscc;

	/** Autonomous Power State Transition Attributes */
	uint8_t			apsta;

	/** Warning Composite Temperature Threshold */
	uint16_t		wctemp;

	/** Critical Composite Temperature Threshold */
	uint16_t		cctemp;

	/** Maximum Time for Firmware Activation */
	uint16_t		mtfa;

	/** Host Memory Buffer Preferred Size */
	uint32_t		hmpre;

	/** Host Memory Buffer Minimum Size */
	uint32_t		hmmin;

	/** Name space capabilities */
	struct {
		/* if nsmgmt, report tnvmcap and unvmcap */
		uint8_t		tnvmcap[16];
		uint8_t		unvmcap[16];
	} __packed untncap;

	/** Replay Protected Memory Block Support */
	uint32_t		rpmbs;	/* Really a bitfield */

	/** Extended Device Self-test Time */
	uint16_t		edstt;

	/** Device Self-test Options */
	uint8_t			dsto;	/* Really a bitfield */

	/** Firmware Update Granularity */
	uint8_t			fwug;

	/** Keep Alive Support */
	uint16_t		kas;

	/** Host Controlled Thermal Management Attributes */
	uint16_t		hctma;	/* Really a bitfield */

	/** Minimum Thermal Management Temperature */
	uint16_t		mntmt;

	/** Maximum Thermal Management Temperature */
	uint16_t		mxtmt;

	/** Sanitize Capabilities */
	uint32_t		sanicap; /* Really a bitfield */

	uint8_t			reserved3[180];
	/* bytes 512-703: nvm command set attributes */

	/** submission queue entry size */
	uint8_t			sqes;

	/** completion queue entry size */
	uint8_t			cqes;

	/** Maximum Outstanding Commands */
	uint16_t		maxcmd;

	/** number of namespaces */
	uint32_t		nn;

	/** optional nvm command support */
	uint16_t		oncs;

	/** fused operation support */
	uint16_t		fuses;

	/** format nvm attributes */
	uint8_t			fna;

	/** volatile write cache */
	uint8_t			vwc;

	/* TODO: flesh out remaining nvm command set attributes */
	uint8_t			reserved5[178];

	/* bytes 704-2047: i/o command set attributes */
	uint8_t			reserved6[1344];

	/* bytes 2048-3071: power state descriptors */
	struct nvme_power_state power_state[32];

	/* bytes 3072-4095: vendor specific */
	uint8_t			vs[1024];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_controller_data) == 4096, "bad size for nvme_controller_data");
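
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * MDTS expresses the maximum data transfer size as a power of two in units
 * of the minimum memory page size (CAP.MPSMIN), with 0 meaning no limit.
 * Assumes the identify data has already been converted to host byte order
 * (see nvme_controller_data_swapbytes() below).
 */
static inline uint64_t
nvme_example_max_xfer_bytes(uint32_t cap_hi, const struct nvme_controller_data *cd)
{
	uint32_t mpsmin;

	if (cd->mdts == 0)
		return (0);		/* no limit reported */
	mpsmin = (cap_hi >> NVME_CAP_HI_REG_MPSMIN_SHIFT) &
	    NVME_CAP_HI_REG_MPSMIN_MASK;
	return ((uint64_t)1 << (12 + mpsmin + cd->mdts));
}
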
struct nvme_namespace_data {

	/** namespace size */
	uint64_t		nsze;

	/** namespace capacity */
	uint64_t		ncap;

	/** namespace utilization */
	uint64_t		nuse;

	/** namespace features */
	uint8_t			nsfeat;

	/** number of lba formats */
	uint8_t			nlbaf;

	/** formatted lba size */
	uint8_t			flbas;

	/** metadata capabilities */
	uint8_t			mc;

	/** end-to-end data protection capabilities */
	uint8_t			dpc;

	/** end-to-end data protection type settings */
	uint8_t			dps;

	uint8_t			reserved5[98];

	/** lba format support */
	uint32_t		lbaf[16];

	uint8_t			reserved6[192];

	uint8_t			vendor_specific[3712];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_namespace_data) == 4096, "bad size for nvme_namespace_data");
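
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * the active LBA format index comes from FLBAS, and LBADS in the matching
 * lbaf[] entry is the log2 of the LBA data size.  Assumes the identify
 * data has already been converted to host byte order (see
 * nvme_namespace_data_swapbytes() below).
 */
static inline uint32_t
nvme_example_sector_size(const struct nvme_namespace_data *nsd)
{
	uint32_t format, lbads;

	format = (nsd->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
	    NVME_NS_DATA_FLBAS_FORMAT_MASK;
	lbads = (nsd->lbaf[format] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) &
	    NVME_NS_DATA_LBAF_LBADS_MASK;
	return (1 << lbads);
}
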
enum nvme_log_page {

	/* 0x00 - reserved */
	NVME_LOG_ERROR			= 0x01,
	NVME_LOG_HEALTH_INFORMATION	= 0x02,
	NVME_LOG_FIRMWARE_SLOT		= 0x03,
	NVME_LOG_CHANGED_NAMESPACE	= 0x04,
	NVME_LOG_COMMAND_EFFECT		= 0x05,
	/* 0x06-0x7F - reserved */
	/* 0x80-0xBF - I/O command set specific */
	NVME_LOG_RES_NOTIFICATION	= 0x80,
	/* 0xC0-0xFF - vendor specific */

	/*
	 * The following are Intel Specific log pages, but they seem
	 * to be widely implemented.
	 */
	INTEL_LOG_READ_LAT_LOG		= 0xc1,
	INTEL_LOG_WRITE_LAT_LOG		= 0xc2,
	INTEL_LOG_TEMP_STATS		= 0xc5,
	INTEL_LOG_ADD_SMART		= 0xca,
	INTEL_LOG_DRIVE_MKT_NAME	= 0xdd,

	/*
	 * HGST log page, with lots of sub pages.
	 */
	HGST_INFO_LOG			= 0xc1,
};

struct nvme_error_information_entry {

	uint64_t		error_count;
	uint16_t		sqid;
	uint16_t		cid;
	uint16_t		status;
	uint16_t		error_location;
	uint64_t		lba;
	uint32_t		nsid;
	uint8_t			vendor_specific;
	uint8_t			reserved[35];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_error_information_entry) == 64, "bad size for nvme_error_information_entry");

struct nvme_health_information_page {

	uint8_t			critical_warning;
	uint16_t		temperature;
	uint8_t			available_spare;
	uint8_t			available_spare_threshold;
	uint8_t			percentage_used;

	uint8_t			reserved[26];

	/*
	 * Note that the following are 128-bit values, but are
	 * defined as an array of 2 64-bit values.
	 */
	/* Data Units Read is always in 512-byte units. */
	uint64_t		data_units_read[2];
	/* Data Units Written is always in 512-byte units. */
	uint64_t		data_units_written[2];
	/* For NVM command set, this includes Compare commands. */
	uint64_t		host_read_commands[2];
	uint64_t		host_write_commands[2];
	/* Controller Busy Time is reported in minutes. */
	uint64_t		controller_busy_time[2];
	uint64_t		power_cycles[2];
	uint64_t		power_on_hours[2];
	uint64_t		unsafe_shutdowns[2];
	uint64_t		media_errors[2];
	uint64_t		num_error_info_log_entries[2];
	uint32_t		warning_temp_time;
	uint32_t		error_temp_time;
	uint16_t		temp_sensor[8];

	uint8_t			reserved2[296];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_health_information_page) == 512, "bad size for nvme_health_information_page");
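
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * the composite temperature in the health log is reported in degrees
 * Kelvin per the NVMe spec.  Assumes the page has already been converted
 * to host byte order (see nvme_health_information_page_swapbytes() below).
 */
static inline int
nvme_example_composite_temp_celsius(const struct nvme_health_information_page *hp)
{

	return ((int)hp->temperature - 273);
}
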
struct nvme_firmware_page {

	uint8_t			afi;
	uint8_t			reserved[7];
	uint64_t		revision[7];	/* revisions for 7 slots */
	uint8_t			reserved2[448];
} __packed __aligned(4);

_Static_assert(sizeof(struct nvme_firmware_page) == 512, "bad size for nvme_firmware_page");

struct intel_log_temp_stats
{
	uint64_t	current;
	uint64_t	overtemp_flag_last;
	uint64_t	overtemp_flag_life;
	uint64_t	max_temp;
	uint64_t	min_temp;
	uint64_t	_rsvd[5];
	uint64_t	max_oper_temp;
	uint64_t	min_oper_temp;
	uint64_t	est_offset;
} __packed __aligned(4);

_Static_assert(sizeof(struct intel_log_temp_stats) == 13 * 8, "bad size for intel_log_temp_stats");

#define	NVME_TEST_MAX_THREADS	128

struct nvme_io_test {

	enum nvme_nvm_opcode	opc;
	uint32_t		size;
	uint32_t		time;	/* in seconds */
	uint32_t		num_threads;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

enum nvme_io_test_flags {

	/*
	 * Specifies whether dev_refthread/dev_relthread should be
	 * called during NVME_BIO_TEST.  Ignored for other test
	 * types.
	 */
	NVME_TEST_FLAG_REFTHREAD =	0x1,
};

struct nvme_pt_command {

	/*
	 * cmd is used to specify a passthrough command to a controller or
	 * namespace.
	 *
	 * The following fields from cmd may be specified by the caller:
	 *	* opc  (opcode)
	 *	* nsid (namespace id) - for admin commands only
	 *	* cdw10-cdw15
	 *
	 * Remaining fields must be set to 0 by the caller.
	 */
	struct nvme_command	cmd;

	/*
	 * cpl returns completion status for the passthrough command
	 * specified by cmd.
	 *
	 * The following fields will be filled out by the driver, for
	 * consumption by the caller:
	 *	* cdw0
	 *	* status (except for phase)
	 *
	 * Remaining fields will be set to 0 by the driver.
	 */
	struct nvme_completion	cpl;

	/* buf is the data buffer associated with this passthrough command. */
	void *			buf;

	/*
	 * len is the length of the data buffer associated with this
	 * passthrough command.
	 */
	uint32_t		len;

	/*
	 * is_read = 1 if the passthrough command will read data into the
	 * supplied buffer from the controller.
	 *
	 * is_read = 0 if the passthrough command will write data from the
	 * supplied buffer to the controller.
	 */
	uint32_t		is_read;

	/*
	 * driver_lock is used by the driver only.  It must be set to 0
	 * by the caller.
	 */
	struct mtx *		driver_lock;
};

#define	nvme_completion_is_error(cpl)					\
	(NVME_STATUS_GET_SC((cpl)->status) != 0 || NVME_STATUS_GET_SCT((cpl)->status) != 0)
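
/*
 * Illustrative userland sketch (hypothetical, error handling omitted):
 * issuing an Identify Controller command through the passthrough ioctl.
 *
 *	struct nvme_pt_command		pt;
 *	struct nvme_controller_data	cdata;
 *	int fd = open("/dev/nvme0", O_RDWR);
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_IDENTIFY);
 *	pt.cmd.cdw10 = htole32(1);		(CNS 1 - identify controller)
 *	pt.buf = &cdata;
 *	pt.len = sizeof(cdata);
 *	pt.is_read = 1;
 *	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) == 0 &&
 *	    !nvme_completion_is_error(&pt.cpl))
 *		nvme_controller_data_swapbytes(&cdata);
 */
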
void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);

#ifdef _KERNEL

struct bio;

struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
				     uint32_t, void *, uint32_t);
typedef void (*nvme_cons_fail_fn_t)(void *);

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
				   struct nvme_pt_command *pt,
				   uint32_t nsid, int is_user_buffer,
				   int is_admin_cmd);

/* Admin functions */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
				    uint8_t log_page, uint32_t nsid,
				    void *payload, uint32_t payload_size,
				    nvme_cb_fn_t cb_fn, void *cb_arg);

/* NVM I/O functions */
int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			 void *cb_arg);
int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
			     nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
			       void *cb_arg);
int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset,
		     size_t len);

/* Registration functions */
struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
					       nvme_cons_ctrlr_fn_t ctrlr_fn,
					       nvme_cons_async_fn_t async_fn,
					       nvme_cons_fail_fn_t  fail_fn);
void	nvme_unregister_consumer(struct nvme_consumer *consumer);

/* Controller helper functions */
device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
const struct nvme_controller_data *
		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);

/* Namespace helper functions */
uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
		nvme_ns_get_data(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_stripesize(struct nvme_namespace *ns);

int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
			    nvme_cb_fn_t cb_fn);

/*
 * Command building helper functions -- shared with CAM.
 * These functions assume the allocator zeros out the cmd structure;
 * CAM's xpt_get_ccb and the nvme request allocator both do zeroed
 * allocations.
 */
static inline
void	nvme_ns_flush_cmd(struct nvme_command *cmd, uint32_t nsid)
{

	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_FLUSH);
	cmd->nsid = htole32(nsid);
}

static inline
void	nvme_ns_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint32_t nsid,
    uint64_t lba, uint32_t count)
{
	cmd->opc_fuse = NVME_CMD_SET_OPC(rwcmd);
	cmd->nsid = htole32(nsid);
	cmd->cdw10 = htole32(lba & 0xffffffffu);
	cmd->cdw11 = htole32(lba >> 32);
	cmd->cdw12 = htole32(count-1);
}

static inline
void	nvme_ns_write_cmd(struct nvme_command *cmd, uint32_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
}

static inline
void	nvme_ns_read_cmd(struct nvme_command *cmd, uint32_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
}

static inline
void	nvme_ns_trim_cmd(struct nvme_command *cmd, uint32_t nsid,
    uint32_t num_ranges)
{
	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT);
	cmd->nsid = htole32(nsid);
	cmd->cdw10 = htole32(num_ranges - 1);
	cmd->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
}
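
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * filling a single struct nvme_dsm_range and the matching Dataset
 * Management command for a deallocate (TRIM) of "count" blocks starting at
 * "lba".  Like the helpers above, this assumes cmd was zeroed by its
 * allocator; the range descriptor travels in the data buffer and is
 * little-endian on the wire.
 */
static inline
void	nvme_example_fill_trim(struct nvme_command *cmd,
    struct nvme_dsm_range *range, uint32_t nsid, uint64_t lba, uint32_t count)
{

	range->attributes = 0;			/* no context attributes */
	range->length = htole32(count);
	range->starting_lba = htole64(lba);
	nvme_ns_trim_cmd(cmd, nsid, 1);		/* deallocate bit set in cdw11 */
}
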
extern int nvme_use_nvd;

#endif /* _KERNEL */

/* Endianness conversion functions for NVMe structs */
static inline
void	nvme_completion_swapbytes(struct nvme_completion *s)
{

	s->cdw0 = le32toh(s->cdw0);
	/* omit rsvd1 */
	s->sqhd = le16toh(s->sqhd);
	s->sqid = le16toh(s->sqid);
	/* omit cid */
	s->status = le16toh(s->status);
}

static inline
void	nvme_power_state_swapbytes(struct nvme_power_state *s)
{

	s->mp = le16toh(s->mp);
	s->enlat = le32toh(s->enlat);
	s->exlat = le32toh(s->exlat);
	s->idlp = le16toh(s->idlp);
	s->actp = le16toh(s->actp);
}

static inline
void	nvme_controller_data_swapbytes(struct nvme_controller_data *s)
{
	int i;

	s->vid = le16toh(s->vid);
	s->ssvid = le16toh(s->ssvid);
	s->ctrlr_id = le16toh(s->ctrlr_id);
	s->ver = le32toh(s->ver);
	s->rtd3r = le32toh(s->rtd3r);
	s->rtd3e = le32toh(s->rtd3e);
	s->oaes = le32toh(s->oaes);
	s->ctratt = le32toh(s->ctratt);
	s->oacs = le16toh(s->oacs);
	s->wctemp = le16toh(s->wctemp);
	s->cctemp = le16toh(s->cctemp);
	s->mtfa = le16toh(s->mtfa);
	s->hmpre = le32toh(s->hmpre);
	s->hmmin = le32toh(s->hmmin);
	s->rpmbs = le32toh(s->rpmbs);
	s->edstt = le16toh(s->edstt);
	s->kas = le16toh(s->kas);
	s->hctma = le16toh(s->hctma);
	s->mntmt = le16toh(s->mntmt);
	s->mxtmt = le16toh(s->mxtmt);
	s->sanicap = le32toh(s->sanicap);
	s->maxcmd = le16toh(s->maxcmd);
	s->nn = le32toh(s->nn);
	s->oncs = le16toh(s->oncs);
	s->fuses = le16toh(s->fuses);
	for (i = 0; i < 32; i++)
		nvme_power_state_swapbytes(&s->power_state[i]);
}

static inline
void	nvme_namespace_data_swapbytes(struct nvme_namespace_data *s)
{
	int i;

	s->nsze = le64toh(s->nsze);
	s->ncap = le64toh(s->ncap);
	s->nuse = le64toh(s->nuse);
	for (i = 0; i < 16; i++)
		s->lbaf[i] = le32toh(s->lbaf[i]);
}

static inline
void	nvme_error_information_entry_swapbytes(struct nvme_error_information_entry *s)
{

	s->error_count = le64toh(s->error_count);
	s->sqid = le16toh(s->sqid);
	s->cid = le16toh(s->cid);
	s->status = le16toh(s->status);
	s->error_location = le16toh(s->error_location);
	s->lba = le64toh(s->lba);
	s->nsid = le32toh(s->nsid);
}

static inline
void	nvme_le128toh(void *p)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
	/* Swap 16 bytes in place */
	char *tmp = (char*)p;
	char b;
	int i;
	for (i = 0; i < 8; i++) {
		b = tmp[i];
		tmp[i] = tmp[15-i];
		tmp[15-i] = b;
	}
#else
	(void)p;
#endif
}

static inline
void	nvme_health_information_page_swapbytes(struct nvme_health_information_page *s)
{
	int i;

	s->temperature = le16toh(s->temperature);
	nvme_le128toh((void *)s->data_units_read);
	nvme_le128toh((void *)s->data_units_written);
	nvme_le128toh((void *)s->host_read_commands);
	nvme_le128toh((void *)s->host_write_commands);
	nvme_le128toh((void *)s->controller_busy_time);
	nvme_le128toh((void *)s->power_cycles);
	nvme_le128toh((void *)s->power_on_hours);
	nvme_le128toh((void *)s->unsafe_shutdowns);
	nvme_le128toh((void *)s->media_errors);
	nvme_le128toh((void *)s->num_error_info_log_entries);
	s->warning_temp_time = le32toh(s->warning_temp_time);
	s->error_temp_time = le32toh(s->error_temp_time);
	for (i = 0; i < 8; i++)
		s->temp_sensor[i] = le16toh(s->temp_sensor[i]);
}

static inline
void	nvme_firmware_page_swapbytes(struct nvme_firmware_page *s)
{
	int i;

	for (i = 0; i < 7; i++)
		s->revision[i] = le64toh(s->revision[i]);
}

static inline
void	intel_log_temp_stats_swapbytes(struct intel_log_temp_stats *s)
{

	s->current = le64toh(s->current);
	s->overtemp_flag_last = le64toh(s->overtemp_flag_last);
	s->overtemp_flag_life = le64toh(s->overtemp_flag_life);
	s->max_temp = le64toh(s->max_temp);
	s->min_temp = le64toh(s->min_temp);
	/* omit _rsvd[] */
	s->max_oper_temp = le64toh(s->max_oper_temp);
	s->min_oper_temp = le64toh(s->min_oper_temp);
	s->est_offset = le64toh(s->est_offset);
}

#endif /* __NVME_H__ */