xref: /freebsd/sys/dev/nvme/nvme.h (revision 63a938566d524836885917d95bd491aa4400b181)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2013 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #ifndef __NVME_H__
32 #define __NVME_H__
33 
34 #ifdef _KERNEL
35 #include <sys/types.h>
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 
41 #define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
42 #define	NVME_RESET_CONTROLLER		_IO('n', 1)
43 
44 #define	NVME_IO_TEST			_IOWR('n', 100, struct nvme_io_test)
45 #define	NVME_BIO_TEST			_IOWR('n', 101, struct nvme_io_test)
46 
47 /*
48  * Macros to deal with NVME revisions, as defined in the VS register
49  */
50 #define NVME_REV(x, y)			(((x) << 16) | ((y) << 8))
51 #define NVME_MAJOR(r)			(((r) >> 16) & 0xffff)
52 #define NVME_MINOR(r)			(((r) >> 8) & 0xff)
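As a quick illustration of these macros (not taken from the driver itself), a VS value of 0x00010200 read from a controller decodes to major version 1, minor version 2:

	uint32_t vs = 0x00010200;		/* hypothetical VS register readout */
	uint32_t major = NVME_MAJOR(vs);	/* == 1 */
	uint32_t minor = NVME_MINOR(vs);	/* == 2 */

	if (NVME_REV(major, minor) >= NVME_REV(1, 2)) {
		/* controller reports at least NVMe 1.2 */
	}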
53 
54 /*
55  * Used to mark a command to apply to all namespaces, or to retrieve global
56  *  log pages.
57  */
58 #define NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)
59 
60 /* Cap nvme to 1MB transfers; the driver explodes with larger sizes */
61 #define NVME_MAX_XFER_SIZE		(MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))
62 
63 /* Register field definitions */
64 #define NVME_CAP_LO_REG_MQES_SHIFT			(0)
65 #define NVME_CAP_LO_REG_MQES_MASK			(0xFFFF)
66 #define NVME_CAP_LO_REG_CQR_SHIFT			(16)
67 #define NVME_CAP_LO_REG_CQR_MASK			(0x1)
68 #define NVME_CAP_LO_REG_AMS_SHIFT			(17)
69 #define NVME_CAP_LO_REG_AMS_MASK			(0x3)
70 #define NVME_CAP_LO_REG_TO_SHIFT			(24)
71 #define NVME_CAP_LO_REG_TO_MASK				(0xFF)
72 
73 #define NVME_CAP_HI_REG_DSTRD_SHIFT			(0)
74 #define NVME_CAP_HI_REG_DSTRD_MASK			(0xF)
75 #define NVME_CAP_HI_REG_CSS_NVM_SHIFT			(5)
76 #define NVME_CAP_HI_REG_CSS_NVM_MASK			(0x1)
77 #define NVME_CAP_HI_REG_MPSMIN_SHIFT			(16)
78 #define NVME_CAP_HI_REG_MPSMIN_MASK			(0xF)
79 #define NVME_CAP_HI_REG_MPSMAX_SHIFT			(20)
80 #define NVME_CAP_HI_REG_MPSMAX_MASK			(0xF)
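A minimal sketch of pulling fields out of the CAP register pair with the shift/mask pairs above. The helper names (nvme_cap_lo_mqes, nvme_cap_hi_dstrd) are illustrative and not part of this header; cap_lo and cap_hi are assumed to have been read from the register block defined further down in this file.

static inline uint32_t
nvme_cap_lo_mqes(uint32_t cap_lo)
{
	/* MQES is zero-based: add 1 to get the maximum queue depth. */
	return (((cap_lo >> NVME_CAP_LO_REG_MQES_SHIFT) &
	    NVME_CAP_LO_REG_MQES_MASK) + 1);
}

static inline uint32_t
nvme_cap_hi_dstrd(uint32_t cap_hi)
{
	/* Doorbell registers are spaced (4 << DSTRD) bytes apart. */
	return ((cap_hi >> NVME_CAP_HI_REG_DSTRD_SHIFT) &
	    NVME_CAP_HI_REG_DSTRD_MASK);
}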
81 
82 #define NVME_CC_REG_EN_SHIFT				(0)
83 #define NVME_CC_REG_EN_MASK				(0x1)
84 #define NVME_CC_REG_CSS_SHIFT				(4)
85 #define NVME_CC_REG_CSS_MASK				(0x7)
86 #define NVME_CC_REG_MPS_SHIFT				(7)
87 #define NVME_CC_REG_MPS_MASK				(0xF)
88 #define NVME_CC_REG_AMS_SHIFT				(11)
89 #define NVME_CC_REG_AMS_MASK				(0x7)
90 #define NVME_CC_REG_SHN_SHIFT				(14)
91 #define NVME_CC_REG_SHN_MASK				(0x3)
92 #define NVME_CC_REG_IOSQES_SHIFT			(16)
93 #define NVME_CC_REG_IOSQES_MASK				(0xF)
94 #define NVME_CC_REG_IOCQES_SHIFT			(20)
95 #define NVME_CC_REG_IOCQES_MASK				(0xF)
96 
97 #define NVME_CSTS_REG_RDY_SHIFT				(0)
98 #define NVME_CSTS_REG_RDY_MASK				(0x1)
99 #define NVME_CSTS_REG_CFS_SHIFT				(1)
100 #define NVME_CSTS_REG_CFS_MASK				(0x1)
101 #define NVME_CSTS_REG_SHST_SHIFT			(2)
102 #define NVME_CSTS_REG_SHST_MASK				(0x3)
103 
104 #define NVME_CSTS_GET_SHST(csts)			(((csts) >> NVME_CSTS_REG_SHST_SHIFT) & NVME_CSTS_REG_SHST_MASK)
105 
106 #define NVME_AQA_REG_ASQS_SHIFT				(0)
107 #define NVME_AQA_REG_ASQS_MASK				(0xFFF)
108 #define NVME_AQA_REG_ACQS_SHIFT				(16)
109 #define NVME_AQA_REG_ACQS_MASK				(0xFFF)
110 
111 /* Command field definitions */
112 
113 #define NVME_CMD_OPC_SHIFT				(0)
114 #define NVME_CMD_OPC_MASK				(0xFF)
115 #define NVME_CMD_FUSE_SHIFT				(8)
116 #define NVME_CMD_FUSE_MASK				(0x3)
117 
118 #define NVME_CMD_SET_OPC(opc)				(htole16(((opc) & NVME_CMD_OPC_MASK) << NVME_CMD_OPC_SHIFT))
119 
120 #define NVME_STATUS_P_SHIFT				(0)
121 #define NVME_STATUS_P_MASK				(0x1)
122 #define NVME_STATUS_SC_SHIFT				(1)
123 #define NVME_STATUS_SC_MASK				(0xFF)
124 #define NVME_STATUS_SCT_SHIFT				(9)
125 #define NVME_STATUS_SCT_MASK				(0x7)
126 #define NVME_STATUS_M_SHIFT				(14)
127 #define NVME_STATUS_M_MASK				(0x1)
128 #define NVME_STATUS_DNR_SHIFT				(15)
129 #define NVME_STATUS_DNR_MASK				(0x1)
130 
131 #define NVME_STATUS_GET_P(st)				(((st) >> NVME_STATUS_P_SHIFT) & NVME_STATUS_P_MASK)
132 #define NVME_STATUS_GET_SC(st)				(((st) >> NVME_STATUS_SC_SHIFT) & NVME_STATUS_SC_MASK)
133 #define NVME_STATUS_GET_SCT(st)				(((st) >> NVME_STATUS_SCT_SHIFT) & NVME_STATUS_SCT_MASK)
134 #define NVME_STATUS_GET_M(st)				(((st) >> NVME_STATUS_M_SHIFT) & NVME_STATUS_M_MASK)
135 #define NVME_STATUS_GET_DNR(st)				(((st) >> NVME_STATUS_DNR_SHIFT) & NVME_STATUS_DNR_MASK)
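One way a consumer might combine these accessors, sketched here rather than taken from the driver: decide whether a failed command is worth retrying by looking at the status code fields together with the Do Not Retry bit. The status word is assumed to already be in host byte order (see nvme_completion_swapbytes below).

static inline int
nvme_status_is_retryable(uint16_t status)	/* illustrative helper only */
{
	/* SCT == 0 and SC == 0 is a successful completion. */
	if (NVME_STATUS_GET_SCT(status) == 0 &&
	    NVME_STATUS_GET_SC(status) == 0)
		return (0);
	/* If the controller set DNR, retrying the command is pointless. */
	return (NVME_STATUS_GET_DNR(status) == 0);
}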
136 
137 #define NVME_PWR_ST_MPS_SHIFT				(0)
138 #define NVME_PWR_ST_MPS_MASK				(0x1)
139 #define NVME_PWR_ST_NOPS_SHIFT				(1)
140 #define NVME_PWR_ST_NOPS_MASK				(0x1)
141 #define NVME_PWR_ST_RRT_SHIFT				(0)
142 #define NVME_PWR_ST_RRT_MASK				(0x1F)
143 #define NVME_PWR_ST_RRL_SHIFT				(0)
144 #define NVME_PWR_ST_RRL_MASK				(0x1F)
145 #define NVME_PWR_ST_RWT_SHIFT				(0)
146 #define NVME_PWR_ST_RWT_MASK				(0x1F)
147 #define NVME_PWR_ST_RWL_SHIFT				(0)
148 #define NVME_PWR_ST_RWL_MASK				(0x1F)
149 #define NVME_PWR_ST_IPS_SHIFT				(6)
150 #define NVME_PWR_ST_IPS_MASK				(0x3)
151 #define NVME_PWR_ST_APW_SHIFT				(0)
152 #define NVME_PWR_ST_APW_MASK				(0x7)
153 #define NVME_PWR_ST_APS_SHIFT				(6)
154 #define NVME_PWR_ST_APS_MASK				(0x3)
155 
156 /** Controller Multi-path I/O and Namespace Sharing Capabilities */
157 /* More than one port */
158 #define NVME_CTRLR_DATA_MIC_MPORTS_SHIFT		(0)
159 #define NVME_CTRLR_DATA_MIC_MPORTS_MASK			(0x1)
160 /* More than one controller */
161 #define NVME_CTRLR_DATA_MIC_MCTRLRS_SHIFT		(1)
162 #define NVME_CTRLR_DATA_MIC_MCTRLRS_MASK		(0x1)
163 /* SR-IOV Virtual Function */
164 #define NVME_CTRLR_DATA_MIC_SRIOVVF_SHIFT		(2)
165 #define NVME_CTRLR_DATA_MIC_SRIOVVF_MASK		(0x1)
166 
167 /** OACS - optional admin command support */
168 /* supports security send/receive commands */
169 #define NVME_CTRLR_DATA_OACS_SECURITY_SHIFT		(0)
170 #define NVME_CTRLR_DATA_OACS_SECURITY_MASK		(0x1)
171 /* supports format nvm command */
172 #define NVME_CTRLR_DATA_OACS_FORMAT_SHIFT		(1)
173 #define NVME_CTRLR_DATA_OACS_FORMAT_MASK		(0x1)
174 /* supports firmware activate/download commands */
175 #define NVME_CTRLR_DATA_OACS_FIRMWARE_SHIFT		(2)
176 #define NVME_CTRLR_DATA_OACS_FIRMWARE_MASK		(0x1)
177 /* supports namespace management commands */
178 #define NVME_CTRLR_DATA_OACS_NSMGMT_SHIFT		(3)
179 #define NVME_CTRLR_DATA_OACS_NSMGMT_MASK		(0x1)
180 /* supports Device Self-test command */
181 #define NVME_CTRLR_DATA_OACS_SELFTEST_SHIFT		(4)
182 #define NVME_CTRLR_DATA_OACS_SELFTEST_MASK		(0x1)
183 /* supports Directives */
184 #define NVME_CTRLR_DATA_OACS_DIRECTIVES_SHIFT		(5)
185 #define NVME_CTRLR_DATA_OACS_DIRECTIVES_MASK		(0x1)
186 /* supports NVMe-MI Send/Receive */
187 #define NVME_CTRLR_DATA_OACS_NVMEMI_SHIFT		(6)
188 #define NVME_CTRLR_DATA_OACS_NVMEMI_MASK		(0x1)
189 /* supports Virtualization Management */
190 #define NVME_CTRLR_DATA_OACS_VM_SHIFT			(7)
191 #define NVME_CTRLR_DATA_OACS_VM_MASK			(0x1)
192 /* supports Doorbell Buffer Config */
193 #define NVME_CTRLR_DATA_OACS_DBBUFFER_SHIFT		(8)
194 #define NVME_CTRLR_DATA_OACS_DBBUFFER_MASK		(0x1)
195 
196 /** firmware updates */
197 /* first slot is read-only */
198 #define NVME_CTRLR_DATA_FRMW_SLOT1_RO_SHIFT		(0)
199 #define NVME_CTRLR_DATA_FRMW_SLOT1_RO_MASK		(0x1)
200 /* number of firmware slots */
201 #define NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT		(1)
202 #define NVME_CTRLR_DATA_FRMW_NUM_SLOTS_MASK		(0x7)
203 
204 /** log page attributes */
205 /* per namespace smart/health log page */
206 #define NVME_CTRLR_DATA_LPA_NS_SMART_SHIFT		(0)
207 #define NVME_CTRLR_DATA_LPA_NS_SMART_MASK		(0x1)
208 
209 /** AVSCC - admin vendor specific command configuration */
210 /* admin vendor specific commands use spec format */
211 #define NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_SHIFT		(0)
212 #define NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_MASK		(0x1)
213 
214 /** Autonomous Power State Transition Attributes */
215 /* Autonomous Power State Transitions supported */
216 #define NVME_CTRLR_DATA_APSTA_APST_SUPP_SHIFT		(0)
217 #define NVME_CTRLR_DATA_APSTA_APST_SUPP_MASK		(0x1)
218 
219 /** submission queue entry size */
220 #define NVME_CTRLR_DATA_SQES_MIN_SHIFT			(0)
221 #define NVME_CTRLR_DATA_SQES_MIN_MASK			(0xF)
222 #define NVME_CTRLR_DATA_SQES_MAX_SHIFT			(4)
223 #define NVME_CTRLR_DATA_SQES_MAX_MASK			(0xF)
224 
225 /** completion queue entry size */
226 #define NVME_CTRLR_DATA_CQES_MIN_SHIFT			(0)
227 #define NVME_CTRLR_DATA_CQES_MIN_MASK			(0xF)
228 #define NVME_CTRLR_DATA_CQES_MAX_SHIFT			(4)
229 #define NVME_CTRLR_DATA_CQES_MAX_MASK			(0xF)
230 
231 /** optional nvm command support */
232 #define NVME_CTRLR_DATA_ONCS_COMPARE_SHIFT		(0)
233 #define NVME_CTRLR_DATA_ONCS_COMPARE_MASK		(0x1)
234 #define NVME_CTRLR_DATA_ONCS_WRITE_UNC_SHIFT		(1)
235 #define NVME_CTRLR_DATA_ONCS_WRITE_UNC_MASK		(0x1)
236 #define NVME_CTRLR_DATA_ONCS_DSM_SHIFT			(2)
237 #define NVME_CTRLR_DATA_ONCS_DSM_MASK			(0x1)
238 #define NVME_CTRLR_DATA_ONCS_WRZERO_SHIFT		(3)
239 #define NVME_CTRLR_DATA_ONCS_WRZERO_MASK		(0x1)
240 #define NVME_CTRLR_DATA_ONCS_SAVEFEAT_SHIFT		(4)
241 #define NVME_CTRLR_DATA_ONCS_SAVEFEAT_MASK		(0x1)
242 #define NVME_CTRLR_DATA_ONCS_RESERV_SHIFT		(5)
243 #define NVME_CTRLR_DATA_ONCS_RESERV_MASK		(0x1)
244 #define NVME_CTRLR_DATA_ONCS_TIMESTAMP_SHIFT		(6)
245 #define NVME_CTRLR_DATA_ONCS_TIMESTAMP_MASK		(0x1)
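For example (a sketch only, assuming the struct nvme_controller_data defined later in this file is in scope and already byte-swapped), testing whether the controller implements Dataset Management:

static inline int
nvme_ctrlr_has_dsm(const struct nvme_controller_data *cdata)
{
	return ((cdata->oncs >> NVME_CTRLR_DATA_ONCS_DSM_SHIFT) &
	    NVME_CTRLR_DATA_ONCS_DSM_MASK);
}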
246 
247 /** Fused Operation Support */
248 #define NVME_CTRLR_DATA_FUSES_CNW_SHIFT		(0)
249 #define NVME_CTRLR_DATA_FUSES_CNW_MASK		(0x1)
250 
251 /** Format NVM Attributes */
252 #define NVME_CTRLR_DATA_FNA_FORMAT_ALL_SHIFT		(0)
253 #define NVME_CTRLR_DATA_FNA_FORMAT_ALL_MASK		(0x1)
254 #define NVME_CTRLR_DATA_FNA_ERASE_ALL_SHIFT		(1)
255 #define NVME_CTRLR_DATA_FNA_ERASE_ALL_MASK		(0x1)
256 #define NVME_CTRLR_DATA_FNA_CRYPTO_ERASE_SHIFT		(2)
257 #define NVME_CTRLR_DATA_FNA_CRYPTO_ERASE_MASK		(0x1)
258 
259 /** volatile write cache */
260 #define NVME_CTRLR_DATA_VWC_PRESENT_SHIFT		(0)
261 #define NVME_CTRLR_DATA_VWC_PRESENT_MASK		(0x1)
262 
263 /** namespace features */
264 /* thin provisioning */
265 #define NVME_NS_DATA_NSFEAT_THIN_PROV_SHIFT		(0)
266 #define NVME_NS_DATA_NSFEAT_THIN_PROV_MASK		(0x1)
267 /* NAWUN, NAWUPF, and NACWU fields are valid */
268 #define NVME_NS_DATA_NSFEAT_NA_FIELDS_SHIFT		(1)
269 #define NVME_NS_DATA_NSFEAT_NA_FIELDS_MASK		(0x1)
270 /* Deallocated or Unwritten Logical Block errors supported */
271 #define NVME_NS_DATA_NSFEAT_DEALLOC_SHIFT		(2)
272 #define NVME_NS_DATA_NSFEAT_DEALLOC_MASK		(0x1)
273 /* NGUID and EUI64 fields are not reusable */
274 #define NVME_NS_DATA_NSFEAT_NO_ID_REUSE_SHIFT		(3)
275 #define NVME_NS_DATA_NSFEAT_NO_ID_REUSE_MASK		(0x1)
276 
277 /** formatted lba size */
278 #define NVME_NS_DATA_FLBAS_FORMAT_SHIFT			(0)
279 #define NVME_NS_DATA_FLBAS_FORMAT_MASK			(0xF)
280 #define NVME_NS_DATA_FLBAS_EXTENDED_SHIFT		(4)
281 #define NVME_NS_DATA_FLBAS_EXTENDED_MASK		(0x1)
282 
283 /** metadata capabilities */
284 /* metadata can be transferred as part of data prp list */
285 #define NVME_NS_DATA_MC_EXTENDED_SHIFT			(0)
286 #define NVME_NS_DATA_MC_EXTENDED_MASK			(0x1)
287 /* metadata can be transferred with separate metadata pointer */
288 #define NVME_NS_DATA_MC_POINTER_SHIFT			(1)
289 #define NVME_NS_DATA_MC_POINTER_MASK			(0x1)
290 
291 /** end-to-end data protection capabilities */
292 /* protection information type 1 */
293 #define NVME_NS_DATA_DPC_PIT1_SHIFT			(0)
294 #define NVME_NS_DATA_DPC_PIT1_MASK			(0x1)
295 /* protection information type 2 */
296 #define NVME_NS_DATA_DPC_PIT2_SHIFT			(1)
297 #define NVME_NS_DATA_DPC_PIT2_MASK			(0x1)
298 /* protection information type 3 */
299 #define NVME_NS_DATA_DPC_PIT3_SHIFT			(2)
300 #define NVME_NS_DATA_DPC_PIT3_MASK			(0x1)
301 /* first eight bytes of metadata */
302 #define NVME_NS_DATA_DPC_MD_START_SHIFT			(3)
303 #define NVME_NS_DATA_DPC_MD_START_MASK			(0x1)
304 /* last eight bytes of metadata */
305 #define NVME_NS_DATA_DPC_MD_END_SHIFT			(4)
306 #define NVME_NS_DATA_DPC_MD_END_MASK			(0x1)
307 
308 /** end-to-end data protection type settings */
309 /* protection information type */
310 #define NVME_NS_DATA_DPS_PIT_SHIFT			(0)
311 #define NVME_NS_DATA_DPS_PIT_MASK			(0x7)
312 /* 1 == protection info transferred at start of metadata */
313 /* 0 == protection info transferred at end of metadata */
314 #define NVME_NS_DATA_DPS_MD_START_SHIFT			(3)
315 #define NVME_NS_DATA_DPS_MD_START_MASK			(0x1)
316 
317 /** Namespace Multi-path I/O and Namespace Sharing Capabilities */
318 /* the namespace may be attached to two or more controllers */
319 #define NVME_NS_DATA_NMIC_MAY_BE_SHARED_SHIFT		(0)
320 #define NVME_NS_DATA_NMIC_MAY_BE_SHARED_MASK		(0x1)
321 
322 /** Reservation Capabilities */
323 /* Persist Through Power Loss */
324 #define NVME_NS_DATA_RESCAP_PTPL_SHIFT		(0)
325 #define NVME_NS_DATA_RESCAP_PTPL_MASK		(0x1)
326 /* supports the Write Exclusive */
327 #define NVME_NS_DATA_RESCAP_WR_EX_SHIFT		(1)
328 #define NVME_NS_DATA_RESCAP_WR_EX_MASK		(0x1)
329 /* supports the Exclusive Access */
330 #define NVME_NS_DATA_RESCAP_EX_AC_SHIFT		(2)
331 #define NVME_NS_DATA_RESCAP_EX_AC_MASK		(0x1)
332 /* supports the Write Exclusive – Registrants Only */
333 #define NVME_NS_DATA_RESCAP_WR_EX_RO_SHIFT	(3)
334 #define NVME_NS_DATA_RESCAP_WR_EX_RO_MASK	(0x1)
335 /* supports the Exclusive Access - Registrants Only */
336 #define NVME_NS_DATA_RESCAP_EX_AC_RO_SHIFT	(4)
337 #define NVME_NS_DATA_RESCAP_EX_AC_RO_MASK	(0x1)
338 /* supports the Write Exclusive – All Registrants */
339 #define NVME_NS_DATA_RESCAP_WR_EX_AR_SHIFT	(5)
340 #define NVME_NS_DATA_RESCAP_WR_EX_AR_MASK	(0x1)
341 /* supports the Exclusive Access - All Registrants */
342 #define NVME_NS_DATA_RESCAP_EX_AC_AR_SHIFT	(6)
343 #define NVME_NS_DATA_RESCAP_EX_AC_AR_MASK	(0x1)
344 /* Ignore Existing Key is used as defined in revision 1.3 or later */
345 #define NVME_NS_DATA_RESCAP_IEKEY13_SHIFT	(7)
346 #define NVME_NS_DATA_RESCAP_IEKEY13_MASK	(0x1)
347 
348 /** Format Progress Indicator */
349 /* percentage of the Format NVM command that remains to be completed */
350 #define NVME_NS_DATA_FPI_PERC_SHIFT		(0)
351 #define NVME_NS_DATA_FPI_PERC_MASK		(0x7f)
352 /* namespace supports the Format Progress Indicator */
353 #define NVME_NS_DATA_FPI_SUPP_SHIFT		(7)
354 #define NVME_NS_DATA_FPI_SUPP_MASK		(0x1)
355 
356 /** lba format support */
357 /* metadata size */
358 #define NVME_NS_DATA_LBAF_MS_SHIFT			(0)
359 #define NVME_NS_DATA_LBAF_MS_MASK			(0xFFFF)
360 /* lba data size */
361 #define NVME_NS_DATA_LBAF_LBADS_SHIFT			(16)
362 #define NVME_NS_DATA_LBAF_LBADS_MASK			(0xFF)
363 /* relative performance */
364 #define NVME_NS_DATA_LBAF_RP_SHIFT			(24)
365 #define NVME_NS_DATA_LBAF_RP_MASK			(0x3)
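These fields combine as in the sketch below to give the logical block size of a namespace: FLBAS selects one of the lbaf[] entries, and LBADS is the log2 of the LBA data size. nsdata is an assumed pointer to the struct nvme_namespace_data defined later in this file, already converted to host byte order; the helper name is illustrative.

static inline uint32_t
nvme_ns_sector_size_from_data(const struct nvme_namespace_data *nsdata)
{
	uint8_t fmt;
	uint32_t lbaf, lbads;

	fmt = (nsdata->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) &
	    NVME_NS_DATA_FLBAS_FORMAT_MASK;
	lbaf = nsdata->lbaf[fmt];
	lbads = (lbaf >> NVME_NS_DATA_LBAF_LBADS_SHIFT) &
	    NVME_NS_DATA_LBAF_LBADS_MASK;
	return (1u << lbads);
}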
366 
367 enum nvme_critical_warning_state {
368 	NVME_CRIT_WARN_ST_AVAILABLE_SPARE		= 0x1,
369 	NVME_CRIT_WARN_ST_TEMPERATURE			= 0x2,
370 	NVME_CRIT_WARN_ST_DEVICE_RELIABILITY		= 0x4,
371 	NVME_CRIT_WARN_ST_READ_ONLY			= 0x8,
372 	NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP	= 0x10,
373 };
374 #define NVME_CRIT_WARN_ST_RESERVED_MASK			(0xE0)
375 
376 /* slot for current FW */
377 #define NVME_FIRMWARE_PAGE_AFI_SLOT_SHIFT		(0)
378 #define NVME_FIRMWARE_PAGE_AFI_SLOT_MASK		(0x7)
379 
380 /* CC register SHN field values */
381 enum shn_value {
382 	NVME_SHN_NORMAL		= 0x1,
383 	NVME_SHN_ABRUPT		= 0x2,
384 };
385 
386 /* CSTS register SHST field values */
387 enum shst_value {
388 	NVME_SHST_NORMAL	= 0x0,
389 	NVME_SHST_OCCURRING	= 0x1,
390 	NVME_SHST_COMPLETE	= 0x2,
391 };
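These two enums support the shutdown handshake: the host writes SHN in CC and then polls SHST in CSTS until the controller reports completion. The sketch below is illustrative only; regs is an assumed pointer to a mapped struct nvme_registers (defined below), and a real driver would use bus-space accessors, handle endianness, and bound the polling loop with a timeout.

static inline void
nvme_example_shutdown(volatile struct nvme_registers *regs)
{
	uint32_t cc;

	cc = regs->cc;
	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
	regs->cc = cc;

	while (NVME_CSTS_GET_SHST(regs->csts) != NVME_SHST_COMPLETE)
		;	/* a real driver bounds this loop with a timeout */
}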
392 
393 struct nvme_registers
394 {
395 	/** controller capabilities */
396 	uint32_t		cap_lo;
397 	uint32_t		cap_hi;
398 
399 	uint32_t		vs;	/* version */
400 	uint32_t		intms;	/* interrupt mask set */
401 	uint32_t		intmc;	/* interrupt mask clear */
402 
403 	/** controller configuration */
404 	uint32_t		cc;
405 
406 	uint32_t		reserved1;
407 
408 	/** controller status */
409 	uint32_t		csts;
410 
411 	uint32_t		reserved2;
412 
413 	/** admin queue attributes */
414 	uint32_t		aqa;
415 
416 	uint64_t		asq;	/* admin submission queue base addr */
417 	uint64_t		acq;	/* admin completion queue base addr */
418 	uint32_t		reserved3[0x3f2];
419 
420 	struct {
421 	    uint32_t		sq_tdbl; /* submission queue tail doorbell */
422 	    uint32_t		cq_hdbl; /* completion queue head doorbell */
423 	} doorbell[1] __packed;
424 } __packed;
425 
426 _Static_assert(sizeof(struct nvme_registers) == 0x1008, "bad size for nvme_registers");
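The doorbell[] array above matches the minimum doorbell stride (CAP.DSTRD == 0, 4-byte registers). For controllers reporting a larger stride, the offsets must be computed instead of indexed; the helpers below are illustrative (not part of this header) and follow the formula in the NVMe specification.

static inline uint32_t
nvme_sq_tdbl_offset(uint32_t qid, uint32_t dstrd)
{
	/* SQyTDBL lives at 0x1000 + (2 * y) * (4 << CAP.DSTRD). */
	return (0x1000 + (2 * qid) * (4 << dstrd));
}

static inline uint32_t
nvme_cq_hdbl_offset(uint32_t qid, uint32_t dstrd)
{
	/* CQyHDBL follows at 0x1000 + (2 * y + 1) * (4 << CAP.DSTRD). */
	return (0x1000 + (2 * qid + 1) * (4 << dstrd));
}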
427 
428 struct nvme_command
429 {
430 	/* dword 0 */
431 	uint16_t opc_fuse;	/* opcode, fused operation */
432 	uint16_t cid;		/* command identifier */
433 
434 	/* dword 1 */
435 	uint32_t nsid;		/* namespace identifier */
436 
437 	/* dword 2-3 */
438 	uint32_t rsvd2;
439 	uint32_t rsvd3;
440 
441 	/* dword 4-5 */
442 	uint64_t mptr;		/* metadata pointer */
443 
444 	/* dword 6-7 */
445 	uint64_t prp1;		/* prp entry 1 */
446 
447 	/* dword 8-9 */
448 	uint64_t prp2;		/* prp entry 2 */
449 
450 	/* dword 10-15 */
451 	uint32_t cdw10;		/* command-specific */
452 	uint32_t cdw11;		/* command-specific */
453 	uint32_t cdw12;		/* command-specific */
454 	uint32_t cdw13;		/* command-specific */
455 	uint32_t cdw14;		/* command-specific */
456 	uint32_t cdw15;		/* command-specific */
457 } __packed;
458 
459 _Static_assert(sizeof(struct nvme_command) == 16 * 4, "bad size for nvme_command");
460 
461 struct nvme_completion {
462 
463 	/* dword 0 */
464 	uint32_t		cdw0;	/* command-specific */
465 
466 	/* dword 1 */
467 	uint32_t		rsvd1;
468 
469 	/* dword 2 */
470 	uint16_t		sqhd;	/* submission queue head pointer */
471 	uint16_t		sqid;	/* submission queue identifier */
472 
473 	/* dword 3 */
474 	uint16_t		cid;	/* command identifier */
475 	uint16_t		status;
476 } __packed;
477 
478 _Static_assert(sizeof(struct nvme_completion) == 4 * 4, "bad size for nvme_completion");
479 
480 struct nvme_dsm_range {
481 	uint32_t attributes;
482 	uint32_t length;
483 	uint64_t starting_lba;
484 } __packed;
485 
486 /* Largest DSM Trim that can be done */
487 #define NVME_MAX_DSM_TRIM		4096
488 
489 _Static_assert(sizeof(struct nvme_dsm_range) == 16, "bad size for nvme_dsm_range");
490 
491 /* status code types */
492 enum nvme_status_code_type {
493 	NVME_SCT_GENERIC		= 0x0,
494 	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
495 	NVME_SCT_MEDIA_ERROR		= 0x2,
496 	/* 0x3-0x6 - reserved */
497 	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
498 };
499 
500 /* generic command status codes */
501 enum nvme_generic_command_status_code {
502 	NVME_SC_SUCCESS				= 0x00,
503 	NVME_SC_INVALID_OPCODE			= 0x01,
504 	NVME_SC_INVALID_FIELD			= 0x02,
505 	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
506 	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
507 	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
508 	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
509 	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
510 	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
511 	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
512 	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
513 	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
514 	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,
515 	NVME_SC_INVALID_SGL_SEGMENT_DESCR	= 0x0d,
516 	NVME_SC_INVALID_NUMBER_OF_SGL_DESCR	= 0x0e,
517 	NVME_SC_DATA_SGL_LENGTH_INVALID		= 0x0f,
518 	NVME_SC_METADATA_SGL_LENGTH_INVALID	= 0x10,
519 	NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID	= 0x11,
520 	NVME_SC_INVALID_USE_OF_CMB		= 0x12,
521 	NVME_SC_PRP_OFFET_INVALID		= 0x13,
522 	NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED	= 0x14,
523 	NVME_SC_OPERATION_DENIED		= 0x15,
524 	NVME_SC_SGL_OFFSET_INVALID		= 0x16,
525 	/* 0x17 - reserved */
526 	NVME_SC_HOST_ID_INCONSISTENT_FORMAT	= 0x18,
527 	NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED	= 0x19,
528 	NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID	= 0x1a,
529 	NVME_SC_ABORTED_DUE_TO_PREEMPT		= 0x1b,
530 	NVME_SC_SANITIZE_FAILED			= 0x1c,
531 	NVME_SC_SANITIZE_IN_PROGRESS		= 0x1d,
532 	NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID	= 0x1e,
533 	NVME_SC_NOT_SUPPORTED_IN_CMB		= 0x1f,
534 
535 	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
536 	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
537 	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
538 	NVME_SC_RESERVATION_CONFLICT		= 0x83,
539 	NVME_SC_FORMAT_IN_PROGRESS		= 0x84,
540 };
541 
542 /* command specific status codes */
543 enum nvme_command_specific_status_code {
544 	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
545 	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
546 	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
547 	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
548 	/* 0x04 - reserved */
549 	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
550 	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
551 	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
552 	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
553 	NVME_SC_INVALID_LOG_PAGE		= 0x09,
554 	NVME_SC_INVALID_FORMAT			= 0x0a,
555 	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,
556 	NVME_SC_INVALID_QUEUE_DELETION		= 0x0c,
557 	NVME_SC_FEATURE_NOT_SAVEABLE		= 0x0d,
558 	NVME_SC_FEATURE_NOT_CHANGEABLE		= 0x0e,
559 	NVME_SC_FEATURE_NOT_NS_SPECIFIC		= 0x0f,
560 	NVME_SC_FW_ACT_REQUIRES_NVMS_RESET	= 0x10,
561 	NVME_SC_FW_ACT_REQUIRES_RESET		= 0x11,
562 	NVME_SC_FW_ACT_REQUIRES_TIME		= 0x12,
563 	NVME_SC_FW_ACT_PROHIBITED		= 0x13,
564 	NVME_SC_OVERLAPPING_RANGE		= 0x14,
565 	NVME_SC_NS_INSUFFICIENT_CAPACITY	= 0x15,
566 	NVME_SC_NS_ID_UNAVAILABLE		= 0x16,
567 	/* 0x17 - reserved */
568 	NVME_SC_NS_ALREADY_ATTACHED		= 0x18,
569 	NVME_SC_NS_IS_PRIVATE			= 0x19,
570 	NVME_SC_NS_NOT_ATTACHED			= 0x1a,
571 	NVME_SC_THIN_PROV_NOT_SUPPORTED		= 0x1b,
572 	NVME_SC_CTRLR_LIST_INVALID		= 0x1c,
573 	NVME_SC_SELT_TEST_IN_PROGRESS		= 0x1d,
574 	NVME_SC_BOOT_PART_WRITE_PROHIB		= 0x1e,
575 	NVME_SC_INVALID_CTRLR_ID		= 0x1f,
576 	NVME_SC_INVALID_SEC_CTRLR_STATE		= 0x20,
577 	NVME_SC_INVALID_NUM_OF_CTRLR_RESRC	= 0x21,
578 	NVME_SC_INVALID_RESOURCE_ID		= 0x22,
579 
580 	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
581 	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
582 	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
583 };
584 
585 /* media error status codes */
586 enum nvme_media_error_status_code {
587 	NVME_SC_WRITE_FAULTS			= 0x80,
588 	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
589 	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
590 	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
591 	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
592 	NVME_SC_COMPARE_FAILURE			= 0x85,
593 	NVME_SC_ACCESS_DENIED			= 0x86,
594 	NVME_SC_DEALLOCATED_OR_UNWRITTEN	= 0x87,
595 };
596 
597 /* admin opcodes */
598 enum nvme_admin_opcode {
599 	NVME_OPC_DELETE_IO_SQ			= 0x00,
600 	NVME_OPC_CREATE_IO_SQ			= 0x01,
601 	NVME_OPC_GET_LOG_PAGE			= 0x02,
602 	/* 0x03 - reserved */
603 	NVME_OPC_DELETE_IO_CQ			= 0x04,
604 	NVME_OPC_CREATE_IO_CQ			= 0x05,
605 	NVME_OPC_IDENTIFY			= 0x06,
606 	/* 0x07 - reserved */
607 	NVME_OPC_ABORT				= 0x08,
608 	NVME_OPC_SET_FEATURES			= 0x09,
609 	NVME_OPC_GET_FEATURES			= 0x0a,
610 	/* 0x0b - reserved */
611 	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
612 	NVME_OPC_NAMESPACE_MANAGEMENT		= 0x0d,
613 	/* 0x0e-0x0f - reserved */
614 	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
615 	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,
616 	NVME_OPC_DEVICE_SELF_TEST		= 0x14,
617 	NVME_OPC_NAMESPACE_ATTACHMENT		= 0x15,
618 	NVME_OPC_KEEP_ALIVE			= 0x18,
619 	NVME_OPC_DIRECTIVE_SEND			= 0x19,
620 	NVME_OPC_DIRECTIVE_RECEIVE		= 0x1a,
621 	NVME_OPC_VIRTUALIZATION_MANAGEMENT	= 0x1c,
622 	NVME_OPC_NVME_MI_SEND			= 0x1d,
623 	NVME_OPC_NVME_MI_RECEIVE		= 0x1e,
624 	NVME_OPC_DOORBELL_BUFFER_CONFIG		= 0x7c,
625 
626 	NVME_OPC_FORMAT_NVM			= 0x80,
627 	NVME_OPC_SECURITY_SEND			= 0x81,
628 	NVME_OPC_SECURITY_RECEIVE		= 0x82,
629 	NVME_OPC_SANITIZE			= 0x84,
630 };
631 
632 /* nvme nvm opcodes */
633 enum nvme_nvm_opcode {
634 	NVME_OPC_FLUSH				= 0x00,
635 	NVME_OPC_WRITE				= 0x01,
636 	NVME_OPC_READ				= 0x02,
637 	/* 0x03 - reserved */
638 	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
639 	NVME_OPC_COMPARE			= 0x05,
640 	/* 0x06 - reserved */
641 	/* 0x07 - reserved */
642 	NVME_OPC_WRITE_ZEROES			= 0x08,
643 	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
644 	/* 0x0a-0x0c - reserved */
645 	NVME_OPC_RESERVATION_REGISTER		= 0x0d,
646 	NVME_OPC_RESERVATION_REPORT		= 0x0e,
647 	/* 0x0f-0x10 - reserved */
648 	NVME_OPC_RESERVATION_ACQUIRE		= 0x11,
649 	/* 0x12-0x14 - reserved */
650 	NVME_OPC_RESERVATION_RELEASE		= 0x15,
651 };
652 
653 enum nvme_feature {
654 	/* 0x00 - reserved */
655 	NVME_FEAT_ARBITRATION			= 0x01,
656 	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
657 	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
658 	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
659 	NVME_FEAT_ERROR_RECOVERY		= 0x05,
660 	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
661 	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
662 	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
663 	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
664 	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
665 	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
666 	NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C,
667 	NVME_FEAT_HOST_MEMORY_BUFFER		= 0x0D,
668 	NVME_FEAT_TIMESTAMP			= 0x0E,
669 	NVME_FEAT_KEEP_ALIVE_TIMER		= 0x0F,
670 	NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT	= 0x10,
671 	NVME_FEAT_NON_OP_POWER_STATE_CONFIG	= 0x11,
672 	/* 0x12-0x77 - reserved */
673 	/* 0x78-0x7f - NVMe Management Interface */
674 	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
675 	/* 0x81-0xBF - command set specific (reserved) */
676 	/* 0xC0-0xFF - vendor specific */
677 };
678 
679 enum nvme_dsm_attribute {
680 	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
681 	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
682 	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
683 };
684 
685 enum nvme_activate_action {
686 	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
687 	NVME_AA_REPLACE_ACTIVATE		= 0x1,
688 	NVME_AA_ACTIVATE			= 0x2,
689 };
690 
691 struct nvme_power_state {
692 	/** Maximum Power */
693 	uint16_t	mp;			/* Maximum Power */
694 	uint8_t		ps_rsvd1;
695 	uint8_t		mps_nops;		/* Max Power Scale, Non-Operational State */
696 
697 	uint32_t	enlat;			/* Entry Latency */
698 	uint32_t	exlat;			/* Exit Latency */
699 
700 	uint8_t		rrt;			/* Relative Read Throughput */
701 	uint8_t		rrl;			/* Relative Read Latency */
702 	uint8_t		rwt;			/* Relative Write Throughput */
703 	uint8_t		rwl;			/* Relative Write Latency */
704 
705 	uint16_t	idlp;			/* Idle Power */
706 	uint8_t		ips;			/* Idle Power Scale */
707 	uint8_t		ps_rsvd8;
708 
709 	uint16_t	actp;			/* Active Power */
710 	uint8_t		apw_aps;		/* Active Power Workload, Active Power Scale */
711 	uint8_t		ps_rsvd10[9];
712 } __packed;
713 
714 _Static_assert(sizeof(struct nvme_power_state) == 32, "bad size for nvme_power_state");
715 
716 #define NVME_SERIAL_NUMBER_LENGTH	20
717 #define NVME_MODEL_NUMBER_LENGTH	40
718 #define NVME_FIRMWARE_REVISION_LENGTH	8
719 
720 struct nvme_controller_data {
721 
722 	/* bytes 0-255: controller capabilities and features */
723 
724 	/** pci vendor id */
725 	uint16_t		vid;
726 
727 	/** pci subsystem vendor id */
728 	uint16_t		ssvid;
729 
730 	/** serial number */
731 	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];
732 
733 	/** model number */
734 	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];
735 
736 	/** firmware revision */
737 	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];
738 
739 	/** recommended arbitration burst */
740 	uint8_t			rab;
741 
742 	/** ieee oui identifier */
743 	uint8_t			ieee[3];
744 
745 	/** multi-interface capabilities */
746 	uint8_t			mic;
747 
748 	/** maximum data transfer size */
749 	uint8_t			mdts;
750 
751 	/** Controller ID */
752 	uint16_t		ctrlr_id;
753 
754 	/** Version */
755 	uint32_t		ver;
756 
757 	/** RTD3 Resume Latency */
758 	uint32_t		rtd3r;
759 
760 	/** RTD3 Enter Latency */
761 	uint32_t		rtd3e;
762 
763 	/** Optional Asynchronous Events Supported */
764 	uint32_t		oaes;	/* bitfield really */
765 
766 	/** Controller Attributes */
767 	uint32_t		ctratt;	/* bitfield really */
768 
769 	uint8_t			reserved1[12];
770 
771 	/** FRU Globally Unique Identifier */
772 	uint8_t			fguid[16];
773 
774 	uint8_t			reserved2[128];
775 
776 	/* bytes 256-511: admin command set attributes */
777 
778 	/** optional admin command support */
779 	uint16_t		oacs;
780 
781 	/** abort command limit */
782 	uint8_t			acl;
783 
784 	/** asynchronous event request limit */
785 	uint8_t			aerl;
786 
787 	/** firmware updates */
788 	uint8_t			frmw;
789 
790 	/** log page attributes */
791 	uint8_t			lpa;
792 
793 	/** error log page entries */
794 	uint8_t			elpe;
795 
796 	/** number of power states supported */
797 	uint8_t			npss;
798 
799 	/** admin vendor specific command configuration */
800 	uint8_t			avscc;
801 
802 	/** Autonomous Power State Transition Attributes */
803 	uint8_t			apsta;
804 
805 	/** Warning Composite Temperature Threshold */
806 	uint16_t		wctemp;
807 
808 	/** Critical Composite Temperature Threshold */
809 	uint16_t		cctemp;
810 
811 	/** Maximum Time for Firmware Activation */
812 	uint16_t		mtfa;
813 
814 	/** Host Memory Buffer Preferred Size */
815 	uint32_t		hmpre;
816 
817 	/** Host Memory Buffer Minimum Size */
818 	uint32_t		hmmin;
819 
820 	/** Namespace capabilities */
821 	struct {
822 		/* if nsmgmt, report tnvmcap and unvmcap */
823 		uint8_t    tnvmcap[16];
824 		uint8_t    unvmcap[16];
825 	} __packed untncap;
826 
827 	/** Replay Protected Memory Block Support */
828 	uint32_t		rpmbs; /* Really a bitfield */
829 
830 	/** Extended Device Self-test Time */
831 	uint16_t		edstt;
832 
833 	/** Device Self-test Options */
834 	uint8_t			dsto; /* Really a bitfield */
835 
836 	/** Firmware Update Granularity */
837 	uint8_t			fwug;
838 
839 	/** Keep Alive Support */
840 	uint16_t		kas;
841 
842 	/** Host Controlled Thermal Management Attributes */
843 	uint16_t		hctma; /* Really a bitfield */
844 
845 	/** Minimum Thermal Management Temperature */
846 	uint16_t		mntmt;
847 
848 	/** Maximum Thermal Management Temperature */
849 	uint16_t		mxtmt;
850 
851 	/** Sanitize Capabilities */
852 	uint32_t		sanicap; /* Really a bitfield */
853 
854 	uint8_t			reserved3[180];
855 	/* bytes 512-703: nvm command set attributes */
856 
857 	/** submission queue entry size */
858 	uint8_t			sqes;
859 
860 	/** completion queue entry size */
861 	uint8_t			cqes;
862 
863 	/** Maximum Outstanding Commands */
864 	uint16_t		maxcmd;
865 
866 	/** number of namespaces */
867 	uint32_t		nn;
868 
869 	/** optional nvm command support */
870 	uint16_t		oncs;
871 
872 	/** fused operation support */
873 	uint16_t		fuses;
874 
875 	/** format nvm attributes */
876 	uint8_t			fna;
877 
878 	/** volatile write cache */
879 	uint8_t			vwc;
880 
881 	/** Atomic Write Unit Normal */
882 	uint16_t		awun;
883 
884 	/** Atomic Write Unit Power Fail */
885 	uint16_t		awupf;
886 
887 	/** NVM Vendor Specific Command Configuration */
888 	uint8_t			nvscc;
889 	uint8_t			reserved5;
890 
891 	/** Atomic Compare & Write Unit */
892 	uint16_t		acwu;
893 	uint16_t		reserved6;
894 
895 	/** SGL Support */
896 	uint32_t		sgls;
897 
898 	/* bytes 540-767: Reserved */
899 	uint8_t			reserved7[228];
900 
901 	/** NVM Subsystem NVMe Qualified Name */
902 	uint8_t			subnqn[256];
903 
904 	/* bytes 1024-1791: Reserved */
905 	uint8_t			reserved8[768];
906 
907 	/* bytes 1792-2047: NVMe over Fabrics specification */
908 	uint8_t			reserved9[256];
909 
910 	/* bytes 2048-3071: power state descriptors */
911 	struct nvme_power_state power_state[32];
912 
913 	/* bytes 3072-4095: vendor specific */
914 	uint8_t			vs[1024];
915 } __packed __aligned(4);
916 
917 _Static_assert(sizeof(struct nvme_controller_data) == 4096, "bad size for nvme_controller_data");
918 
919 struct nvme_namespace_data {
920 
921 	/** namespace size */
922 	uint64_t		nsze;
923 
924 	/** namespace capacity */
925 	uint64_t		ncap;
926 
927 	/** namespace utilization */
928 	uint64_t		nuse;
929 
930 	/** namespace features */
931 	uint8_t			nsfeat;
932 
933 	/** number of lba formats */
934 	uint8_t			nlbaf;
935 
936 	/** formatted lba size */
937 	uint8_t			flbas;
938 
939 	/** metadata capabilities */
940 	uint8_t			mc;
941 
942 	/** end-to-end data protection capabilities */
943 	uint8_t			dpc;
944 
945 	/** end-to-end data protection type settings */
946 	uint8_t			dps;
947 
948 	/** Namespace Multi-path I/O and Namespace Sharing Capabilities */
949 	uint8_t			nmic;
950 
951 	/** Reservation Capabilities */
952 	uint8_t			rescap;
953 
954 	/** Format Progress Indicator */
955 	uint8_t			fpi;
956 
957 	/** Deallocate Logical Block Features */
958 	uint8_t			dlfeat;
959 
960 	/** Namespace Atomic Write Unit Normal  */
961 	uint16_t		nawun;
962 
963 	/** Namespace Atomic Write Unit Power Fail */
964 	uint16_t		nawupf;
965 
966 	/** Namespace Atomic Compare & Write Unit */
967 	uint16_t		nacwu;
968 
969 	/** Namespace Atomic Boundary Size Normal */
970 	uint16_t		nabsn;
971 
972 	/** Namespace Atomic Boundary Offset */
973 	uint16_t		nabo;
974 
975 	/** Namespace Atomic Boundary Size Power Fail */
976 	uint16_t		nabspf;
977 
978 	/** Namespace Optimal IO Boundary */
979 	uint16_t		noiob;
980 
981 	/** NVM Capacity */
982 	uint8_t			nvmcap[16];
983 
984 	/* bytes 64-103: Reserved */
985 	uint8_t			reserved5[40];
986 
987 	/** Namespace Globally Unique Identifier */
988 	uint8_t			nguid[16];
989 
990 	/** IEEE Extended Unique Identifier */
991 	uint8_t			eui64[8];
992 
993 	/** lba format support */
994 	uint32_t		lbaf[16];
995 
996 	uint8_t			reserved6[192];
997 
998 	uint8_t			vendor_specific[3712];
999 } __packed __aligned(4);
1000 
1001 _Static_assert(sizeof(struct nvme_namespace_data) == 4096, "bad size for nvme_namespace_data");
1002 
1003 enum nvme_log_page {
1004 
1005 	/* 0x00 - reserved */
1006 	NVME_LOG_ERROR			= 0x01,
1007 	NVME_LOG_HEALTH_INFORMATION	= 0x02,
1008 	NVME_LOG_FIRMWARE_SLOT		= 0x03,
1009 	NVME_LOG_CHANGED_NAMESPACE	= 0x04,
1010 	NVME_LOG_COMMAND_EFFECT		= 0x05,
1011 	/* 0x06-0x7F - reserved */
1012 	/* 0x80-0xBF - I/O command set specific */
1013 	NVME_LOG_RES_NOTIFICATION	= 0x80,
1014 	/* 0xC0-0xFF - vendor specific */
1015 
1016 	/*
1017 	 * The following are Intel Specific log pages, but they seem
1018 	 * to be widely implemented.
1019 	 */
1020 	INTEL_LOG_READ_LAT_LOG		= 0xc1,
1021 	INTEL_LOG_WRITE_LAT_LOG		= 0xc2,
1022 	INTEL_LOG_TEMP_STATS		= 0xc5,
1023 	INTEL_LOG_ADD_SMART		= 0xca,
1024 	INTEL_LOG_DRIVE_MKT_NAME	= 0xdd,
1025 
1026 	/*
1027 	 * HGST log page, with lots of sub pages.
1028 	 */
1029 	HGST_INFO_LOG			= 0xc1,
1030 };
1031 
1032 struct nvme_error_information_entry {
1033 
1034 	uint64_t		error_count;
1035 	uint16_t		sqid;
1036 	uint16_t		cid;
1037 	uint16_t		status;
1038 	uint16_t		error_location;
1039 	uint64_t		lba;
1040 	uint32_t		nsid;
1041 	uint8_t			vendor_specific;
1042 	uint8_t			reserved[35];
1043 } __packed __aligned(4);
1044 
1045 _Static_assert(sizeof(struct nvme_error_information_entry) == 64, "bad size for nvme_error_information_entry");
1046 
1047 struct nvme_health_information_page {
1048 
1049 	uint8_t			critical_warning;
1050 	uint16_t		temperature;
1051 	uint8_t			available_spare;
1052 	uint8_t			available_spare_threshold;
1053 	uint8_t			percentage_used;
1054 
1055 	uint8_t			reserved[26];
1056 
1057 	/*
1058 	 * Note that the following are 128-bit values, but are
1059 	 *  defined as an array of 2 64-bit values.
1060 	 */
1061 	/* Data Units Read is always in 512-byte units. */
1062 	uint64_t		data_units_read[2];
1063 	/* Data Units Written is always in 512-byte units. */
1064 	uint64_t		data_units_written[2];
1065 	/* For NVM command set, this includes Compare commands. */
1066 	uint64_t		host_read_commands[2];
1067 	uint64_t		host_write_commands[2];
1068 	/* Controller Busy Time is reported in minutes. */
1069 	uint64_t		controller_busy_time[2];
1070 	uint64_t		power_cycles[2];
1071 	uint64_t		power_on_hours[2];
1072 	uint64_t		unsafe_shutdowns[2];
1073 	uint64_t		media_errors[2];
1074 	uint64_t		num_error_info_log_entries[2];
1075 	uint32_t		warning_temp_time;
1076 	uint32_t		error_temp_time;
1077 	uint16_t		temp_sensor[8];
1078 
1079 	uint8_t			reserved2[296];
1080 } __packed __aligned(4);
1081 
1082 _Static_assert(sizeof(struct nvme_health_information_page) == 512, "bad size for nvme_health_information_page");
1083 
1084 struct nvme_firmware_page {
1085 
1086 	uint8_t			afi;
1087 	uint8_t			reserved[7];
1088 	uint64_t		revision[7]; /* revisions for 7 slots */
1089 	uint8_t			reserved2[448];
1090 } __packed __aligned(4);
1091 
1092 _Static_assert(sizeof(struct nvme_firmware_page) == 512, "bad size for nvme_firmware_page");
1093 
1094 struct intel_log_temp_stats
1095 {
1096 	uint64_t	current;
1097 	uint64_t	overtemp_flag_last;
1098 	uint64_t	overtemp_flag_life;
1099 	uint64_t	max_temp;
1100 	uint64_t	min_temp;
1101 	uint64_t	_rsvd[5];
1102 	uint64_t	max_oper_temp;
1103 	uint64_t	min_oper_temp;
1104 	uint64_t	est_offset;
1105 } __packed __aligned(4);
1106 
1107 _Static_assert(sizeof(struct intel_log_temp_stats) == 13 * 8, "bad size for intel_log_temp_stats");
1108 
1109 #define NVME_TEST_MAX_THREADS	128
1110 
1111 struct nvme_io_test {
1112 
1113 	enum nvme_nvm_opcode	opc;
1114 	uint32_t		size;
1115 	uint32_t		time;	/* in seconds */
1116 	uint32_t		num_threads;
1117 	uint32_t		flags;
1118 	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
1119 };
1120 
1121 enum nvme_io_test_flags {
1122 
1123 	/*
1124 	 * Specifies whether dev_refthread/dev_relthread should be
1125 	 *  called during NVME_BIO_TEST.  Ignored for other test
1126 	 *  types.
1127 	 */
1128 	NVME_TEST_FLAG_REFTHREAD =	0x1,
1129 };
1130 
1131 struct nvme_pt_command {
1132 
1133 	/*
1134 	 * cmd is used to specify a passthrough command to a controller or
1135 	 *  namespace.
1136 	 *
1137 	 * The following fields from cmd may be specified by the caller:
1138 	 *	* opc  (opcode)
1139 	 *	* nsid (namespace id) - for admin commands only
1140 	 *	* cdw10-cdw15
1141 	 *
1142 	 * Remaining fields must be set to 0 by the caller.
1143 	 */
1144 	struct nvme_command	cmd;
1145 
1146 	/*
1147 	 * cpl returns completion status for the passthrough command
1148 	 *  specified by cmd.
1149 	 *
1150 	 * The following fields will be filled out by the driver, for
1151 	 *  consumption by the caller:
1152 	 *	* cdw0
1153 	 *	* status (except for phase)
1154 	 *
1155 	 * Remaining fields will be set to 0 by the driver.
1156 	 */
1157 	struct nvme_completion	cpl;
1158 
1159 	/* buf is the data buffer associated with this passthrough command. */
1160 	void *			buf;
1161 
1162 	/*
1163 	 * len is the length of the data buffer associated with this
1164 	 *  passthrough command.
1165 	 */
1166 	uint32_t		len;
1167 
1168 	/*
1169 	 * is_read = 1 if the passthrough command will read data into the
1170 	 *  supplied buffer from the controller.
1171 	 *
1172 	 * is_read = 0 if the passthrough command will write data from the
1173 	 *  supplied buffer to the controller.
1174 	 */
1175 	uint32_t		is_read;
1176 
1177 	/*
1178 	 * driver_lock is used by the driver only.  It must be set to 0
1179 	 *  by the caller.
1180 	 */
1181 	struct mtx *		driver_lock;
1182 };
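A hedged userland sketch of driving this structure through the NVME_PASSTHROUGH_CMD ioctl to issue an Identify Controller admin command. The /dev/nvme0 device name, the <dev/nvme/nvme.h> include path, and the CNS value of 1 in cdw10 come from FreeBSD convention and the NVMe specification rather than from this header, so treat them as assumptions.

#include <sys/ioctl.h>
#include <dev/nvme/nvme.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct nvme_controller_data cdata;
	struct nvme_pt_command pt;
	int fd;

	fd = open("/dev/nvme0", O_RDWR);	/* controller device (assumed name) */
	if (fd < 0)
		err(1, "open");

	memset(&pt, 0, sizeof(pt));		/* unused fields must be zero */
	pt.cmd.opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_IDENTIFY);
	pt.cmd.cdw10 = htole32(1);		/* CNS=1: Identify Controller */
	pt.buf = &cdata;
	pt.len = sizeof(cdata);
	pt.is_read = 1;				/* data flows controller -> host */

	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) == -1)
		err(1, "NVME_PASSTHROUGH_CMD");
	if (nvme_completion_is_error(&pt.cpl))
		errx(1, "identify command failed");

	nvme_controller_data_swapbytes(&cdata);	/* payload arrives little-endian */
	close(fd);
	return (0);
}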
1183 
1184 #define nvme_completion_is_error(cpl)					\
1185 	(NVME_STATUS_GET_SC((cpl)->status) != 0 || NVME_STATUS_GET_SCT((cpl)->status) != 0)
1186 
1187 void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);
1188 
1189 #ifdef _KERNEL
1190 
1191 struct bio;
1192 
1193 struct nvme_namespace;
1194 struct nvme_controller;
1195 struct nvme_consumer;
1196 
1197 typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);
1198 
1199 typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
1200 typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
1201 typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
1202 				     uint32_t, void *, uint32_t);
1203 typedef void (*nvme_cons_fail_fn_t)(void *);
1204 
1205 enum nvme_namespace_flags {
1206 	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
1207 	NVME_NS_FLUSH_SUPPORTED		= 0x2,
1208 };
1209 
1210 int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1211 				   struct nvme_pt_command *pt,
1212 				   uint32_t nsid, int is_user_buffer,
1213 				   int is_admin_cmd);
1214 
1215 /* Admin functions */
1216 void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
1217 				   uint8_t feature, uint32_t cdw11,
1218 				   void *payload, uint32_t payload_size,
1219 				   nvme_cb_fn_t cb_fn, void *cb_arg);
1220 void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
1221 				   uint8_t feature, uint32_t cdw11,
1222 				   void *payload, uint32_t payload_size,
1223 				   nvme_cb_fn_t cb_fn, void *cb_arg);
1224 void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
1225 				    uint8_t log_page, uint32_t nsid,
1226 				    void *payload, uint32_t payload_size,
1227 				    nvme_cb_fn_t cb_fn, void *cb_arg);
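A sketch (not taken from the driver) of how a kernel consumer might use the prototype above to fetch the controller-wide SMART/health log: NVME_GLOBAL_NAMESPACE_TAG selects the global page and the completion callback matches nvme_cb_fn_t. The example_* names are hypothetical.

static void
example_health_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_health_information_page *hip = arg;

	if (nvme_completion_is_error(cpl))
		return;
	/* Log page payloads arrive little-endian; convert before use. */
	nvme_health_information_page_swapbytes(hip);
}

static void
example_request_health(struct nvme_controller *ctrlr,
    struct nvme_health_information_page *hip)
{
	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    NVME_GLOBAL_NAMESPACE_TAG, hip, sizeof(*hip),
	    example_health_done, hip);
}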
1228 
1229 /* NVM I/O functions */
1230 int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
1231 			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
1232 			  void *cb_arg);
1233 int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
1234 			      nvme_cb_fn_t cb_fn, void *cb_arg);
1235 int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
1236 			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
1237 			 void *cb_arg);
1238 int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
1239 			      nvme_cb_fn_t cb_fn, void *cb_arg);
1240 int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
1241 			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
1242 			       void *cb_arg);
1243 int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
1244 			  void *cb_arg);
1245 int	nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset,
1246 		     size_t len);
1247 
1248 /* Registration functions */
1249 struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
1250 					       nvme_cons_ctrlr_fn_t ctrlr_fn,
1251 					       nvme_cons_async_fn_t async_fn,
1252 					       nvme_cons_fail_fn_t  fail_fn);
1253 void		nvme_unregister_consumer(struct nvme_consumer *consumer);
1254 
1255 /* Controller helper functions */
1256 device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
1257 const struct nvme_controller_data *
1258 		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);
1259 
1260 /* Namespace helper functions */
1261 uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
1262 uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
1263 uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
1264 uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
1265 uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
1266 const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
1267 const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
1268 const struct nvme_namespace_data *
1269 		nvme_ns_get_data(struct nvme_namespace *ns);
1270 uint32_t	nvme_ns_get_stripesize(struct nvme_namespace *ns);
1271 
1272 int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
1273 			    nvme_cb_fn_t cb_fn);
1274 
1275 /*
1276  * Command building helper functions -- shared with CAM
1277  * These functions assume allocator zeros out cmd structure
1278  * CAM's xpt_get_ccb and the request allocator for nvme both
1279  * do zero'd allocations.
1280  */
1281 static inline
1282 void	nvme_ns_flush_cmd(struct nvme_command *cmd, uint32_t nsid)
1283 {
1284 
1285 	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_FLUSH);
1286 	cmd->nsid = htole32(nsid);
1287 }
1288 
1289 static inline
1290 void	nvme_ns_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint32_t nsid,
1291     uint64_t lba, uint32_t count)
1292 {
1293 	cmd->opc_fuse = NVME_CMD_SET_OPC(rwcmd);
1294 	cmd->nsid = htole32(nsid);
1295 	cmd->cdw10 = htole32(lba & 0xffffffffu);
1296 	cmd->cdw11 = htole32(lba >> 32);
1297 	cmd->cdw12 = htole32(count-1);
1298 }
1299 
1300 static inline
1301 void	nvme_ns_write_cmd(struct nvme_command *cmd, uint32_t nsid,
1302     uint64_t lba, uint32_t count)
1303 {
1304 	nvme_ns_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
1305 }
1306 
1307 static inline
1308 void	nvme_ns_read_cmd(struct nvme_command *cmd, uint32_t nsid,
1309     uint64_t lba, uint32_t count)
1310 {
1311 	nvme_ns_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
1312 }
1313 
1314 static inline
1315 void	nvme_ns_trim_cmd(struct nvme_command *cmd, uint32_t nsid,
1316     uint32_t num_ranges)
1317 {
1318 	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT);
1319 	cmd->nsid = htole32(nsid);
1320 	cmd->cdw10 = htole32(num_ranges - 1);
1321 	cmd->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1322 }
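The helpers above assume the command was allocated zeroed; the sketch below (illustrative only, not driver code) ties nvme_ns_trim_cmd together with struct nvme_dsm_range for a single-range deallocate. Pointing prp1/prp2 at the range buffer is transport plumbing that is deliberately omitted here.

static inline void
nvme_example_build_trim(struct nvme_command *cmd, struct nvme_dsm_range *range,
    uint32_t nsid, uint64_t lba, uint32_t num_blocks)
{
	*cmd = (struct nvme_command){ 0 };	/* helpers expect a zeroed command */
	*range = (struct nvme_dsm_range){ 0 };

	/* Range entries are little-endian on the wire. */
	range->length = htole32(num_blocks);
	range->starting_lba = htole64(lba);

	nvme_ns_trim_cmd(cmd, nsid, 1);		/* one range, deallocate attribute */
}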
1323 
1324 extern int nvme_use_nvd;
1325 
1326 #endif /* _KERNEL */
1327 
1328 /* Endianness conversion functions for NVMe structs */
1329 static inline
1330 void	nvme_completion_swapbytes(struct nvme_completion *s)
1331 {
1332 
1333 	s->cdw0 = le32toh(s->cdw0);
1334 	/* omit rsvd1 */
1335 	s->sqhd = le16toh(s->sqhd);
1336 	s->sqid = le16toh(s->sqid);
1337 	/* omit cid */
1338 	s->status = le16toh(s->status);
1339 }
1340 
1341 static inline
1342 void	nvme_power_state_swapbytes(struct nvme_power_state *s)
1343 {
1344 
1345 	s->mp = le16toh(s->mp);
1346 	s->enlat = le32toh(s->enlat);
1347 	s->exlat = le32toh(s->exlat);
1348 	s->idlp = le16toh(s->idlp);
1349 	s->actp = le16toh(s->actp);
1350 }
1351 
1352 static inline
1353 void	nvme_controller_data_swapbytes(struct nvme_controller_data *s)
1354 {
1355 	int i;
1356 
1357 	s->vid = le16toh(s->vid);
1358 	s->ssvid = le16toh(s->ssvid);
1359 	s->ctrlr_id = le16toh(s->ctrlr_id);
1360 	s->ver = le32toh(s->ver);
1361 	s->rtd3r = le32toh(s->rtd3r);
1362 	s->rtd3e = le32toh(s->rtd3e);
1363 	s->oaes = le32toh(s->oaes);
1364 	s->ctratt = le32toh(s->ctratt);
1365 	s->oacs = le16toh(s->oacs);
1366 	s->wctemp = le16toh(s->wctemp);
1367 	s->cctemp = le16toh(s->cctemp);
1368 	s->mtfa = le16toh(s->mtfa);
1369 	s->hmpre = le32toh(s->hmpre);
1370 	s->hmmin = le32toh(s->hmmin);
1371 	s->rpmbs = le32toh(s->rpmbs);
1372 	s->edstt = le16toh(s->edstt);
1373 	s->kas = le16toh(s->kas);
1374 	s->hctma = le16toh(s->hctma);
1375 	s->mntmt = le16toh(s->mntmt);
1376 	s->mxtmt = le16toh(s->mxtmt);
1377 	s->sanicap = le32toh(s->sanicap);
1378 	s->maxcmd = le16toh(s->maxcmd);
1379 	s->nn = le32toh(s->nn);
1380 	s->oncs = le16toh(s->oncs);
1381 	s->fuses = le16toh(s->fuses);
1382 	s->awun = le16toh(s->awun);
1383 	s->awupf = le16toh(s->awupf);
1384 	s->acwu = le16toh(s->acwu);
1385 	s->sgls = le32toh(s->sgls);
1386 	for (i = 0; i < 32; i++)
1387 		nvme_power_state_swapbytes(&s->power_state[i]);
1388 }
1389 
1390 static inline
1391 void	nvme_namespace_data_swapbytes(struct nvme_namespace_data *s)
1392 {
1393 	int i;
1394 
1395 	s->nsze = le64toh(s->nsze);
1396 	s->ncap = le64toh(s->ncap);
1397 	s->nuse = le64toh(s->nuse);
1398 	s->nawun = le16toh(s->nawun);
1399 	s->nawupf = le16toh(s->nawupf);
1400 	s->nacwu = le16toh(s->nacwu);
1401 	s->nabsn = le16toh(s->nabsn);
1402 	s->nabo = le16toh(s->nabo);
1403 	s->nabspf = le16toh(s->nabspf);
1404 	s->noiob = le16toh(s->noiob);
1405 	for (i = 0; i < 16; i++)
1406 		s->lbaf[i] = le32toh(s->lbaf[i]);
1407 }
1408 
1409 static inline
1410 void	nvme_error_information_entry_swapbytes(struct nvme_error_information_entry *s)
1411 {
1412 
1413 	s->error_count = le64toh(s->error_count);
1414 	s->sqid = le16toh(s->sqid);
1415 	s->cid = le16toh(s->cid);
1416 	s->status = le16toh(s->status);
1417 	s->error_location = le16toh(s->error_location);
1418 	s->lba = le64toh(s->lba);
1419 	s->nsid = le32toh(s->nsid);
1420 }
1421 
1422 static inline
1423 void	nvme_le128toh(void *p)
1424 {
1425 #if _BYTE_ORDER != _LITTLE_ENDIAN
1426 	/* Swap 16 bytes in place */
1427 	char *tmp = (char*)p;
1428 	char b;
1429 	int i;
1430 	for (i = 0; i < 8; i++) {
1431 		b = tmp[i];
1432 		tmp[i] = tmp[15-i];
1433 		tmp[15-i] = b;
1434 	}
1435 #else
1436 	(void)p;
1437 #endif
1438 }
1439 
1440 static inline
1441 void	nvme_health_information_page_swapbytes(struct nvme_health_information_page *s)
1442 {
1443 	int i;
1444 
1445 	s->temperature = le16toh(s->temperature);
1446 	nvme_le128toh((void *)s->data_units_read);
1447 	nvme_le128toh((void *)s->data_units_written);
1448 	nvme_le128toh((void *)s->host_read_commands);
1449 	nvme_le128toh((void *)s->host_write_commands);
1450 	nvme_le128toh((void *)s->controller_busy_time);
1451 	nvme_le128toh((void *)s->power_cycles);
1452 	nvme_le128toh((void *)s->power_on_hours);
1453 	nvme_le128toh((void *)s->unsafe_shutdowns);
1454 	nvme_le128toh((void *)s->media_errors);
1455 	nvme_le128toh((void *)s->num_error_info_log_entries);
1456 	s->warning_temp_time = le32toh(s->warning_temp_time);
1457 	s->error_temp_time = le32toh(s->error_temp_time);
1458 	for (i = 0; i < 8; i++)
1459 		s->temp_sensor[i] = le16toh(s->temp_sensor[i]);
1460 }
1461 
1462 
1463 static inline
1464 void	nvme_firmware_page_swapbytes(struct nvme_firmware_page *s)
1465 {
1466 	int i;
1467 
1468 	for (i = 0; i < 7; i++)
1469 		s->revision[i] = le64toh(s->revision[i]);
1470 }
1471 
1472 static inline
1473 void	intel_log_temp_stats_swapbytes(struct intel_log_temp_stats *s)
1474 {
1475 
1476 	s->current = le64toh(s->current);
1477 	s->overtemp_flag_last = le64toh(s->overtemp_flag_last);
1478 	s->overtemp_flag_life = le64toh(s->overtemp_flag_life);
1479 	s->max_temp = le64toh(s->max_temp);
1480 	s->min_temp = le64toh(s->min_temp);
1481 	/* omit _rsvd[] */
1482 	s->max_oper_temp = le64toh(s->max_oper_temp);
1483 	s->min_oper_temp = le64toh(s->min_oper_temp);
1484 	s->est_offset = le64toh(s->est_offset);
1485 }
1486 
1487 #endif /* __NVME_H__ */
1488