xref: /freebsd/sys/dev/nvme/nvme.h (revision b70d2a2aa5003027b422e62435ab5bb9390d543c)
/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_H__
#define __NVME_H__

#ifdef _KERNEL
#include <sys/types.h>
#endif

#include <sys/param.h>

#define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
#define	NVME_RESET_CONTROLLER		_IO('n', 1)

#define	NVME_IO_TEST			_IOWR('n', 100, struct nvme_io_test)
#define	NVME_BIO_TEST			_IOWR('n', 101, struct nvme_io_test)

/*
 * Used to mark a command that applies to all namespaces, or to retrieve
 *  global log pages.
 */
#define NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

/* Cap nvme to 1MB transfers; the driver explodes with larger sizes. */
#define NVME_MAX_XFER_SIZE		(MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))

union cap_lo_register {
	uint32_t	raw;
	struct {
		/** maximum queue entries supported */
		uint32_t mqes		: 16;

		/** contiguous queues required */
		uint32_t cqr		: 1;

		/** arbitration mechanism supported */
		uint32_t ams		: 2;

		uint32_t reserved1	: 5;

		/** timeout */
		uint32_t to		: 8;
	} bits __packed;
} __packed;

union cap_hi_register {
	uint32_t	raw;
	struct {
		/** doorbell stride */
		uint32_t dstrd		: 4;

		uint32_t reserved3	: 1;

		/** command sets supported */
		uint32_t css_nvm	: 1;

		uint32_t css_reserved	: 3;
		uint32_t reserved2	: 7;

		/** memory page size minimum */
		uint32_t mpsmin		: 4;

		/** memory page size maximum */
		uint32_t mpsmax		: 4;

		uint32_t reserved1	: 8;
	} bits __packed;
} __packed;

union cc_register {
	uint32_t	raw;
	struct {
		/** enable */
		uint32_t en		: 1;

		uint32_t reserved1	: 3;

		/** i/o command set selected */
		uint32_t css		: 3;

		/** memory page size */
		uint32_t mps		: 4;

		/** arbitration mechanism selected */
		uint32_t ams		: 3;

		/** shutdown notification */
		uint32_t shn		: 2;

		/** i/o submission queue entry size */
		uint32_t iosqes		: 4;

		/** i/o completion queue entry size */
		uint32_t iocqes		: 4;

		uint32_t reserved2	: 8;
	} bits __packed;
} __packed;

enum shn_value {
	NVME_SHN_NORMAL		= 0x1,
	NVME_SHN_ABRUPT		= 0x2,
};

union csts_register {
	uint32_t	raw;
	struct {
		/** ready */
		uint32_t rdy		: 1;

		/** controller fatal status */
		uint32_t cfs		: 1;

		/** shutdown status */
		uint32_t shst		: 2;

		uint32_t reserved1	: 28;
	} bits __packed;
} __packed;

enum shst_value {
	NVME_SHST_NORMAL	= 0x0,
	NVME_SHST_OCCURRING	= 0x1,
	NVME_SHST_COMPLETE	= 0x2,
};

union aqa_register {
	uint32_t	raw;
	struct {
		/** admin submission queue size */
		uint32_t asqs		: 12;

		uint32_t reserved1	: 4;

		/** admin completion queue size */
		uint32_t acqs		: 12;

		uint32_t reserved2	: 4;
	} bits __packed;
} __packed;

struct nvme_registers
{
	/** controller capabilities */
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;

	uint32_t		vs;	/* version */
	uint32_t		intms;	/* interrupt mask set */
	uint32_t		intmc;	/* interrupt mask clear */

	/** controller configuration */
	union cc_register	cc;

	uint32_t		reserved1;

	/** controller status */
	union csts_register	csts;

	uint32_t		reserved2;

	/** admin queue attributes */
	union aqa_register	aqa;

	uint64_t		asq;	/* admin submission queue base addr */
	uint64_t		acq;	/* admin completion queue base addr */
	uint32_t		reserved3[0x3f2];

	struct {
	    uint32_t		sq_tdbl; /* submission queue tail doorbell */
	    uint32_t		cq_hdbl; /* completion queue head doorbell */
	} doorbell[1] __packed;
} __packed;
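
/*
 * Illustrative sketch, not part of this header: the doorbell[] array above
 * matches a doorbell stride (CAP.DSTRD) of 0, i.e. one 8-byte submission
 * tail / completion head pair per queue starting at offset 0x1000.  For
 * other strides, the byte offset of a queue's doorbell register follows
 * the formula below; the helper name is hypothetical and the block is not
 * compiled.
 */
#if 0
static inline uint32_t
nvme_doorbell_offset(uint32_t qid, int is_cq_head, uint8_t dstrd)
{

	/* SQ y tail = 0x1000 + (2y) * (4 << DSTRD); CQ y head uses 2y + 1. */
	return (0x1000 + (2 * qid + (is_cq_head ? 1 : 0)) * (4 << dstrd));
}
#endif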

struct nvme_command
{
	/* dword 0 */
	uint16_t opc	:  8;	/* opcode */
	uint16_t fuse	:  2;	/* fused operation */
	uint16_t rsvd1	:  6;
	uint16_t cid;		/* command identifier */

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t rsvd2;
	uint32_t rsvd3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 */
	uint64_t prp1;		/* prp entry 1 */

	/* dword 8-9 */
	uint64_t prp2;		/* prp entry 2 */

	/* dword 10-15 */
	uint32_t cdw10;		/* command-specific */
	uint32_t cdw11;		/* command-specific */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
} __packed;

struct nvme_status {

	uint16_t p	:  1;	/* phase tag */
	uint16_t sc	:  8;	/* status code */
	uint16_t sct	:  3;	/* status code type */
	uint16_t rsvd2	:  2;
	uint16_t m	:  1;	/* more */
	uint16_t dnr	:  1;	/* do not retry */
} __packed;

struct nvme_completion {

	/* dword 0 */
	uint32_t		cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t		rsvd1;

	/* dword 2 */
	uint16_t		sqhd;	/* submission queue head pointer */
	uint16_t		sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t		cid;	/* command identifier */
	struct nvme_status	status;
} __packed;

struct nvme_dsm_range {

	uint32_t attributes;
	uint32_t length;
	uint64_t starting_lba;
} __packed;

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	/* 0x3-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	/* 0x0d-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	/* 0x0C-0x7F - reserved */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	/* 0x81-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};

enum nvme_dsm_attribute {
	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
};

enum nvme_activate_action {
	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
	NVME_AA_REPLACE_ACTIVATE		= 0x1,
	NVME_AA_ACTIVATE			= 0x2,
};

struct nvme_power_state {
	/** Maximum Power */
	uint16_t	mp;			/* Maximum Power */
	uint8_t		ps_rsvd1;
	uint8_t		mps      : 1;		/* Max Power Scale */
	uint8_t		nops     : 1;		/* Non-Operational State */
	uint8_t		ps_rsvd2 : 6;
	uint32_t	enlat;			/* Entry Latency */
	uint32_t	exlat;			/* Exit Latency */
	uint8_t		rrt      : 5;		/* Relative Read Throughput */
	uint8_t		ps_rsvd3 : 3;
	uint8_t		rrl      : 5;		/* Relative Read Latency */
	uint8_t		ps_rsvd4 : 3;
	uint8_t		rwt      : 5;		/* Relative Write Throughput */
	uint8_t		ps_rsvd5 : 3;
	uint8_t		rwl      : 5;		/* Relative Write Latency */
	uint8_t		ps_rsvd6 : 3;
	uint16_t	idlp;			/* Idle Power */
	uint8_t		ps_rsvd7 : 6;
	uint8_t		ips      : 2;		/* Idle Power Scale */
	uint8_t		ps_rsvd8;
	uint16_t	actp;			/* Active Power */
	uint8_t		apw      : 3;		/* Active Power Workload */
	uint8_t		ps_rsvd9 : 3;
	uint8_t		aps      : 2;		/* Active Power Scale */
	uint8_t		ps_rsvd10[9];
} __packed;

#define NVME_SERIAL_NUMBER_LENGTH	20
#define NVME_MODEL_NUMBER_LENGTH	40
#define NVME_FIRMWARE_REVISION_LENGTH	8

struct nvme_controller_data {

	/* bytes 0-255: controller capabilities and features */

	/** pci vendor id */
	uint16_t		vid;

	/** pci subsystem vendor id */
	uint16_t		ssvid;

	/** serial number */
	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];

	/** model number */
	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];

	/** firmware revision */
	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];

	/** recommended arbitration burst */
	uint8_t			rab;

	/** ieee oui identifier */
	uint8_t			ieee[3];

	/** multi-interface capabilities */
	uint8_t			mic;

	/** maximum data transfer size */
	uint8_t			mdts;

	uint8_t			reserved1[178];

	/* bytes 256-511: admin command set attributes */

	/** optional admin command support */
	struct {
		/* supports security send/receive commands */
		uint16_t	security  : 1;

		/* supports format nvm command */
		uint16_t	format    : 1;

		/* supports firmware activate/download commands */
		uint16_t	firmware  : 1;

		uint16_t	oacs_rsvd : 13;
	} __packed oacs;

	/** abort command limit */
	uint8_t			acl;

	/** asynchronous event request limit */
	uint8_t			aerl;

	/** firmware updates */
	struct {
		/* first slot is read-only */
		uint8_t		slot1_ro  : 1;

		/* number of firmware slots */
		uint8_t		num_slots : 3;

		uint8_t		frmw_rsvd : 4;
	} __packed frmw;

	/** log page attributes */
	struct {
		/* per namespace smart/health log page */
		uint8_t		ns_smart : 1;

		uint8_t		lpa_rsvd : 7;
	} __packed lpa;

	/** error log page entries */
	uint8_t			elpe;

	/** number of power states supported */
	uint8_t			npss;

	/** admin vendor specific command configuration */
	struct {
		/* admin vendor specific commands use spec format */
		uint8_t		spec_format : 1;

		uint8_t		avscc_rsvd  : 7;
	} __packed avscc;

	uint8_t			reserved2[247];

	/* bytes 512-703: nvm command set attributes */

	/** submission queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed sqes;

	/** completion queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed cqes;

	uint8_t			reserved3[2];

	/** number of namespaces */
	uint32_t		nn;

	/** optional nvm command support */
	struct {
		uint16_t	compare : 1;
		uint16_t	write_unc : 1;
		uint16_t	dsm: 1;
		uint16_t	reserved: 13;
	} __packed oncs;

	/** fused operation support */
	uint16_t		fuses;

	/** format nvm attributes */
	uint8_t			fna;

	/** volatile write cache */
	struct {
		uint8_t		present : 1;
		uint8_t		reserved : 7;
	} __packed vwc;

	/* TODO: flesh out remaining nvm command set attributes */
	uint8_t			reserved4[178];

	/* bytes 704-2047: i/o command set attributes */
	uint8_t			reserved5[1344];

	/* bytes 2048-3071: power state descriptors */
	struct nvme_power_state power_state[32];

	/* bytes 3072-4095: vendor specific */
	uint8_t			vs[1024];
} __packed __aligned(4);
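
/*
 * Illustrative sketch, not compiled: mdts above is a power of two expressed
 * in units of the minimum memory page size (CAP.MPSMIN, itself a power of
 * two offset from 4KB), with 0 meaning no limit.  A hypothetical helper
 * converting it to a byte count:
 */
#if 0
static inline uint64_t
nvme_mdts_to_bytes(const struct nvme_controller_data *cdata, uint8_t mpsmin)
{

	if (cdata->mdts == 0)
		return (0);	/* controller reports no transfer size limit */
	/* bytes = 2^(12 + MPSMIN) * 2^MDTS */
	return ((uint64_t)1 << (12 + mpsmin + cdata->mdts));
}
#endif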

struct nvme_namespace_data {

	/** namespace size */
	uint64_t		nsze;

	/** namespace capacity */
	uint64_t		ncap;

	/** namespace utilization */
	uint64_t		nuse;

	/** namespace features */
	struct {
		/** thin provisioning */
		uint8_t		thin_prov : 1;
		uint8_t		reserved1 : 7;
	} __packed nsfeat;

	/** number of lba formats */
	uint8_t			nlbaf;

	/** formatted lba size */
	struct {
		uint8_t		format    : 4;
		uint8_t		extended  : 1;
		uint8_t		reserved2 : 3;
	} __packed flbas;

	/** metadata capabilities */
	struct {
		/* metadata can be transferred as part of data prp list */
		uint8_t		extended  : 1;

		/* metadata can be transferred with separate metadata pointer */
		uint8_t		pointer   : 1;

		uint8_t		reserved3 : 6;
	} __packed mc;

	/** end-to-end data protection capabilities */
	struct {
		/* protection information type 1 */
		uint8_t		pit1     : 1;

		/* protection information type 2 */
		uint8_t		pit2     : 1;

		/* protection information type 3 */
		uint8_t		pit3     : 1;

		/* first eight bytes of metadata */
		uint8_t		md_start : 1;

		/* last eight bytes of metadata */
		uint8_t		md_end   : 1;
	} __packed dpc;

	/** end-to-end data protection type settings */
	struct {
		/* protection information type */
		uint8_t		pit       : 3;

		/* 1 == protection info transferred at start of metadata */
		/* 0 == protection info transferred at end of metadata */
		uint8_t		md_start  : 1;

		uint8_t		reserved4 : 4;
	} __packed dps;

	uint8_t			reserved5[98];

	/** lba format support */
	struct {
		/** metadata size */
		uint32_t	ms	  : 16;

		/** lba data size */
		uint32_t	lbads	  : 8;

		/** relative performance */
		uint32_t	rp	  : 2;

		uint32_t	reserved6 : 6;
	} __packed lbaf[16];

	uint8_t			reserved6[192];

	uint8_t			vendor_specific[3712];
} __packed __aligned(4);
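
/*
 * Illustrative sketch, not compiled: the active LBA format is selected by
 * flbas.format, and each lbaf[] entry encodes the data size as a power of
 * two (lbads).  A hypothetical helper returning the sector size in bytes:
 */
#if 0
static inline uint32_t
nvme_nsdata_sector_size(const struct nvme_namespace_data *nsdata)
{

	return (1 << nsdata->lbaf[nsdata->flbas.format].lbads);
}
#endif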

enum nvme_log_page {

	/* 0x00 - reserved */
	NVME_LOG_ERROR			= 0x01,
	NVME_LOG_HEALTH_INFORMATION	= 0x02,
	NVME_LOG_FIRMWARE_SLOT		= 0x03,
	NVME_LOG_CHANGED_NAMESPACE	= 0x04,
	NVME_LOG_COMMAND_EFFECT		= 0x05,
	/* 0x06-0x7F - reserved */
	/* 0x80-0xBF - I/O command set specific */
	NVME_LOG_RES_NOTIFICATION	= 0x80,
	/* 0xC0-0xFF - vendor specific */

	/*
	 * The following are Intel Specific log pages, but they seem
	 * to be widely implemented.
	 */
	INTEL_LOG_READ_LAT_LOG		= 0xc1,
	INTEL_LOG_WRITE_LAT_LOG		= 0xc2,
	INTEL_LOG_TEMP_STATS		= 0xc5,
	INTEL_LOG_ADD_SMART		= 0xca,
	INTEL_LOG_DRIVE_MKT_NAME	= 0xdd,

	/*
	 * HGST log page, with lots of sub pages.
	 */
	HGST_INFO_LOG			= 0xc1,
};

struct nvme_error_information_entry {

	uint64_t		error_count;
	uint16_t		sqid;
	uint16_t		cid;
	struct nvme_status	status;
	uint16_t		error_location;
	uint64_t		lba;
	uint32_t		nsid;
	uint8_t			vendor_specific;
	uint8_t			reserved[35];
} __packed __aligned(4);

union nvme_critical_warning_state {

	uint8_t		raw;

	struct {
		uint8_t	available_spare		: 1;
		uint8_t	temperature		: 1;
		uint8_t	device_reliability	: 1;
		uint8_t	read_only		: 1;
		uint8_t	volatile_memory_backup	: 1;
		uint8_t	reserved		: 3;
	} __packed bits;
} __packed;

struct nvme_health_information_page {

	union nvme_critical_warning_state	critical_warning;

	uint16_t		temperature;
	uint8_t			available_spare;
	uint8_t			available_spare_threshold;
	uint8_t			percentage_used;

	uint8_t			reserved[26];

	/*
	 * Note that the following are 128-bit values, but are
	 *  defined as an array of 2 64-bit values.
	 */
	/* Data Units Read is always in 512-byte units. */
	uint64_t		data_units_read[2];
	/* Data Units Written is always in 512-byte units. */
	uint64_t		data_units_written[2];
	/* For NVM command set, this includes Compare commands. */
	uint64_t		host_read_commands[2];
	uint64_t		host_write_commands[2];
	/* Controller Busy Time is reported in minutes. */
	uint64_t		controller_busy_time[2];
	uint64_t		power_cycles[2];
	uint64_t		power_on_hours[2];
	uint64_t		unsafe_shutdowns[2];
	uint64_t		media_errors[2];
	uint64_t		num_error_info_log_entries[2];
	uint32_t		warning_temp_time;
	uint32_t		error_temp_time;
	uint16_t		temp_sensor[8];

	uint8_t			reserved2[296];
} __packed __aligned(4);

struct nvme_firmware_page {

	struct {
		uint8_t	slot		: 3; /* slot for current FW */
		uint8_t	reserved	: 5;
	} __packed afi;

	uint8_t			reserved[7];
	uint64_t		revision[7]; /* revisions for 7 slots */
	uint8_t			reserved2[448];
} __packed __aligned(4);

struct intel_log_temp_stats
{
	uint64_t	current;
	uint64_t	overtemp_flag_last;
	uint64_t	overtemp_flag_life;
	uint64_t	max_temp;
	uint64_t	min_temp;
	uint64_t	_rsvd[5];
	uint64_t	max_oper_temp;
	uint64_t	min_oper_temp;
	uint64_t	est_offset;
} __packed __aligned(4);

#define NVME_TEST_MAX_THREADS	128

struct nvme_io_test {

	enum nvme_nvm_opcode	opc;
	uint32_t		size;
	uint32_t		time;	/* in seconds */
	uint32_t		num_threads;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

enum nvme_io_test_flags {

	/*
	 * Specifies whether dev_refthread/dev_relthread should be
	 *  called during NVME_BIO_TEST.  Ignored for other test
	 *  types.
	 */
	NVME_TEST_FLAG_REFTHREAD =	0x1,
};

struct nvme_pt_command {

	/*
	 * cmd is used to specify a passthrough command to a controller or
	 *  namespace.
	 *
	 * The following fields from cmd may be specified by the caller:
	 *	* opc  (opcode)
	 *	* nsid (namespace id) - for admin commands only
	 *	* cdw10-cdw15
	 *
	 * Remaining fields must be set to 0 by the caller.
	 */
	struct nvme_command	cmd;

	/*
	 * cpl returns completion status for the passthrough command
	 *  specified by cmd.
	 *
	 * The following fields will be filled out by the driver, for
	 *  consumption by the caller:
	 *	* cdw0
	 *	* status (except for phase)
	 *
	 * Remaining fields will be set to 0 by the driver.
	 */
	struct nvme_completion	cpl;

	/* buf is the data buffer associated with this passthrough command. */
	void *			buf;

	/*
	 * len is the length of the data buffer associated with this
	 *  passthrough command.
	 */
	uint32_t		len;

	/*
	 * is_read = 1 if the passthrough command will read data into the
	 *  supplied buffer from the controller.
	 *
	 * is_read = 0 if the passthrough command will write data from the
	 *  supplied buffer to the controller.
	 */
	uint32_t		is_read;

	/*
	 * driver_lock is used by the driver only.  It must be set to 0
	 *  by the caller.
	 */
	struct mtx *		driver_lock;
};
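
/*
 * Illustrative sketch, not compiled: issuing an admin passthrough command
 * from userland with the NVME_PASSTHROUGH_CMD ioctl.  This fetches the
 * IDENTIFY CONTROLLER data structure; the device path, function name, and
 * CNS value are assumptions for a typical setup.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
example_identify_controller(struct nvme_controller_data *cdata)
{
	struct nvme_pt_command	pt;
	int fd, error;

	if ((fd = open("/dev/nvme0", O_RDWR)) < 0)
		return (-1);
	memset(&pt, 0, sizeof(pt));
	pt.cmd.opc = NVME_OPC_IDENTIFY;
	pt.cmd.cdw10 = 1;		/* CNS=1: identify controller */
	pt.buf = cdata;
	pt.len = sizeof(*cdata);
	pt.is_read = 1;			/* data flows controller -> host */
	error = ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
	close(fd);
	if (error < 0 || pt.cpl.status.sc != 0 || pt.cpl.status.sct != 0)
		return (-1);
	return (0);
}
#endif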

#define nvme_completion_is_error(cpl)					\
	((cpl)->status.sc != 0 || (cpl)->status.sct != 0)

void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);

#ifdef _KERNEL

struct bio;

struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
				     uint32_t, void *, uint32_t);
typedef void (*nvme_cons_fail_fn_t)(void *);

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
				   struct nvme_pt_command *pt,
				   uint32_t nsid, int is_user_buffer,
				   int is_admin_cmd);

/* Admin functions */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
				    uint8_t log_page, uint32_t nsid,
				    void *payload, uint32_t payload_size,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
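
/*
 * Illustrative sketch, not compiled: fetching the SMART / health log for
 * the whole controller with an asynchronous completion callback.  The
 * function names are hypothetical; the payload buffer must stay valid
 * until the callback runs.
 */
#if 0
static void
example_health_log_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_health_information_page *hip = arg;

	if (nvme_completion_is_error(cpl))
		return;
	/* The composite temperature field is reported in degrees Kelvin. */
	printf("composite temperature: %dC\n", (int)hip->temperature - 273);
}

static void
example_fetch_health_log(struct nvme_controller *ctrlr,
    struct nvme_health_information_page *hip)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    NVME_GLOBAL_NAMESPACE_TAG, hip, sizeof(*hip),
	    example_health_log_done, hip);
}
#endif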

/* NVM I/O functions */
int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			 void *cb_arg);
int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
			       void *cb_arg);
int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset,
		     size_t len);

/* Registration functions */
struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
					       nvme_cons_ctrlr_fn_t ctrlr_fn,
					       nvme_cons_async_fn_t async_fn,
					       nvme_cons_fail_fn_t  fail_fn);
void		nvme_unregister_consumer(struct nvme_consumer *consumer);

/* Controller helper functions */
device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
const struct nvme_controller_data *
		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);

/* Namespace helper functions */
uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
		nvme_ns_get_data(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_stripesize(struct nvme_namespace *ns);

int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
			    nvme_cb_fn_t cb_fn);

/* Command building helper functions -- shared with CAM */
static inline
void	nvme_ns_flush_cmd(struct nvme_command *cmd, uint16_t nsid)
{

	cmd->opc = NVME_OPC_FLUSH;
	cmd->nsid = nsid;
}

static inline
void	nvme_ns_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint16_t nsid,
    uint64_t lba, uint32_t count)
{
	cmd->opc = rwcmd;
	cmd->nsid = nsid;
	cmd->cdw10 = lba & 0xffffffffu;
	cmd->cdw11 = lba >> 32;
	cmd->cdw12 = count-1;
	cmd->cdw13 = 0;
	cmd->cdw14 = 0;
	cmd->cdw15 = 0;
}

static inline
void	nvme_ns_write_cmd(struct nvme_command *cmd, uint16_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
}

static inline
void	nvme_ns_read_cmd(struct nvme_command *cmd, uint16_t nsid,
    uint64_t lba, uint32_t count)
{
	nvme_ns_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
}

static inline
void	nvme_ns_trim_cmd(struct nvme_command *cmd, uint16_t nsid,
    uint32_t num_ranges)
{
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = num_ranges - 1;
	cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
}
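
/*
 * Illustrative sketch, not compiled: pairing nvme_ns_trim_cmd() with a
 * struct nvme_dsm_range payload describing a single range to deallocate.
 * The helper name is hypothetical; prp1/prp2 are filled in later when the
 * range buffer is mapped for the transfer.
 */
#if 0
static inline void
example_build_trim(struct nvme_command *cmd, struct nvme_dsm_range *range,
    uint16_t nsid, uint64_t lba, uint32_t lba_count)
{

	/*
	 * Per-range context attributes; the deallocate attribute itself is
	 * set in cdw11 by nvme_ns_trim_cmd().
	 */
	range->attributes = 0;
	range->length = lba_count;
	range->starting_lba = lba;
	nvme_ns_trim_cmd(cmd, nsid, 1);
}
#endif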

#endif /* _KERNEL */

#endif /* __NVME_H__ */