/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_H__
#define __NVME_H__

#ifdef _KERNEL
#include <sys/types.h>
#endif

#include <sys/param.h>

#define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
#define	NVME_RESET_CONTROLLER		_IO('n', 1)

#define	NVME_IO_TEST			_IOWR('n', 100, struct nvme_io_test)
#define	NVME_BIO_TEST			_IOWR('n', 101, struct nvme_io_test)

/*
 * Used to mark a command as applying to all namespaces, or to retrieve
 *  global log pages.
 */
#define NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)

#define NVME_MAX_XFER_SIZE		MAXPHYS

union cap_lo_register {
	uint32_t	raw;
	struct {
		/** maximum queue entries supported */
		uint32_t mqes		: 16;

		/** contiguous queues required */
		uint32_t cqr		: 1;

		/** arbitration mechanism supported */
		uint32_t ams		: 2;

		uint32_t reserved1	: 5;

		/** timeout */
		uint32_t to		: 8;
	} bits __packed;
} __packed;

union cap_hi_register {
	uint32_t	raw;
	struct {
		/** doorbell stride */
		uint32_t dstrd		: 4;

		uint32_t reserved3	: 1;

		/** command sets supported */
		uint32_t css_nvm	: 1;

		uint32_t css_reserved	: 3;
		uint32_t reserved2	: 7;

		/** memory page size minimum */
		uint32_t mpsmin		: 4;

		/** memory page size maximum */
		uint32_t mpsmax		: 4;

		uint32_t reserved1	: 8;
	} bits __packed;
} __packed;

union cc_register {
	uint32_t	raw;
	struct {
		/** enable */
		uint32_t en		: 1;

		uint32_t reserved1	: 3;

		/** i/o command set selected */
		uint32_t css		: 3;

		/** memory page size */
		uint32_t mps		: 4;

		/** arbitration mechanism selected */
		uint32_t ams		: 3;

		/** shutdown notification */
		uint32_t shn		: 2;

		/** i/o submission queue entry size */
		uint32_t iosqes		: 4;

		/** i/o completion queue entry size */
		uint32_t iocqes		: 4;

		uint32_t reserved2	: 8;
	} bits __packed;
} __packed;

enum shn_value {
	NVME_SHN_NORMAL		= 0x1,
	NVME_SHN_ABRUPT		= 0x2,
};

union csts_register {
	uint32_t	raw;
	struct {
		/** ready */
		uint32_t rdy		: 1;

		/** controller fatal status */
		uint32_t cfs		: 1;

		/** shutdown status */
		uint32_t shst		: 2;

		uint32_t reserved1	: 28;
	} bits __packed;
} __packed;

enum shst_value {
	NVME_SHST_NORMAL	= 0x0,
	NVME_SHST_OCCURRING	= 0x1,
	NVME_SHST_COMPLETE	= 0x2,
};

union aqa_register {
	uint32_t	raw;
	struct {
		/** admin submission queue size */
		uint32_t asqs		: 12;

		uint32_t reserved1	: 4;

		/** admin completion queue size */
		uint32_t acqs		: 12;

		uint32_t reserved2	: 4;
	} bits __packed;
} __packed;

struct nvme_registers
{
	/** controller capabilities */
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;

	uint32_t		vs;	/* version */
	uint32_t		intms;	/* interrupt mask set */
	uint32_t		intmc;	/* interrupt mask clear */

	/** controller configuration */
	union cc_register	cc;

	uint32_t		reserved1;

	/** controller status */
	union csts_register	csts;

	uint32_t		reserved2;

	/** admin queue attributes */
	union aqa_register	aqa;

	uint64_t		asq;	/* admin submission queue base addr */
	uint64_t		acq;	/* admin completion queue base addr */
	uint32_t		reserved3[0x3f2];

	struct {
	    uint32_t		sq_tdbl; /* submission queue tail doorbell */
	    uint32_t		cq_hdbl; /* completion queue head doorbell */
	} doorbell[1] __packed;
} __packed;
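
/*
 * Illustrative sketch (not compiled): how the register layout above maps
 *  onto the controller's BAR.  The doorbell[] array models a 4-byte
 *  doorbell stride (CAP.DSTRD == 0); for larger strides the doorbell for
 *  queue pair y lives at byte offset 0x1000 + ((2*y + iscq) * (4 << dstrd)).
 *  The function and variable names below are hypothetical, and a real
 *  driver accesses the BAR through bus_space rather than a raw pointer.
 */
#if 0
static void
nvme_example_ring_sq_doorbell(volatile struct nvme_registers *regs,
			      uint16_t qid, uint16_t new_sq_tail)
{
	/* Valid only while CAP.DSTRD == 0, matching doorbell[] above. */
	regs->doorbell[qid].sq_tdbl = new_sq_tail;
}

static void
nvme_example_shutdown(volatile struct nvme_registers *regs)
{
	union cc_register	cc;
	union csts_register	csts;

	/* Request a normal shutdown via CC.SHN ... */
	cc.raw = regs->cc.raw;
	cc.bits.shn = NVME_SHN_NORMAL;
	regs->cc.raw = cc.raw;

	/* ... then wait for CSTS.SHST to report completion (no timeout here). */
	do {
		csts.raw = regs->csts.raw;
	} while (csts.bits.shst != NVME_SHST_COMPLETE);
}
#endif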

struct nvme_command
{
	/* dword 0 */
	uint16_t opc	:  8;	/* opcode */
	uint16_t fuse	:  2;	/* fused operation */
	uint16_t rsvd1	:  6;
	uint16_t cid;		/* command identifier */

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t rsvd2;
	uint32_t rsvd3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 */
	uint64_t prp1;		/* prp entry 1 */

	/* dword 8-9 */
	uint64_t prp2;		/* prp entry 2 */

	/* dword 10-15 */
	uint32_t cdw10;		/* command-specific */
	uint32_t cdw11;		/* command-specific */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
} __packed;

struct nvme_status {

	uint16_t p	:  1;	/* phase tag */
	uint16_t sc	:  8;	/* status code */
	uint16_t sct	:  3;	/* status code type */
	uint16_t rsvd2	:  2;
	uint16_t m	:  1;	/* more */
	uint16_t dnr	:  1;	/* do not retry */
} __packed;

struct nvme_completion {

	/* dword 0 */
	uint32_t		cdw0;	/* command-specific */

	/* dword 1 */
	uint32_t		rsvd1;

	/* dword 2 */
	uint16_t		sqhd;	/* submission queue head pointer */
	uint16_t		sqid;	/* submission queue identifier */

	/* dword 3 */
	uint16_t		cid;	/* command identifier */
	struct nvme_status	status;
} __packed;

struct nvme_dsm_range {

	uint32_t attributes;
	uint32_t length;
	uint64_t starting_lba;
} __packed;

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	/* 0x3-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	/* 0x0d-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
};
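
/*
 * Illustrative sketch (not compiled): filling out an NVM Read command.
 *  Per the NVMe specification, cdw10 and cdw11 of a Read/Write command
 *  hold the 64-bit starting LBA and the low 16 bits of cdw12 hold the
 *  number of logical blocks, zero-based.  The helper name is
 *  hypothetical; in the driver the queue pair code assigns cid and
 *  fills prp1/prp2.
 */
#if 0
static void
nvme_example_build_read(struct nvme_command *cmd, uint32_t nsid,
			uint64_t lba, uint32_t lba_count)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opc = NVME_OPC_READ;
	cmd->nsid = nsid;
	cmd->cdw10 = lba & 0xffffffff;		/* starting LBA, low dword */
	cmd->cdw11 = lba >> 32;			/* starting LBA, high dword */
	cmd->cdw12 = (lba_count - 1) & 0xffff;	/* number of LBAs, 0-based */
	/* prp1/prp2 must point at the physical pages of the data buffer. */
}
#endif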

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	/* 0x0C-0x7F - reserved */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	/* 0x81-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};

enum nvme_dsm_attribute {
	NVME_DSM_ATTR_INTEGRAL_READ		= 0x1,
	NVME_DSM_ATTR_INTEGRAL_WRITE		= 0x2,
	NVME_DSM_ATTR_DEALLOCATE		= 0x4,
};
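
/*
 * Illustrative sketch (not compiled): describing an LBA range to be
 *  deallocated (TRIM).  A Dataset Management command takes an array of
 *  struct nvme_dsm_range entries; length is in logical blocks.  The
 *  NVME_DSM_ATTR_* values above match the Dataset Management attribute
 *  bits in cdw11 of the command itself (integral read/write,
 *  deallocate); see nvme_ns_cmd_deallocate() declared further below.
 *  Names and values here are hypothetical.
 */
#if 0
static struct nvme_dsm_range
nvme_example_trim_range(uint64_t start_lba, uint32_t num_lbas)
{
	struct nvme_dsm_range	range;

	range.attributes = 0;		/* per-range context attributes */
	range.length = num_lbas;	/* length is in logical blocks */
	range.starting_lba = start_lba;
	return (range);
}
#endif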

enum nvme_activate_action {
	NVME_AA_REPLACE_NO_ACTIVATE		= 0x0,
	NVME_AA_REPLACE_ACTIVATE		= 0x1,
	NVME_AA_ACTIVATE			= 0x2,
};

#define NVME_SERIAL_NUMBER_LENGTH	20
#define NVME_MODEL_NUMBER_LENGTH	40
#define NVME_FIRMWARE_REVISION_LENGTH	8

struct nvme_controller_data {

	/* bytes 0-255: controller capabilities and features */

	/** pci vendor id */
	uint16_t		vid;

	/** pci subsystem vendor id */
	uint16_t		ssvid;

	/** serial number */
	uint8_t			sn[NVME_SERIAL_NUMBER_LENGTH];

	/** model number */
	uint8_t			mn[NVME_MODEL_NUMBER_LENGTH];

	/** firmware revision */
	uint8_t			fr[NVME_FIRMWARE_REVISION_LENGTH];

	/** recommended arbitration burst */
	uint8_t			rab;

	/** ieee oui identifier */
	uint8_t			ieee[3];

	/** multi-interface capabilities */
	uint8_t			mic;

	/** maximum data transfer size */
	uint8_t			mdts;

	uint8_t			reserved1[178];

	/* bytes 256-511: admin command set attributes */

	/** optional admin command support */
	struct {
		/* supports security send/receive commands */
		uint16_t	security  : 1;

		/* supports format nvm command */
		uint16_t	format    : 1;

		/* supports firmware activate/download commands */
		uint16_t	firmware  : 1;

		uint16_t	oacs_rsvd : 13;
	} __packed oacs;

	/** abort command limit */
	uint8_t			acl;

	/** asynchronous event request limit */
	uint8_t			aerl;

	/** firmware updates */
	struct {
		/* first slot is read-only */
		uint8_t		slot1_ro  : 1;

		/* number of firmware slots */
		uint8_t		num_slots : 3;

		uint8_t		frmw_rsvd : 4;
	} __packed frmw;

	/** log page attributes */
	struct {
		/* per namespace smart/health log page */
		uint8_t		ns_smart : 1;

		uint8_t		lpa_rsvd : 7;
	} __packed lpa;

	/** error log page entries */
	uint8_t			elpe;

	/** number of power states supported */
	uint8_t			npss;

	/** admin vendor specific command configuration */
	struct {
		/* admin vendor specific commands use spec format */
		uint8_t		spec_format : 1;

		uint8_t		avscc_rsvd  : 7;
	} __packed avscc;

	uint8_t			reserved2[247];

	/* bytes 512-703: nvm command set attributes */

	/** submission queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed sqes;

	/** completion queue entry size */
	struct {
		uint8_t		min : 4;
		uint8_t		max : 4;
	} __packed cqes;

	uint8_t			reserved3[2];

	/** number of namespaces */
	uint32_t		nn;

	/** optional nvm command support */
	struct {
		uint16_t	compare : 1;
		uint16_t	write_unc : 1;
		uint16_t	dsm: 1;
		uint16_t	reserved: 13;
	} __packed oncs;

	/** fused operation support */
	uint16_t		fuses;

	/** format nvm attributes */
	uint8_t			fna;

	/** volatile write cache */
	struct {
		uint8_t		present : 1;
		uint8_t		reserved : 7;
	} __packed vwc;

	/* TODO: flesh out remaining nvm command set attributes */
	uint8_t			reserved4[178];

	/* bytes 704-2047: i/o command set attributes */
	uint8_t			reserved5[1344];

	/* bytes 2048-3071: power state descriptors */
	uint8_t			reserved6[1024];

	/* bytes 3072-4095: vendor specific */
	uint8_t			vs[1024];
} __packed __aligned(4);

struct nvme_namespace_data {

	/** namespace size */
	uint64_t		nsze;

	/** namespace capacity */
	uint64_t		ncap;

	/** namespace utilization */
	uint64_t		nuse;

	/** namespace features */
	struct {
		/** thin provisioning */
		uint8_t		thin_prov : 1;
		uint8_t		reserved1 : 7;
	} __packed nsfeat;

	/** number of lba formats */
	uint8_t			nlbaf;

	/** formatted lba size */
	struct {
		uint8_t		format    : 4;
		uint8_t		extended  : 1;
		uint8_t		reserved2 : 3;
	} __packed flbas;

	/** metadata capabilities */
	struct {
		/* metadata can be transferred as part of data prp list */
		uint8_t		extended  : 1;

		/* metadata can be transferred with separate metadata pointer */
		uint8_t		pointer   : 1;

		uint8_t		reserved3 : 6;
	} __packed mc;

	/** end-to-end data protection capabilities */
	struct {
		/* protection information type 1 */
		uint8_t		pit1     : 1;

		/* protection information type 2 */
		uint8_t		pit2     : 1;

		/* protection information type 3 */
		uint8_t		pit3     : 1;

		/* first eight bytes of metadata */
		uint8_t		md_start : 1;

		/* last eight bytes of metadata */
		uint8_t		md_end   : 1;
	} __packed dpc;

	/** end-to-end data protection type settings */
	struct {
		/* protection information type */
		uint8_t		pit       : 3;

		/* 1 == protection info transferred at start of metadata */
		/* 0 == protection info transferred at end of metadata */
		uint8_t		md_start  : 1;

		uint8_t		reserved4 : 4;
	} __packed dps;

	uint8_t			reserved5[98];

	/** lba format support */
	struct {
		/** metadata size */
		uint32_t	ms	  : 16;

		/** lba data size */
		uint32_t	lbads	  : 8;

		/** relative performance */
		uint32_t	rp	  : 2;

		uint32_t	reserved6 : 6;
	} __packed lbaf[16];

	uint8_t			reserved6[192];

	uint8_t			vendor_specific[3712];
} __packed __aligned(4);

enum nvme_log_page {

	/* 0x00 - reserved */
	NVME_LOG_ERROR			= 0x01,
	NVME_LOG_HEALTH_INFORMATION	= 0x02,
	NVME_LOG_FIRMWARE_SLOT		= 0x03,
	/* 0x04-0x7F - reserved */
	/* 0x80-0xBF - I/O command set specific */
	/* 0xC0-0xFF - vendor specific */
};

struct nvme_error_information_entry {

	uint64_t		error_count;
	uint16_t		sqid;
	uint16_t		cid;
	struct nvme_status	status;
	uint16_t		error_location;
	uint64_t		lba;
	uint32_t		nsid;
	uint8_t			vendor_specific;
	uint8_t			reserved[35];
} __packed __aligned(4);

union nvme_critical_warning_state {

	uint8_t		raw;

	struct {
		uint8_t	available_spare		: 1;
		uint8_t	temperature		: 1;
		uint8_t	device_reliability	: 1;
		uint8_t	read_only		: 1;
		uint8_t	volatile_memory_backup	: 1;
		uint8_t	reserved		: 3;
	} __packed bits;
} __packed;

struct nvme_health_information_page {

	union nvme_critical_warning_state	critical_warning;

	uint16_t		temperature;
	uint8_t			available_spare;
	uint8_t			available_spare_threshold;
	uint8_t			percentage_used;

	uint8_t			reserved[26];

	/*
	 * Note that the following are 128-bit values, but are
	 *  defined as an array of 2 64-bit values.
	 */
	/* Data Units Read is always in 512-byte units, reported in thousands. */
	uint64_t		data_units_read[2];
	/* Data Units Written is always in 512-byte units, reported in thousands. */
	uint64_t		data_units_written[2];
	/* For NVM command set, this includes Compare commands. */
	uint64_t		host_read_commands[2];
	uint64_t		host_write_commands[2];
	/* Controller Busy Time is reported in minutes. */
	uint64_t		controller_busy_time[2];
	uint64_t		power_cycles[2];
	uint64_t		power_on_hours[2];
	uint64_t		unsafe_shutdowns[2];
	uint64_t		media_errors[2];
	uint64_t		num_error_info_log_entries[2];

	uint8_t			reserved2[320];
} __packed __aligned(4);
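
/*
 * Illustrative sketch (not compiled): interpreting a few health log
 *  fields.  Per the NVMe specification the composite temperature is in
 *  Kelvin and the data unit counters are 128-bit little-endian values;
 *  only the low 64 bits are used here.  Names are hypothetical.
 */
#if 0
static void
nvme_example_print_health(const struct nvme_health_information_page *hp)
{
	int32_t		temp_c;
	uint64_t	bytes_read;

	temp_c = (int32_t)hp->temperature - 273;	/* Kelvin to Celsius */
	bytes_read = hp->data_units_read[0] * 1000 * 512;

	printf("temperature: %d C, spare: %u%%, used: %u%%, read: %ju bytes\n",
	    temp_c, hp->available_spare, hp->percentage_used,
	    (uintmax_t)bytes_read);
}
#endif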

struct nvme_firmware_page {

	struct {
		uint8_t	slot		: 3; /* slot for current FW */
		uint8_t	reserved	: 5;
	} __packed afi;

	uint8_t			reserved[7];
	uint64_t		revision[7]; /* revisions for 7 slots */
	uint8_t			reserved2[448];
} __packed __aligned(4);

#define NVME_TEST_MAX_THREADS	128

struct nvme_io_test {

	enum nvme_nvm_opcode	opc;
	uint32_t		size;
	uint32_t		time;	/* in seconds */
	uint32_t		num_threads;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

enum nvme_io_test_flags {

	/*
	 * Specifies whether dev_refthread/dev_relthread should be
	 *  called during NVME_BIO_TEST.  Ignored for other test
	 *  types.
	 */
	NVME_TEST_FLAG_REFTHREAD =	0x1,
};

struct nvme_pt_command {

	/*
	 * cmd is used to specify a passthrough command to a controller or
	 *  namespace.
	 *
	 * The following fields from cmd may be specified by the caller:
	 *	* opc  (opcode)
	 *	* nsid (namespace id) - for admin commands only
	 *	* cdw10-cdw15
	 *
	 * Remaining fields must be set to 0 by the caller.
	 */
	struct nvme_command	cmd;

	/*
	 * cpl returns completion status for the passthrough command
	 *  specified by cmd.
	 *
	 * The following fields will be filled out by the driver, for
	 *  consumption by the caller:
	 *	* cdw0
	 *	* status (except for phase)
	 *
	 * Remaining fields will be set to 0 by the driver.
	 */
	struct nvme_completion	cpl;

	/* buf is the data buffer associated with this passthrough command. */
	void *			buf;

	/*
	 * len is the length of the data buffer associated with this
	 *  passthrough command.
	 */
	uint32_t		len;

	/*
	 * is_read = 1 if the passthrough command will read data into the
	 *  supplied buffer from the controller.
	 *
	 * is_read = 0 if the passthrough command will write data from the
	 *  supplied buffer to the controller.
	 */
	uint32_t		is_read;

	/*
	 * driver_lock is used by the driver only.  It must be set to 0
	 *  by the caller.
	 */
	struct mtx *		driver_lock;
};

#define nvme_completion_is_error(cpl)					\
	((cpl)->status.sc != 0 || (cpl)->status.sct != 0)
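
/*
 * Illustrative sketch (not compiled): issuing an admin Identify
 *  Controller command from userland via NVME_PASSTHROUGH_CMD.  The
 *  device path is an assumption, error handling is simplified, and a
 *  real caller also needs <fcntl.h>, <sys/ioctl.h>, <string.h> and
 *  <unistd.h>.  cdw10 = 1 selects the controller data structure per
 *  the NVMe Identify command definition.
 */
#if 0
static int
nvme_example_identify(const char *dev_path, struct nvme_controller_data *cdata)
{
	struct nvme_pt_command	pt;
	int			fd;

	if ((fd = open(dev_path, O_RDWR)) < 0)	/* e.g. "/dev/nvme0" */
		return (-1);

	memset(&pt, 0, sizeof(pt));
	pt.cmd.opc = NVME_OPC_IDENTIFY;
	pt.cmd.cdw10 = 1;		/* CNS=1: return controller data */
	pt.buf = cdata;
	pt.len = sizeof(*cdata);
	pt.is_read = 1;			/* data flows controller to host */

	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0 ||
	    nvme_completion_is_error(&pt.cpl)) {
		close(fd);
		return (-1);
	}
	close(fd);
	return (0);
}
#endif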

void	nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);

#ifdef _KERNEL

struct bio;

struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
				     uint32_t, void *, uint32_t);
typedef void (*nvme_cons_fail_fn_t)(void *);

enum nvme_namespace_flags {
	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
	NVME_NS_FLUSH_SUPPORTED		= 0x2,
};

int	nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
				   struct nvme_pt_command *pt,
				   uint32_t nsid, int is_user_buffer,
				   int is_admin_cmd);

/* Admin functions */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
				    uint8_t log_page, uint32_t nsid,
				    void *payload, uint32_t payload_size,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
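
/*
 * Illustrative sketch (not compiled): fetching the SMART/health log for
 *  the whole controller.  NVME_GLOBAL_NAMESPACE_TAG selects the global
 *  log page; completion is reported through the supplied callback.  The
 *  wrapper name is hypothetical.
 */
#if 0
static void
nvme_example_get_health(struct nvme_controller *ctrlr,
    struct nvme_health_information_page *health,
    nvme_cb_fn_t done_fn, void *done_arg)
{
	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    NVME_GLOBAL_NAMESPACE_TAG, health, sizeof(*health),
	    done_fn, done_arg);
}
#endif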

/* NVM I/O functions */
int	nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
			  uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
int	nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
			 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
			 void *cb_arg);
int	nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
			      nvme_cb_fn_t cb_fn, void *cb_arg);
int	nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
			       uint8_t num_ranges, nvme_cb_fn_t cb_fn,
			       void *cb_arg);
int	nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
			  void *cb_arg);
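
/*
 * Illustrative sketch (not compiled): an asynchronous namespace read.
 *  The buffer must stay valid until the callback runs; the callback
 *  checks the completion with nvme_completion_is_error().  Function
 *  and variable names are hypothetical, and the return-value comment
 *  is an assumption about the submission path.
 */
#if 0
static void
nvme_example_read_done(void *arg, const struct nvme_completion *cpl)
{
	/* arg is the cb_arg passed to nvme_ns_cmd_read() (here: the buffer). */
	if (nvme_completion_is_error(cpl))
		printf("nvme example read failed\n");
}

static int
nvme_example_read(struct nvme_namespace *ns, void *buf, uint64_t lba,
    uint32_t lba_count)
{
	/* Assumed: returns non-zero if the request could not be submitted. */
	return (nvme_ns_cmd_read(ns, buf, lba, lba_count,
	    nvme_example_read_done, buf));
}
#endif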

/* Registration functions */
struct nvme_consumer *	nvme_register_consumer(nvme_cons_ns_fn_t    ns_fn,
					       nvme_cons_ctrlr_fn_t ctrlr_fn,
					       nvme_cons_async_fn_t async_fn,
					       nvme_cons_fail_fn_t  fail_fn);
void		nvme_unregister_consumer(struct nvme_consumer *consumer);
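
/*
 * Illustrative sketch (not compiled): registering a consumer, in the
 *  style of a storage front-end driver.  The assumption here is that the
 *  controller/namespace functions return per-device cookies that are
 *  later handed back to the async and fail callbacks, and that the
 *  remaining async parameters describe an associated log page; all
 *  names are hypothetical.
 */
#if 0
static void *
example_ns_attach(struct nvme_namespace *ns, void *ctrlr_cookie)
{
	return (ns);			/* per-namespace cookie */
}

static void *
example_ctrlr_attach(struct nvme_controller *ctrlr)
{
	return (ctrlr);			/* per-controller cookie */
}

static void
example_async(void *cookie, const struct nvme_completion *cpl,
    uint32_t log_page_id, void *log_page_buf, uint32_t log_page_size)
{
	/* React to asynchronous event notifications here. */
}

static void
example_fail(void *cookie)
{
	/* The controller has failed; tear down state tied to the cookie. */
}

static struct nvme_consumer *example_consumer;

static void
example_register(void)
{
	example_consumer = nvme_register_consumer(example_ns_attach,
	    example_ctrlr_attach, example_async, example_fail);
}
#endif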

/* Controller helper functions */
device_t	nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
const struct nvme_controller_data *
		nvme_ctrlr_get_data(struct nvme_controller *ctrlr);

/* Namespace helper functions */
uint32_t	nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t	nvme_ns_get_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_flags(struct nvme_namespace *ns);
const char *	nvme_ns_get_serial_number(struct nvme_namespace *ns);
const char *	nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
		nvme_ns_get_data(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_optimal_sector_size(struct nvme_namespace *ns);
uint32_t	nvme_ns_get_stripesize(struct nvme_namespace *ns);

int	nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
			    nvme_cb_fn_t cb_fn);

#endif /* _KERNEL */

#endif /* __NVME_H__ */
