xref: /freebsd/sys/dev/nvmf/nvmf_proto.h (revision 894cb08f0d3656fdb81f4d89085bedc4235f3cb6)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 */

/* Derived from include/spdk/nvmf_spec.h from Intel's SPDK. */

#ifndef __NVMF_PROTO_H__
#define	__NVMF_PROTO_H__

#include <sys/types.h>
#include <sys/cdefs.h>
#ifdef _KERNEL
#include <sys/stddef.h>
#else
#include <stddef.h>
#endif
#include <dev/nvme/nvme.h>

/**
 * \file
 * NVMe over Fabrics specification definitions
 */

#pragma pack(push, 1)

#define	NVME_NQN_FIELD_SIZE		256

struct nvmf_capsule_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[35];
	uint8_t		fabric_specific[24];
};
_Static_assert(sizeof(struct nvmf_capsule_cmd) == 64, "Incorrect size");

/* Fabric Command Set */
enum nvmf_fabric_cmd_types {
	NVMF_FABRIC_COMMAND_PROPERTY_SET			= 0x00,
	NVMF_FABRIC_COMMAND_CONNECT				= 0x01,
	NVMF_FABRIC_COMMAND_PROPERTY_GET			= 0x04,
	NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND			= 0x05,
	NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV			= 0x06,
	NVMF_FABRIC_COMMAND_DISCONNECT				= 0x08,
	NVMF_FABRIC_COMMAND_START_VENDOR_SPECIFIC		= 0xC0,
};
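
/*
 * Illustrative sketch (not part of the original header): a Fabrics command
 * travels in a normal 64-byte SQE whose opcode is the Fabrics opcode (0x7f in
 * the NVMe over Fabrics specification); the fctype byte then selects one of
 * the enum nvmf_fabric_cmd_types values above.  The macro and helper below
 * are only an example of how a receiver might classify such a capsule; the
 * names are illustrative.
 */
#define	NVMF_EXAMPLE_FABRIC_OPCODE	0x7f	/* example name; spec value */

static inline int
nvmf_example_is_fabric_connect(const struct nvmf_capsule_cmd *cc)
{

	/* Fabrics commands all carry the Fabrics opcode ... */
	if (cc->opcode != NVMF_EXAMPLE_FABRIC_OPCODE)
		return (0);
	/* ... and the fabric command type selects the operation. */
	return (cc->fctype == NVMF_FABRIC_COMMAND_CONNECT);
}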

enum nvmf_fabric_cmd_status_code {
	NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT		= 0x80,
	NVMF_FABRIC_SC_CONTROLLER_BUSY			= 0x81,
	NVMF_FABRIC_SC_INVALID_PARAM			= 0x82,
	NVMF_FABRIC_SC_RESTART_DISCOVERY		= 0x83,
	NVMF_FABRIC_SC_INVALID_HOST			= 0x84,
	NVMF_FABRIC_SC_INVALID_QUEUE_TYPE		= 0x85,
	NVMF_FABRIC_SC_LOG_RESTART_DISCOVERY		= 0x90,
	NVMF_FABRIC_SC_AUTH_REQUIRED			= 0x91,
};

/**
 * RDMA Queue Pair service types
 */
enum nvmf_rdma_qptype {
	/** Reliable connected */
	NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED		= 0x1,

	/** Reliable datagram */
	NVMF_RDMA_QPTYPE_RELIABLE_DATAGRAM		= 0x2,
};

/**
 * RDMA provider types
 */
enum nvmf_rdma_prtype {
	/** No provider specified */
	NVMF_RDMA_PRTYPE_NONE		= 0x1,

	/** InfiniBand */
	NVMF_RDMA_PRTYPE_IB		= 0x2,

	/** RoCE v1 */
	NVMF_RDMA_PRTYPE_ROCE		= 0x3,

	/** RoCE v2 */
	NVMF_RDMA_PRTYPE_ROCE2		= 0x4,

	/** iWARP */
	NVMF_RDMA_PRTYPE_IWARP		= 0x5,
};

/**
 * RDMA connection management service types
 */
enum nvmf_rdma_cms {
	/** Sockets based endpoint addressing */
	NVMF_RDMA_CMS_RDMA_CM		= 0x1,
};

/**
 * NVMe over Fabrics transport types
 */
enum nvmf_trtype {
	/** RDMA */
	NVMF_TRTYPE_RDMA		= 0x1,

	/** Fibre Channel */
	NVMF_TRTYPE_FC			= 0x2,

	/** TCP */
	NVMF_TRTYPE_TCP			= 0x3,

	/** Intra-host transport (loopback) */
	NVMF_TRTYPE_INTRA_HOST		= 0xfe,
};

/**
 * Address family types
 */
enum nvmf_adrfam {
	/** IPv4 (AF_INET) */
	NVMF_ADRFAM_IPV4		= 0x1,

	/** IPv6 (AF_INET6) */
	NVMF_ADRFAM_IPV6		= 0x2,

	/** InfiniBand (AF_IB) */
	NVMF_ADRFAM_IB			= 0x3,

	/** Fibre Channel address family */
	NVMF_ADRFAM_FC			= 0x4,

	/** Intra-host transport (loopback) */
	NVMF_ADRFAM_INTRA_HOST		= 0xfe,
};

/**
 * NVM subsystem types
 */
enum nvmf_subtype {
	/** Referral to a discovery service */
	NVMF_SUBTYPE_DISCOVERY		= 0x1,

	/** NVM Subsystem */
	NVMF_SUBTYPE_NVME		= 0x2,

	/** Current Discovery Subsystem */
	NVMF_SUBTYPE_DISCOVERY_CURRENT	= 0x3
};

/* Discovery Log Entry Flags - Duplicate Returned Information */
#define NVMF_DISCOVERY_LOG_EFLAGS_DUPRETINFO (1u << 0u)

/* Discovery Log Entry Flags - Explicit Persistent Connection Support for Discovery */
#define NVMF_DISCOVERY_LOG_EFLAGS_EPCSD (1u << 1u)

/**
 * Connections shall be made over a fabric secure channel
 */
enum nvmf_treq_secure_channel {
	/** Not specified */
	NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED		= 0x0,

	/** Required */
	NVMF_TREQ_SECURE_CHANNEL_REQUIRED		= 0x1,

	/** Not required */
	NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED		= 0x2,
};

struct nvmf_fabric_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[59];
};

struct nvmf_fabric_auth_recv_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype; /* NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV (0x06) */
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint8_t		reserved3;
	uint8_t		spsp0;
	uint8_t		spsp1;
	uint8_t		secp;
	uint32_t	al;
	uint8_t		reserved4[16];
};
_Static_assert(sizeof(struct nvmf_fabric_auth_recv_cmd) == 64, "Incorrect size");

struct nvmf_fabric_auth_send_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype; /* NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND (0x05) */
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint8_t		reserved3;
	uint8_t		spsp0;
	uint8_t		spsp1;
	uint8_t		secp;
	uint32_t	tl;
	uint8_t		reserved4[16];
};
_Static_assert(sizeof(struct nvmf_fabric_auth_send_cmd) == 64, "Incorrect size");

struct nvmf_fabric_connect_data {
	uint8_t		hostid[16];
	uint16_t	cntlid;
	uint8_t		reserved5[238];
	uint8_t		subnqn[NVME_NQN_FIELD_SIZE];
	uint8_t		hostnqn[NVME_NQN_FIELD_SIZE];
	uint8_t		reserved6[256];
};
_Static_assert(sizeof(struct nvmf_fabric_connect_data) == 1024, "Incorrect size");

struct nvmf_fabric_connect_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint16_t	recfmt; /* Connect Record Format */
	uint16_t	qid; /* Queue Identifier */
	uint16_t	sqsize; /* Submission Queue Size */
	uint8_t		cattr; /* queue attributes */
	uint8_t		reserved3;
	uint32_t	kato; /* keep alive timeout */
	uint8_t		reserved4[12];
};
_Static_assert(sizeof(struct nvmf_fabric_connect_cmd) == 64, "Incorrect size");

#define	NVMF_CNTLID_DYNAMIC	0xFFFF
#define	NVMF_CNTLID_STATIC_ANY	0xFFFE

/*
 * XXX: 5.3 in NVMe-over-Fabrics 1.1 gives this as an upper bound in
 * the Discovery Log Entry.
 */
#define	NVMF_CNTLID_STATIC_MAX	0xFFEF

/* 5.21.1.15 in NVMe 1.4b */
#define	NVMF_KATO_DEFAULT			(120000)

#define NVMF_CONNECT_ATTR_PRIORITY_CLASS	(0x3)
#define NVMF_CONNECT_ATTR_DISABLE_SQ_FC		(1u << 2)
#define NVMF_CONNECT_ATTR_IO_QUEUE_DELETION	(1u << 3)
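
/*
 * Illustrative sketch (not part of the original header): filling in a Connect
 * command and its 1024-byte data block for the Admin queue of a dynamic
 * controller.  The function name and the 0x7f Fabrics opcode literal are
 * illustrative; byte-order conversion of the multi-byte fields (they are
 * little-endian on the wire) is omitted for brevity.
 */
static inline void
nvmf_example_init_admin_connect(struct nvmf_fabric_connect_cmd *cmd,
    struct nvmf_fabric_connect_data *data, uint16_t cid, uint16_t sqsize)
{

	*cmd = (struct nvmf_fabric_connect_cmd){
		.opcode = 0x7f,			/* Fabrics command opcode */
		.cid = cid,
		.fctype = NVMF_FABRIC_COMMAND_CONNECT,
		.recfmt = 0,			/* record format 0 */
		.qid = 0,			/* Admin queue */
		.sqsize = sqsize,		/* 0's based queue size */
		.kato = NVMF_KATO_DEFAULT,	/* keep alive, milliseconds */
	};

	*data = (struct nvmf_fabric_connect_data){
		.cntlid = NVMF_CNTLID_DYNAMIC,	/* let the controller pick */
	};
	/* Caller fills data->hostid, data->hostnqn, and data->subnqn. */
}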

struct nvmf_fabric_connect_rsp {
	union {
		struct {
			uint16_t cntlid;
			uint16_t authreq;
		} success;

		struct {
			uint16_t	ipo;
			uint8_t		iattr;
			uint8_t		reserved;
		} invalid;

		uint32_t raw;
	} status_code_specific;

	uint32_t	reserved0;
	uint16_t	sqhd;
	uint16_t	reserved1;
	uint16_t	cid;
	uint16_t	status;
};
_Static_assert(sizeof(struct nvmf_fabric_connect_rsp) == 16, "Incorrect size");
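
/*
 * Illustrative sketch (not part of the original header): on success the
 * Connect response returns the controller ID, while a status of
 * NVMF_FABRIC_SC_INVALID_PARAM repurposes the same dword
 * (status_code_specific.invalid) to report which byte of the command or data
 * was bad (ipo/iattr).  The SC extraction below assumes the standard NVMe
 * completion Status Field layout (bit 0 phase, bits 8:1 SC) and ignores SCT
 * for brevity; the helper name is illustrative.
 */
static inline int
nvmf_example_connect_ok(const struct nvmf_fabric_connect_rsp *rsp,
    uint16_t *cntlid)
{
	uint16_t sc;

	sc = (rsp->status >> 1) & 0xff;		/* Status Code */
	if (sc != 0)
		return (-1);	/* e.g. NVMF_FABRIC_SC_INVALID_PARAM */
	*cntlid = rsp->status_code_specific.success.cntlid;
	return (0);
}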

struct nvmf_fabric_disconnect_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint16_t	recfmt; /* Disconnect Record Format */
	uint8_t		reserved3[22];
};
_Static_assert(sizeof(struct nvmf_fabric_disconnect_cmd) == 64, "Incorrect size");

#define NVMF_PROP_SIZE_4	0
#define NVMF_PROP_SIZE_8	1

#define	NVMF_PROP_CAP		0x00	/* Controller Capabilities */
#define	NVMF_PROP_VS		0x08	/* Version */
#define	NVMF_PROP_CC		0x14	/* Controller Configuration */
#define	NVMF_PROP_CSTS		0x1C	/* Controller Status */
#define	NVMF_PROP_NSSR		0x20	/* NVM Subsystem Reset */

struct nvmf_fabric_prop_get_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[35];
	struct {
		uint8_t size		: 3;
		uint8_t reserved	: 5;
	} attrib;
	uint8_t		reserved3[3];
	uint32_t	ofst;
	uint8_t		reserved4[16];
};
_Static_assert(sizeof(struct nvmf_fabric_prop_get_cmd) == 64, "Incorrect size");

struct nvmf_fabric_prop_get_rsp {
	union {
		uint64_t u64;
		struct {
			uint32_t low;
			uint32_t high;
		} u32;
	} value;

	uint16_t	sqhd;
	uint16_t	reserved0;
	uint16_t	cid;
	uint16_t	status;
};
_Static_assert(sizeof(struct nvmf_fabric_prop_get_rsp) == 16, "Incorrect size");

struct nvmf_fabric_prop_set_cmd {
	uint8_t		opcode;
	uint8_t		reserved0;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved1[35];
	struct {
		uint8_t size		: 3;
		uint8_t reserved	: 5;
	} attrib;
	uint8_t		reserved2[3];
	uint32_t	ofst;

	union {
		uint64_t u64;
		struct {
			uint32_t low;
			uint32_t high;
		} u32;
	} value;

	uint8_t		reserved4[8];
};
_Static_assert(sizeof(struct nvmf_fabric_prop_set_cmd) == 64, "Incorrect size");
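
/*
 * Illustrative sketch (not part of the original header): a Property Get for
 * the 4-byte Controller Status (CSTS) property.  On completion the value is
 * returned in nvmf_fabric_prop_get_rsp.value (value.u32.low for a 4-byte
 * property).  The 0x7f Fabrics opcode literal and the function name are
 * illustrative; endian conversion is omitted.
 */
static inline void
nvmf_example_prop_get_csts(struct nvmf_fabric_prop_get_cmd *cmd, uint16_t cid)
{

	*cmd = (struct nvmf_fabric_prop_get_cmd){
		.opcode = 0x7f,			/* Fabrics command opcode */
		.cid = cid,
		.fctype = NVMF_FABRIC_COMMAND_PROPERTY_GET,
		.attrib.size = NVMF_PROP_SIZE_4,	/* 4-byte property */
		.ofst = NVMF_PROP_CSTS,
	};
}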

#define NVMF_NQN_MIN_LEN 11 /* The prefix in the spec is 11 characters */
#define NVMF_NQN_MAX_LEN 223
#define NVMF_NQN_UUID_PRE_LEN 32
#define NVMF_UUID_STRING_LEN 36
#define NVMF_NQN_UUID_PRE "nqn.2014-08.org.nvmexpress:uuid:"
#define NVMF_DISCOVERY_NQN "nqn.2014-08.org.nvmexpress.discovery"

#define NVMF_TRSTRING_MAX_LEN 32
#define NVMF_TRADDR_MAX_LEN 256
#define NVMF_TRSVCID_MAX_LEN 32
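
/*
 * Illustrative sketch (not part of the original header): the length limits
 * above come from the NVMe over Fabrics NQN rules ("nqn.yyyy-mm" is the
 * 11-character minimum prefix, 223 characters the maximum length).  The
 * helper below checks only those two limits and the "nqn." prefix, with no
 * library calls so it can sit next to the definitions; its name is
 * illustrative.
 */
static inline int
nvmf_example_nqn_length_ok(const char *nqn)
{
	size_t len;

	for (len = 0; len <= NVMF_NQN_MAX_LEN; len++) {
		if (nqn[len] == '\0')
			break;
	}
	if (len < NVMF_NQN_MIN_LEN || len > NVMF_NQN_MAX_LEN)
		return (0);
	return (nqn[0] == 'n' && nqn[1] == 'q' && nqn[2] == 'n' &&
	    nqn[3] == '.');
}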

/** RDMA transport-specific address subtype */
struct nvmf_rdma_transport_specific_address_subtype {
	/** RDMA QP service type (\ref nvmf_rdma_qptype) */
	uint8_t		rdma_qptype;

	/** RDMA provider type (\ref nvmf_rdma_prtype) */
	uint8_t		rdma_prtype;

	/** RDMA connection management service (\ref nvmf_rdma_cms) */
	uint8_t		rdma_cms;

	uint8_t		reserved0[5];

	/** RDMA partition key for AF_IB */
	uint16_t	rdma_pkey;

	uint8_t		reserved2[246];
};
_Static_assert(sizeof(struct nvmf_rdma_transport_specific_address_subtype) == 256,
		   "Incorrect size");

/** TCP Secure Socket Type */
enum nvme_tcp_secure_socket_type {
	/** No security */
	NVME_TCP_SECURITY_NONE				= 0,

	/** TLS (Secure Sockets) version 1.2 */
	NVME_TCP_SECURITY_TLS_1_2			= 1,

	/** TLS (Secure Sockets) version 1.3 */
	NVME_TCP_SECURITY_TLS_1_3			= 2,
};

/** TCP transport-specific address subtype */
struct nvme_tcp_transport_specific_address_subtype {
	/** Security type (\ref nvme_tcp_secure_socket_type) */
	uint8_t		sectype;

	uint8_t		reserved0[255];
};
_Static_assert(sizeof(struct nvme_tcp_transport_specific_address_subtype) == 256,
		   "Incorrect size");

/** Transport-specific address subtype */
union nvmf_transport_specific_address_subtype {
	uint8_t raw[256];

	/** RDMA */
	struct nvmf_rdma_transport_specific_address_subtype rdma;

	/** TCP */
	struct nvme_tcp_transport_specific_address_subtype tcp;
};
_Static_assert(sizeof(union nvmf_transport_specific_address_subtype) == 256,
		   "Incorrect size");

#define NVMF_MIN_ADMIN_MAX_SQ_SIZE 32

/**
 * Discovery Log Page entry
 */
struct nvmf_discovery_log_page_entry {
	/** Transport type (\ref nvmf_trtype) */
	uint8_t		trtype;

	/** Address family (\ref nvmf_adrfam) */
	uint8_t		adrfam;

	/** Subsystem type (\ref nvmf_subtype) */
	uint8_t		subtype;

	/** Transport requirements */
	struct {
		/** Secure channel requirements (\ref nvmf_treq_secure_channel) */
		uint8_t secure_channel : 2;

		uint8_t reserved : 6;
	} treq;

	/** NVM subsystem port ID */
	uint16_t	portid;

	/** Controller ID */
	uint16_t	cntlid;

	/** Admin max SQ size */
	uint16_t	asqsz;

	/** Entry Flags */
	uint16_t	eflags;

	uint8_t		reserved0[20];

	/** Transport service identifier */
	uint8_t		trsvcid[NVMF_TRSVCID_MAX_LEN];

	uint8_t		reserved1[192];

	/** NVM subsystem qualified name */
	uint8_t		subnqn[256];

	/** Transport address */
	uint8_t		traddr[NVMF_TRADDR_MAX_LEN];

	/** Transport-specific address subtype */
	union nvmf_transport_specific_address_subtype tsas;
};
_Static_assert(sizeof(struct nvmf_discovery_log_page_entry) == 1024, "Incorrect size");

struct nvmf_discovery_log_page {
	uint64_t	genctr;
	uint64_t	numrec;
	uint16_t	recfmt;
	uint8_t		reserved0[1006];
	struct nvmf_discovery_log_page_entry entries[0];
};
_Static_assert(sizeof(struct nvmf_discovery_log_page) == 1024, "Incorrect size");
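
/*
 * Illustrative sketch (not part of the original header): the Discovery Log
 * Page is a 1024-byte header followed by numrec 1024-byte entries, so entry i
 * lives at byte offset 1024 * (i + 1).  The helper assumes the caller already
 * fetched at least that many bytes of the log and rechecked genctr across
 * multiple reads; the name is illustrative.
 */
static inline const struct nvmf_discovery_log_page_entry *
nvmf_example_discovery_entry(const struct nvmf_discovery_log_page *log,
    uint64_t i)
{

	if (i >= log->numrec)
		return (NULL);
	return (&log->entries[i]);
}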

/* RDMA Fabric specific definitions below */

#define NVME_SGL_SUBTYPE_INVALIDATE_KEY	0xF

struct nvmf_rdma_request_private_data {
	uint16_t	recfmt; /* record format */
	uint16_t	qid;	/* queue id */
	uint16_t	hrqsize;	/* host receive queue size */
	uint16_t	hsqsize;	/* host send queue size */
	uint16_t	cntlid;		/* controller id */
	uint8_t		reserved[22];
};
_Static_assert(sizeof(struct nvmf_rdma_request_private_data) == 32, "Incorrect size");

struct nvmf_rdma_accept_private_data {
	uint16_t	recfmt; /* record format */
	uint16_t	crqsize;	/* controller receive queue size */
	uint8_t		reserved[28];
};
_Static_assert(sizeof(struct nvmf_rdma_accept_private_data) == 32, "Incorrect size");

struct nvmf_rdma_reject_private_data {
	uint16_t	recfmt; /* record format */
	uint16_t	sts; /* status */
};
_Static_assert(sizeof(struct nvmf_rdma_reject_private_data) == 4, "Incorrect size");

union nvmf_rdma_private_data {
	struct nvmf_rdma_request_private_data	pd_request;
	struct nvmf_rdma_accept_private_data	pd_accept;
	struct nvmf_rdma_reject_private_data	pd_reject;
};
_Static_assert(sizeof(union nvmf_rdma_private_data) == 32, "Incorrect size");
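
/*
 * Illustrative sketch (not part of the original header): the request private
 * data above rides in the RDMA_CM connection request when a host opens a
 * queue pair, before any capsule is exchanged.  A minimal fill might look
 * like this; the function name is illustrative, endian conversion is
 * omitted, and the queue-size and cntlid semantics follow the Fabrics RDMA
 * transport binding.
 */
static inline void
nvmf_example_rdma_request_pd(struct nvmf_rdma_request_private_data *pd,
    uint16_t qid, uint16_t hrqsize, uint16_t hsqsize, uint16_t cntlid)
{

	*pd = (struct nvmf_rdma_request_private_data){
		.recfmt = 0,
		.qid = qid,
		.hrqsize = hrqsize,	/* host receive queue size */
		.hsqsize = hsqsize,	/* host send queue size */
		.cntlid = cntlid,
	};
}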

enum nvmf_rdma_transport_error {
	NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH	= 0x1,
	NVMF_RDMA_ERROR_INVALID_RECFMT			= 0x2,
	NVMF_RDMA_ERROR_INVALID_QID			= 0x3,
	NVMF_RDMA_ERROR_INVALID_HSQSIZE			= 0x4,
	NVMF_RDMA_ERROR_INVALID_HRQSIZE			= 0x5,
	NVMF_RDMA_ERROR_NO_RESOURCES			= 0x6,
	NVMF_RDMA_ERROR_INVALID_IRD			= 0x7,
	NVMF_RDMA_ERROR_INVALID_ORD			= 0x8,
};

/* TCP transport specific definitions below */

/** NVMe/TCP PDU type */
enum nvme_tcp_pdu_type {
	/** Initialize Connection Request (ICReq) */
	NVME_TCP_PDU_TYPE_IC_REQ			= 0x00,

	/** Initialize Connection Response (ICResp) */
	NVME_TCP_PDU_TYPE_IC_RESP			= 0x01,

	/** Host to Controller Terminate Connection Request (H2CTermReq) */
	NVME_TCP_PDU_TYPE_H2C_TERM_REQ			= 0x02,

	/** Controller to Host Terminate Connection Request (C2HTermReq) */
	NVME_TCP_PDU_TYPE_C2H_TERM_REQ			= 0x03,

	/** Command Capsule (CapsuleCmd) */
	NVME_TCP_PDU_TYPE_CAPSULE_CMD			= 0x04,

	/** Response Capsule (CapsuleRsp) */
	NVME_TCP_PDU_TYPE_CAPSULE_RESP			= 0x05,

	/** Host To Controller Data (H2CData) */
	NVME_TCP_PDU_TYPE_H2C_DATA			= 0x06,

	/** Controller To Host Data (C2HData) */
	NVME_TCP_PDU_TYPE_C2H_DATA			= 0x07,

	/** Ready to Transfer (R2T) */
	NVME_TCP_PDU_TYPE_R2T				= 0x09,
};

/** Common NVMe/TCP PDU header */
struct nvme_tcp_common_pdu_hdr {
	/** PDU type (\ref nvme_tcp_pdu_type) */
	uint8_t				pdu_type;

	/** pdu_type-specific flags */
	uint8_t				flags;

	/** Length of PDU header (not including the Header Digest) */
	uint8_t				hlen;

	/** PDU Data Offset from the start of the PDU */
	uint8_t				pdo;

	/** Total number of bytes in PDU, including pdu_hdr */
	uint32_t			plen;
};
_Static_assert(sizeof(struct nvme_tcp_common_pdu_hdr) == 8, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, pdu_type) == 0,
		   "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, flags) == 1, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, hlen) == 2, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, pdo) == 3, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, plen) == 4, "Incorrect offset");

#define NVME_TCP_CH_FLAGS_HDGSTF		(1u << 0)
#define NVME_TCP_CH_FLAGS_DDGSTF		(1u << 1)
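
/*
 * Illustrative sketch (not part of the original header): a few of the sanity
 * checks a receiver can apply to any PDU from the common header alone.  When
 * NVME_TCP_CH_FLAGS_HDGSTF is set, a 4-byte header digest (CRC32C) follows
 * the header, so plen must leave room for it.  The function name is
 * illustrative; fields are little-endian on the wire.
 */
static inline int
nvmf_example_tcp_ch_sane(const struct nvme_tcp_common_pdu_hdr *ch)
{
	uint32_t min_len;

	if (ch->hlen < sizeof(*ch))
		return (0);
	min_len = ch->hlen;
	if (ch->flags & NVME_TCP_CH_FLAGS_HDGSTF)
		min_len += 4;		/* header digest */
	if (ch->plen < min_len)
		return (0);
	if (ch->pdo != 0 && ch->pdo < min_len)
		return (0);		/* data cannot overlap the header */
	return (1);
}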

/**
 * ICReq
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_IC_REQ
 */
struct nvme_tcp_ic_req {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				pfv;
	/** Specifies the data alignment for all PDUs transferred from the controller to the host that contain data */
	uint8_t					hpda;
	union {
		uint8_t				raw;
		struct {
			uint8_t			hdgst_enable : 1;
			uint8_t			ddgst_enable : 1;
			uint8_t			reserved : 6;
		} bits;
	} dgst;
	uint32_t				maxr2t;
	uint8_t					reserved16[112];
};
_Static_assert(sizeof(struct nvme_tcp_ic_req) == 128, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_ic_req, pfv) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_req, hpda) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_req, maxr2t) == 12, "Incorrect offset");

#define NVME_TCP_HPDA_MAX 31
#define NVME_TCP_CPDA_MAX 31
#define NVME_TCP_PDU_PDO_MAX_OFFSET     ((NVME_TCP_CPDA_MAX + 1) << 2)
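
/*
 * Illustrative sketch (not part of the original header): an ICReq is a
 * fixed-size 128-byte PDU, so hlen and plen are both sizeof(struct
 * nvme_tcp_ic_req) and pdo is 0.  PFV 0 is the PDU format version defined by
 * the specification.  The function name is illustrative and endian
 * conversion is omitted.
 */
static inline void
nvmf_example_build_icreq(struct nvme_tcp_ic_req *ic, uint8_t hpda,
    int want_hdgst, int want_ddgst, uint32_t maxr2t)
{

	*ic = (struct nvme_tcp_ic_req){
		.common.pdu_type = NVME_TCP_PDU_TYPE_IC_REQ,
		.common.hlen = sizeof(*ic),
		.common.plen = sizeof(*ic),
		.pfv = 0,
		.hpda = hpda,		/* 0 .. NVME_TCP_HPDA_MAX */
		.dgst.bits.hdgst_enable = want_hdgst ? 1 : 0,
		.dgst.bits.ddgst_enable = want_ddgst ? 1 : 0,
		.maxr2t = maxr2t,	/* 0's based */
	};
}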

/**
 * ICResp
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_IC_RESP
 */
struct nvme_tcp_ic_resp {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				pfv;
	/** Specifies the data alignment for all PDUs transferred from the host to the controller that contain data */
	uint8_t					cpda;
	union {
		uint8_t				raw;
		struct {
			uint8_t			hdgst_enable : 1;
			uint8_t			ddgst_enable : 1;
			uint8_t			reserved : 6;
		} bits;
	} dgst;
	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
	uint32_t				maxh2cdata;
	uint8_t					reserved16[112];
};
_Static_assert(sizeof(struct nvme_tcp_ic_resp) == 128, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_ic_resp, pfv) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_resp, cpda) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_resp, maxh2cdata) == 12, "Incorrect offset");

/**
 * TermReq
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_H2C_TERM_REQ or
 * NVME_TCP_PDU_TYPE_C2H_TERM_REQ
 */
struct nvme_tcp_term_req_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				fes;
	uint8_t					fei[4];
	uint8_t					reserved14[10];
};

_Static_assert(sizeof(struct nvme_tcp_term_req_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_term_req_hdr, fes) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_term_req_hdr, fei) == 10, "Incorrect offset");

enum nvme_tcp_term_req_fes {
	NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD			= 0x01,
	NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR			= 0x02,
	NVME_TCP_TERM_REQ_FES_HDGST_ERROR				= 0x03,
	NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE		= 0x04,
	NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED		= 0x05,
	NVME_TCP_TERM_REQ_FES_R2T_LIMIT_EXCEEDED			= 0x05,
	NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER	= 0x06,
};

/* Total length of a TermReq PDU (PDU header plus error data), in bytes, shall not exceed 152. */
#define NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE	128
#define NVME_TCP_TERM_REQ_PDU_MAX_SIZE		(NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE + sizeof(struct nvme_tcp_term_req_hdr))
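
/*
 * Illustrative sketch (not part of the original header): a host-to-controller
 * TermReq reports a fatal error status (fes), an optional field-specific
 * error index (fei), and may append up to 128 bytes of the offending PDU
 * header as error data, for a total PDU of at most
 * NVME_TCP_TERM_REQ_PDU_MAX_SIZE bytes.  The helper only fills the header
 * and returns how much error data the caller should copy after it; the name
 * is illustrative and endian conversion is omitted.
 */
static inline uint32_t
nvmf_example_build_h2c_term(struct nvme_tcp_term_req_hdr *hdr, uint16_t fes,
    uint32_t error_data_len)
{

	if (error_data_len > NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE)
		error_data_len = NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
	*hdr = (struct nvme_tcp_term_req_hdr){
		.common.pdu_type = NVME_TCP_PDU_TYPE_H2C_TERM_REQ,
		.common.hlen = sizeof(*hdr),
		.common.plen = sizeof(*hdr) + error_data_len,
		.fes = fes,		/* enum nvme_tcp_term_req_fes */
	};
	/* fei is left zero here; the caller may fill it when meaningful. */
	return (error_data_len);
}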

/**
 * CapsuleCmd
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_CAPSULE_CMD
 */
struct nvme_tcp_cmd {
	struct nvme_tcp_common_pdu_hdr	common;
	struct nvme_command		ccsqe;
	/**< icdoff hdgst padding + in-capsule data + ddgst (if enabled) */
};
_Static_assert(sizeof(struct nvme_tcp_cmd) == 72, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_cmd, ccsqe) == 8, "Incorrect offset");
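
/*
 * Illustrative sketch (not part of the original header): a CapsuleCmd PDU for
 * a command with no in-capsule data is just the 72-byte header (common header
 * plus the 64-byte SQE in ccsqe), optionally followed by a 4-byte header
 * digest; in-capsule data handling (CPDA padding, data digest) is left out
 * here.  The name is illustrative and endian conversion is omitted.
 */
static inline void
nvmf_example_init_capsule_cmd(struct nvme_tcp_cmd *cmd, int hdgst_enabled)
{

	cmd->common = (struct nvme_tcp_common_pdu_hdr){
		.pdu_type = NVME_TCP_PDU_TYPE_CAPSULE_CMD,
		.flags = hdgst_enabled ? NVME_TCP_CH_FLAGS_HDGSTF : 0,
		.hlen = sizeof(*cmd),
		.pdo = 0,		/* no PDU data */
		.plen = sizeof(*cmd) + (hdgst_enabled ? 4 : 0),
	};
	/* Caller fills cmd->ccsqe (the 64-byte SQE) separately. */
}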

/**
 * CapsuleResp
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_CAPSULE_RESP
 */
struct nvme_tcp_rsp {
	struct nvme_tcp_common_pdu_hdr	common;
	struct nvme_completion		rccqe;
};
_Static_assert(sizeof(struct nvme_tcp_rsp) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_rsp, rccqe) == 8, "Incorrect offset");


/**
 * H2CData
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_H2C_DATA
 */
struct nvme_tcp_h2c_data_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				cccid;
	uint16_t				ttag;
	uint32_t				datao;
	uint32_t				datal;
	uint8_t					reserved20[4];
};
_Static_assert(sizeof(struct nvme_tcp_h2c_data_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, cccid) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, ttag) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, datao) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, datal) == 16, "Incorrect offset");

#define NVME_TCP_H2C_DATA_FLAGS_LAST_PDU	(1u << 2)
#define NVME_TCP_H2C_DATA_FLAGS_SUCCESS		(1u << 3)
#define NVME_TCP_H2C_DATA_PDO_MULT		8u

/**
 * C2HData
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_C2H_DATA
 */
struct nvme_tcp_c2h_data_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				cccid;
	uint8_t					reserved10[2];
	uint32_t				datao;
	uint32_t				datal;
	uint8_t					reserved20[4];
};
_Static_assert(sizeof(struct nvme_tcp_c2h_data_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_c2h_data_hdr, cccid) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_c2h_data_hdr, datao) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_c2h_data_hdr, datal) == 16, "Incorrect offset");

#define NVME_TCP_C2H_DATA_FLAGS_SUCCESS		(1u << 3)
#define NVME_TCP_C2H_DATA_FLAGS_LAST_PDU	(1u << 2)
#define NVME_TCP_C2H_DATA_PDO_MULT		8u

/**
 * R2T
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_R2T
 */
struct nvme_tcp_r2t_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				cccid;
	uint16_t				ttag;
	uint32_t				r2to;
	uint32_t				r2tl;
	uint8_t					reserved20[4];
};
_Static_assert(sizeof(struct nvme_tcp_r2t_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, cccid) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, ttag) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, r2to) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, r2tl) == 16, "Incorrect offset");
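
/*
 * Illustrative sketch (not part of the original header): an R2T asks the host
 * to send r2tl bytes starting at offset r2to, and the host answers with one
 * or more H2CData PDUs that echo cccid and ttag, each carrying at most the
 * maxh2cdata negotiated in the ICResp, with LAST_PDU set on the final one.
 * The helper fills the header for one such chunk, assuming CPDA 0 and no
 * header digest (so the data starts right after the 24-byte header); the
 * name is illustrative, endian conversion and digest flags are omitted.
 */
static inline uint32_t
nvmf_example_h2c_chunk(struct nvme_tcp_h2c_data_hdr *h2c,
    const struct nvme_tcp_r2t_hdr *r2t, uint32_t done, uint32_t maxh2cdata)
{
	uint32_t datal;

	datal = r2t->r2tl - done;		/* bytes still owed */
	if (datal > maxh2cdata)
		datal = maxh2cdata;
	*h2c = (struct nvme_tcp_h2c_data_hdr){
		.common.pdu_type = NVME_TCP_PDU_TYPE_H2C_DATA,
		.common.flags = (done + datal == r2t->r2tl) ?
		    NVME_TCP_H2C_DATA_FLAGS_LAST_PDU : 0,
		.common.hlen = sizeof(*h2c),
		.common.pdo = sizeof(*h2c),	/* data follows the header */
		.common.plen = sizeof(*h2c) + datal,
		.cccid = r2t->cccid,
		.ttag = r2t->ttag,
		.datao = r2t->r2to + done,
		.datal = datal,
	};
	return (datal);
}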

#pragma pack(pop)

#endif /* __NVMF_PROTO_H__ */