/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 */

/* Derived from include/spdk/nvmf_spec.h from Intel's SPDK. */

#ifndef __NVMF_PROTO_H__
#define	__NVMF_PROTO_H__

#include <sys/types.h>
#include <sys/cdefs.h>
#ifdef _KERNEL
#include <sys/stddef.h>
#else
#include <stddef.h>
#endif
#include <dev/nvme/nvme.h>

/**
 * \file
 * NVMe over Fabrics specification definitions
 */

#define	NVME_NQN_FIELD_SIZE		256

struct nvmf_capsule_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[35];
	uint8_t		fabric_specific[24];
};
_Static_assert(sizeof(struct nvmf_capsule_cmd) == 64, "Incorrect size");

/* Fabric Command Set */
enum nvmf_fabric_cmd_types {
	NVMF_FABRIC_COMMAND_PROPERTY_SET			= 0x00,
	NVMF_FABRIC_COMMAND_CONNECT				= 0x01,
	NVMF_FABRIC_COMMAND_PROPERTY_GET			= 0x04,
	NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND			= 0x05,
	NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV			= 0x06,
	NVMF_FABRIC_COMMAND_DISCONNECT				= 0x08,
	NVMF_FABRIC_COMMAND_START_VENDOR_SPECIFIC		= 0xC0,
};

enum nvmf_fabric_cmd_status_code {
	NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT		= 0x80,
	NVMF_FABRIC_SC_CONTROLLER_BUSY			= 0x81,
	NVMF_FABRIC_SC_INVALID_PARAM			= 0x82,
	NVMF_FABRIC_SC_RESTART_DISCOVERY		= 0x83,
	NVMF_FABRIC_SC_INVALID_HOST			= 0x84,
	NVMF_FABRIC_SC_INVALID_QUEUE_TYPE		= 0x85,
	NVMF_FABRIC_SC_LOG_RESTART_DISCOVERY		= 0x90,
	NVMF_FABRIC_SC_AUTH_REQUIRED			= 0x91,
};

/**
 * RDMA Queue Pair service types
 */
enum nvmf_rdma_qptype {
	/** Reliable connected */
	NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED		= 0x1,

	/** Reliable datagram */
	NVMF_RDMA_QPTYPE_RELIABLE_DATAGRAM		= 0x2,
};

/**
 * RDMA provider types
 */
enum nvmf_rdma_prtype {
	/** No provider specified */
	NVMF_RDMA_PRTYPE_NONE		= 0x1,

	/** InfiniBand */
	NVMF_RDMA_PRTYPE_IB		= 0x2,

	/** RoCE v1 */
	NVMF_RDMA_PRTYPE_ROCE		= 0x3,

	/** RoCE v2 */
	NVMF_RDMA_PRTYPE_ROCE2		= 0x4,

	/** iWARP */
	NVMF_RDMA_PRTYPE_IWARP		= 0x5,
};

/**
 * RDMA connection management service types
 */
enum nvmf_rdma_cms {
	/** Sockets based endpoint addressing */
	NVMF_RDMA_CMS_RDMA_CM		= 0x1,
};

/**
 * NVMe over Fabrics transport types
 */
enum nvmf_trtype {
	/** RDMA */
	NVMF_TRTYPE_RDMA		= 0x1,

	/** Fibre Channel */
	NVMF_TRTYPE_FC			= 0x2,

	/** TCP */
	NVMF_TRTYPE_TCP			= 0x3,

	/** Intra-host transport (loopback) */
	NVMF_TRTYPE_INTRA_HOST		= 0xfe,
};

/**
 * Address family types
 */
enum nvmf_adrfam {
	/** IPv4 (AF_INET) */
	NVMF_ADRFAM_IPV4		= 0x1,

	/** IPv6 (AF_INET6) */
	NVMF_ADRFAM_IPV6		= 0x2,

	/** InfiniBand (AF_IB) */
	NVMF_ADRFAM_IB			= 0x3,

	/** Fibre Channel address family */
	NVMF_ADRFAM_FC			= 0x4,

	/** Intra-host transport (loopback) */
	NVMF_ADRFAM_INTRA_HOST		= 0xfe,
};

/**
 * NVM subsystem types
 */
enum nvmf_subtype {
	/** Referral to a discovery service */
	NVMF_SUBTYPE_DISCOVERY		= 0x1,

	/** NVM Subsystem */
	NVMF_SUBTYPE_NVME		= 0x2,

	/** Current Discovery Subsystem */
	NVMF_SUBTYPE_DISCOVERY_CURRENT	= 0x3
};

/* Discovery Log Entry Flags - Duplicate Returned Information */
#define NVMF_DISCOVERY_LOG_EFLAGS_DUPRETINFO (1u << 0u)

/* Discovery Log Entry Flags - Explicit Persistent Connection Support for Discovery */
#define NVMF_DISCOVERY_LOG_EFLAGS_EPCSD (1u << 1u)

/**
 * Connections shall be made over a fabric secure channel
 */
enum nvmf_treq_secure_channel {
	/** Not specified */
	NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED		= 0x0,

	/** Required */
	NVMF_TREQ_SECURE_CHANNEL_REQUIRED		= 0x1,

	/** Not required */
	NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED		= 0x2,
};

struct nvmf_fabric_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[59];
} __aligned(8);

struct nvmf_fabric_auth_recv_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype; /* NVMF_FABRIC_COMMAND_AUTHENTICATION_RECV (0x06) */
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint8_t		reserved3;
	uint8_t		spsp0;
	uint8_t		spsp1;
	uint8_t		secp;
	uint32_t	al;
	uint8_t		reserved4[16];
};
_Static_assert(sizeof(struct nvmf_fabric_auth_recv_cmd) == 64, "Incorrect size");

struct nvmf_fabric_auth_send_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype; /* NVMF_FABRIC_COMMAND_AUTHENTICATION_SEND (0x05) */
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint8_t		reserved3;
	uint8_t		spsp0;
	uint8_t		spsp1;
	uint8_t		secp;
	uint32_t	tl;
	uint8_t		reserved4[16];
};
_Static_assert(sizeof(struct nvmf_fabric_auth_send_cmd) == 64, "Incorrect size");

struct nvmf_fabric_connect_data {
	uint8_t		hostid[16];
	uint16_t	cntlid;
	uint8_t		reserved5[238];
	uint8_t		subnqn[NVME_NQN_FIELD_SIZE];
	uint8_t		hostnqn[NVME_NQN_FIELD_SIZE];
	uint8_t		reserved6[256];
};
_Static_assert(sizeof(struct nvmf_fabric_connect_data) == 1024, "Incorrect size");

struct nvmf_fabric_connect_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint16_t	recfmt; /* Connect Record Format */
	uint16_t	qid; /* Queue Identifier */
	uint16_t	sqsize; /* Submission Queue Size */
	uint8_t		cattr; /* queue attributes */
	uint8_t		reserved3;
	uint32_t	kato; /* keep alive timeout */
	uint8_t		reserved4[12];
};
_Static_assert(sizeof(struct nvmf_fabric_connect_cmd) == 64, "Incorrect size");

#define	NVMF_CNTLID_DYNAMIC	0xFFFF
#define	NVMF_CNTLID_STATIC_ANY	0xFFFE

/*
 * XXX: 5.3 in NVMe-over-Fabrics 1.1 gives this as an upper bound in
 * the Discovery Log Entry.
 */
#define	NVMF_CNTLID_STATIC_MAX	0xFFEF

/* 5.21.1.15 in NVMe 1.4b */
#define	NVMF_KATO_DEFAULT			(120000)

#define NVMF_CONNECT_ATTR_PRIORITY_CLASS	(0x3)
#define NVMF_CONNECT_ATTR_DISABLE_SQ_FC		(1u << 2)
#define NVMF_CONNECT_ATTR_IO_QUEUE_DELETION	(1u << 3)
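
/*
 * Illustrative sketch (not part of the protocol definitions): one plausible
 * way a host could populate a Connect command and its in-capsule data for
 * the Admin queue (qid 0).  A little-endian host is assumed so multi-byte
 * fields land in wire order, the 0x7f Fabrics command opcode is written as
 * a literal rather than relying on a particular nvme.h constant, and the
 * NQN copies use open-coded loops to avoid pulling in string routines.
 * The CID and the SGL describing the 1KB data block are left to the
 * transport code.
 */
static inline void
nvmf_example_build_admin_connect(struct nvmf_fabric_connect_cmd *cmd,
    struct nvmf_fabric_connect_data *data, const uint8_t hostid[16],
    const char *hostnqn, const char *subnqn, uint16_t queue_entries,
    uint32_t kato)
{
	size_t i;

	*cmd = (struct nvmf_fabric_connect_cmd){
		.opcode = 0x7f,			/* Fabrics command opcode */
		.fctype = NVMF_FABRIC_COMMAND_CONNECT,
		.recfmt = 0,
		.qid = 0,			/* Admin queue */
		.sqsize = (uint16_t)(queue_entries - 1), /* SQSIZE is 0's based */
		.kato = kato,			/* keep alive timeout in ms */
	};

	*data = (struct nvmf_fabric_connect_data){
		.cntlid = NVMF_CNTLID_DYNAMIC,	/* let the controller pick */
	};
	for (i = 0; i < sizeof(data->hostid); i++)
		data->hostid[i] = hostid[i];
	for (i = 0; i < sizeof(data->hostnqn) - 1 && hostnqn[i] != '\0'; i++)
		data->hostnqn[i] = (uint8_t)hostnqn[i];
	for (i = 0; i < sizeof(data->subnqn) - 1 && subnqn[i] != '\0'; i++)
		data->subnqn[i] = (uint8_t)subnqn[i];
}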

struct nvmf_fabric_connect_rsp {
	union {
		struct {
			uint16_t cntlid;
			uint16_t authreq;
		} success;

		struct {
			uint16_t	ipo;
			uint8_t		iattr;
			uint8_t		reserved;
		} invalid;

		uint32_t raw;
	} status_code_specific;

	uint32_t	reserved0;
	uint16_t	sqhd;
	uint16_t	reserved1;
	uint16_t	cid;
	uint16_t	status;
};
_Static_assert(sizeof(struct nvmf_fabric_connect_rsp) == 16, "Incorrect size");

struct nvmf_fabric_disconnect_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[19];
	struct nvme_sgl_descriptor sgl1;
	uint16_t	recfmt; /* Disconnect Record Format */
	uint8_t		reserved3[22];
};
_Static_assert(sizeof(struct nvmf_fabric_disconnect_cmd) == 64, "Incorrect size");

#define NVMF_PROP_SIZE_4	0
#define NVMF_PROP_SIZE_8	1

#define	NVMF_PROP_CAP		0x00	/* Controller Capabilities */
#define	NVMF_PROP_VS		0x08	/* Version */
#define	NVMF_PROP_CC		0x14	/* Controller Configuration */
#define	NVMF_PROP_CSTS		0x1C	/* Controller Status */
#define	NVMF_PROP_NSSR		0x20	/* NVM Subsystem Reset */

struct nvmf_fabric_prop_get_cmd {
	uint8_t		opcode;
	uint8_t		reserved1;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved2[35];
	struct {
		uint8_t size		: 3;
		uint8_t reserved	: 5;
	} attrib;
	uint8_t		reserved3[3];
	uint32_t	ofst;
	uint8_t		reserved4[16];
};
_Static_assert(sizeof(struct nvmf_fabric_prop_get_cmd) == 64, "Incorrect size");

struct nvmf_fabric_prop_get_rsp {
	union {
		uint64_t u64;
		struct {
			uint32_t low;
			uint32_t high;
		} u32;
	} value;

	uint16_t	sqhd;
	uint16_t	reserved0;
	uint16_t	cid;
	uint16_t	status;
};
_Static_assert(sizeof(struct nvmf_fabric_prop_get_rsp) == 16, "Incorrect size");

struct nvmf_fabric_prop_set_cmd {
	uint8_t		opcode;
	uint8_t		reserved0;
	uint16_t	cid;
	uint8_t		fctype;
	uint8_t		reserved1[35];
	struct {
		uint8_t size		: 3;
		uint8_t reserved	: 5;
	} attrib;
	uint8_t		reserved2[3];
	uint32_t	ofst;

	union {
		uint64_t u64;
		struct {
			uint32_t low;
			uint32_t high;
		} u32;
	} value;

	uint8_t		reserved4[8];
};
_Static_assert(sizeof(struct nvmf_fabric_prop_set_cmd) == 64, "Incorrect size");
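
/*
 * Illustrative sketch (not part of the protocol definitions): filling a
 * Property Get for the 8-byte CAP property and a Property Set that writes a
 * new 4-byte CC value.  A little-endian host is assumed and the 0x7f
 * Fabrics command opcode is spelled as a literal for the example.
 */
static inline void
nvmf_example_prop_get_cap(struct nvmf_fabric_prop_get_cmd *cmd)
{
	*cmd = (struct nvmf_fabric_prop_get_cmd){
		.opcode = 0x7f,			/* Fabrics command opcode */
		.fctype = NVMF_FABRIC_COMMAND_PROPERTY_GET,
		.attrib.size = NVMF_PROP_SIZE_8,	/* CAP is 8 bytes */
		.ofst = NVMF_PROP_CAP,
	};
}

static inline void
nvmf_example_prop_set_cc(struct nvmf_fabric_prop_set_cmd *cmd, uint32_t cc)
{
	*cmd = (struct nvmf_fabric_prop_set_cmd){
		.opcode = 0x7f,			/* Fabrics command opcode */
		.fctype = NVMF_FABRIC_COMMAND_PROPERTY_SET,
		.attrib.size = NVMF_PROP_SIZE_4,	/* CC is 4 bytes */
		.ofst = NVMF_PROP_CC,
		.value.u32.low = cc,
	};
}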

#define NVMF_NQN_MIN_LEN 11 /* The prefix in the spec is 11 characters */
#define NVMF_NQN_MAX_LEN 223
#define NVMF_NQN_UUID_PRE_LEN 32
#define NVMF_UUID_STRING_LEN 36
#define NVMF_NQN_UUID_PRE "nqn.2014-08.org.nvmexpress:uuid:"
#define NVMF_DISCOVERY_NQN "nqn.2014-08.org.nvmexpress.discovery"

#define NVMF_TRSTRING_MAX_LEN 32
#define NVMF_TRADDR_MAX_LEN 256
#define NVMF_TRSVCID_MAX_LEN 32

/** RDMA transport-specific address subtype */
struct nvmf_rdma_transport_specific_address_subtype {
	/** RDMA QP service type (\ref nvmf_rdma_qptype) */
	uint8_t		rdma_qptype;

	/** RDMA provider type (\ref nvmf_rdma_prtype) */
	uint8_t		rdma_prtype;

	/** RDMA connection management service (\ref nvmf_rdma_cms) */
	uint8_t		rdma_cms;

	uint8_t		reserved0[5];

	/** RDMA partition key for AF_IB */
	uint16_t	rdma_pkey;

	uint8_t		reserved2[246];
};
_Static_assert(sizeof(struct nvmf_rdma_transport_specific_address_subtype) == 256,
		   "Incorrect size");

/** TCP Secure Socket Type */
enum nvme_tcp_secure_socket_type {
	/** No security */
	NVME_TCP_SECURITY_NONE				= 0,

	/** TLS (Secure Sockets) version 1.2 */
	NVME_TCP_SECURITY_TLS_1_2			= 1,

	/** TLS (Secure Sockets) version 1.3 */
	NVME_TCP_SECURITY_TLS_1_3			= 2,
};

/** TCP transport-specific address subtype */
struct nvme_tcp_transport_specific_address_subtype {
	/** Security type (\ref nvme_tcp_secure_socket_type) */
	uint8_t		sectype;

	uint8_t		reserved0[255];
};
_Static_assert(sizeof(struct nvme_tcp_transport_specific_address_subtype) == 256,
		   "Incorrect size");

/** Transport-specific address subtype */
union nvmf_transport_specific_address_subtype {
	uint8_t raw[256];

	/** RDMA */
	struct nvmf_rdma_transport_specific_address_subtype rdma;

	/** TCP */
	struct nvme_tcp_transport_specific_address_subtype tcp;
};
_Static_assert(sizeof(union nvmf_transport_specific_address_subtype) == 256,
		   "Incorrect size");

#define NVMF_MIN_ADMIN_MAX_SQ_SIZE 32

/**
 * Discovery Log Page entry
 */
struct nvmf_discovery_log_page_entry {
	/** Transport type (\ref nvmf_trtype) */
	uint8_t		trtype;

	/** Address family (\ref nvmf_adrfam) */
	uint8_t		adrfam;

	/** Subsystem type (\ref nvmf_subtype) */
	uint8_t		subtype;

	/** Transport requirements */
	struct {
		/** Secure channel requirements (\ref nvmf_treq_secure_channel) */
		uint8_t secure_channel : 2;

		uint8_t reserved : 6;
	} treq;

	/** NVM subsystem port ID */
	uint16_t	portid;

	/** Controller ID */
	uint16_t	cntlid;

	/** Admin max SQ size */
	uint16_t	asqsz;

	/** Entry Flags */
	uint16_t	eflags;

	uint8_t		reserved0[20];

	/** Transport service identifier */
	uint8_t		trsvcid[NVMF_TRSVCID_MAX_LEN];

	uint8_t		reserved1[192];

	/** NVM subsystem qualified name */
	uint8_t		subnqn[256];

	/** Transport address */
	uint8_t		traddr[NVMF_TRADDR_MAX_LEN];

	/** Transport-specific address subtype */
	union nvmf_transport_specific_address_subtype tsas;
};
_Static_assert(sizeof(struct nvmf_discovery_log_page_entry) == 1024, "Incorrect size");

struct nvmf_discovery_log_page {
	uint64_t	genctr;
	uint64_t	numrec;
	uint16_t	recfmt;
	uint8_t		reserved0[1006];
	struct nvmf_discovery_log_page_entry entries[0];
};
_Static_assert(sizeof(struct nvmf_discovery_log_page) == 1024, "Incorrect size");
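
/*
 * Illustrative sketch (not part of the protocol definitions): walking the
 * entries of a Discovery Log Page that has already been fetched into a
 * contiguous buffer.  'handle_entry' is a hypothetical caller-supplied
 * callback, and the caller is assumed to have read at least the 1KB header
 * plus 'numrec' entries.
 */
static inline void
nvmf_example_walk_discovery_log(const struct nvmf_discovery_log_page *log,
    void (*handle_entry)(const struct nvmf_discovery_log_page_entry *))
{
	uint64_t i;

	for (i = 0; i < log->numrec; i++)
		handle_entry(&log->entries[i]);
}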

/* RDMA Fabric specific definitions below */

#define NVME_SGL_SUBTYPE_INVALIDATE_KEY	0xF

struct nvmf_rdma_request_private_data {
	uint16_t	recfmt; /* record format */
	uint16_t	qid;	/* queue id */
	uint16_t	hrqsize;	/* host receive queue size */
	uint16_t	hsqsize;	/* host send queue size */
	uint16_t	cntlid;		/* controller id */
	uint8_t		reserved[22];
};
_Static_assert(sizeof(struct nvmf_rdma_request_private_data) == 32, "Incorrect size");

struct nvmf_rdma_accept_private_data {
	uint16_t	recfmt; /* record format */
	uint16_t	crqsize;	/* controller receive queue size */
	uint8_t		reserved[28];
};
_Static_assert(sizeof(struct nvmf_rdma_accept_private_data) == 32, "Incorrect size");

struct nvmf_rdma_reject_private_data {
	uint16_t	recfmt; /* record format */
	uint16_t	sts; /* status */
};
_Static_assert(sizeof(struct nvmf_rdma_reject_private_data) == 4, "Incorrect size");

union nvmf_rdma_private_data {
	struct nvmf_rdma_request_private_data	pd_request;
	struct nvmf_rdma_accept_private_data	pd_accept;
	struct nvmf_rdma_reject_private_data	pd_reject;
};
_Static_assert(sizeof(union nvmf_rdma_private_data) == 32, "Incorrect size");
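
/*
 * Illustrative sketch (not part of the protocol definitions): packing the
 * private data a host might carry in an RDMA CM connection request for a
 * queue.  A little-endian host is assumed; whether each size field is 0's
 * or 1's based is defined by the RDMA transport binding and left to the
 * caller here.
 */
static inline void
nvmf_example_rdma_request_pd(struct nvmf_rdma_request_private_data *pd,
    uint16_t qid, uint16_t hrqsize, uint16_t hsqsize, uint16_t cntlid)
{
	*pd = (struct nvmf_rdma_request_private_data){
		.recfmt = 0,
		.qid = qid,
		.hrqsize = hrqsize,
		.hsqsize = hsqsize,
		.cntlid = cntlid,
	};
}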

enum nvmf_rdma_transport_error {
	NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH	= 0x1,
	NVMF_RDMA_ERROR_INVALID_RECFMT			= 0x2,
	NVMF_RDMA_ERROR_INVALID_QID			= 0x3,
	NVMF_RDMA_ERROR_INVALID_HSQSIZE			= 0x4,
	NVMF_RDMA_ERROR_INVALID_HRQSIZE			= 0x5,
	NVMF_RDMA_ERROR_NO_RESOURCES			= 0x6,
	NVMF_RDMA_ERROR_INVALID_IRD			= 0x7,
	NVMF_RDMA_ERROR_INVALID_ORD			= 0x8,
};

/* TCP transport specific definitions below */

/** NVMe/TCP PDU type */
enum nvme_tcp_pdu_type {
	/** Initialize Connection Request (ICReq) */
	NVME_TCP_PDU_TYPE_IC_REQ			= 0x00,

	/** Initialize Connection Response (ICResp) */
	NVME_TCP_PDU_TYPE_IC_RESP			= 0x01,

	/** Host to Controller Terminate Connection Request (H2CTermReq) */
	NVME_TCP_PDU_TYPE_H2C_TERM_REQ			= 0x02,

	/** Controller to Host Terminate Connection Request (C2HTermReq) */
	NVME_TCP_PDU_TYPE_C2H_TERM_REQ			= 0x03,

	/** Command Capsule (CapsuleCmd) */
	NVME_TCP_PDU_TYPE_CAPSULE_CMD			= 0x04,

	/** Response Capsule (CapsuleRsp) */
	NVME_TCP_PDU_TYPE_CAPSULE_RESP			= 0x05,

	/** Host To Controller Data (H2CData) */
	NVME_TCP_PDU_TYPE_H2C_DATA			= 0x06,

	/** Controller To Host Data (C2HData) */
	NVME_TCP_PDU_TYPE_C2H_DATA			= 0x07,

	/** Ready to Transfer (R2T) */
	NVME_TCP_PDU_TYPE_R2T				= 0x09,
};

/** Common NVMe/TCP PDU header */
struct nvme_tcp_common_pdu_hdr {
	/** PDU type (\ref nvme_tcp_pdu_type) */
	uint8_t				pdu_type;

	/** pdu_type-specific flags */
	uint8_t				flags;

	/** Length of PDU header (not including the Header Digest) */
	uint8_t				hlen;

	/** PDU Data Offset from the start of the PDU */
	uint8_t				pdo;

	/** Total number of bytes in PDU, including pdu_hdr */
	uint32_t			plen;
};
_Static_assert(sizeof(struct nvme_tcp_common_pdu_hdr) == 8, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, pdu_type) == 0,
		   "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, flags) == 1, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, hlen) == 2, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, pdo) == 3, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_common_pdu_hdr, plen) == 4, "Incorrect offset");

#define NVME_TCP_CH_FLAGS_HDGSTF		(1u << 0)
#define NVME_TCP_CH_FLAGS_DDGSTF		(1u << 1)
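
/*
 * Illustrative sketch (not part of the protocol definitions): basic sanity
 * checks a receiver might apply to a common PDU header before trusting the
 * rest of the PDU.  Each digest, when the corresponding HDGSTF/DDGSTF flag
 * is set, contributes 4 bytes to the minimum PDU length; per-PDU-type
 * validation is left to the transport code.
 */
static inline int
nvmf_example_ch_sane(const struct nvme_tcp_common_pdu_hdr *ch)
{
	uint32_t min_plen;

	/* The header length must at least cover the common header itself. */
	if (ch->hlen < sizeof(struct nvme_tcp_common_pdu_hdr))
		return (0);

	min_plen = ch->hlen;
	if (ch->flags & NVME_TCP_CH_FLAGS_HDGSTF)
		min_plen += 4;		/* header digest */
	if (ch->flags & NVME_TCP_CH_FLAGS_DDGSTF)
		min_plen += 4;		/* data digest */
	return (ch->plen >= min_plen);
}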

/**
 * ICReq
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_IC_REQ
 */
struct nvme_tcp_ic_req {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				pfv;
	/** Specifies the data alignment for all PDUs transferred from the controller to the host that contain data */
	uint8_t					hpda;
	union {
		uint8_t				raw;
		struct {
			uint8_t			hdgst_enable : 1;
			uint8_t			ddgst_enable : 1;
			uint8_t			reserved : 6;
		} bits;
	} dgst;
	uint32_t				maxr2t;
	uint8_t					reserved16[112];
};
_Static_assert(sizeof(struct nvme_tcp_ic_req) == 128, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_ic_req, pfv) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_req, hpda) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_req, maxr2t) == 12, "Incorrect offset");

#define NVME_TCP_HPDA_MAX 31
#define NVME_TCP_CPDA_MAX 31
#define NVME_TCP_PDU_PDO_MAX_OFFSET     ((NVME_TCP_CPDA_MAX + 1) << 2)
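
/*
 * Illustrative sketch (not part of the protocol definitions): a host-side
 * ICReq with both digests left disabled (zero) and no additional R2T
 * resources requested.  The ICReq PDU has a fixed 128-byte header and no
 * data, so hlen and plen are both sizeof(struct nvme_tcp_ic_req); a
 * little-endian host is assumed.
 */
static inline void
nvmf_example_build_icreq(struct nvme_tcp_ic_req *icreq, uint8_t hpda)
{
	*icreq = (struct nvme_tcp_ic_req){
		.common.pdu_type = NVME_TCP_PDU_TYPE_IC_REQ,
		.common.hlen = sizeof(struct nvme_tcp_ic_req),
		.common.plen = sizeof(struct nvme_tcp_ic_req),
		.pfv = 0,			/* PDU format version 0 */
		.hpda = hpda,			/* <= NVME_TCP_HPDA_MAX */
		.maxr2t = 0,			/* 0's based: one outstanding R2T */
	};
}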

/**
 * ICResp
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_IC_RESP
 */
struct nvme_tcp_ic_resp {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				pfv;
	/** Specifies the data alignment for all PDUs transferred from the host to the controller that contain data */
	uint8_t					cpda;
	union {
		uint8_t				raw;
		struct {
			uint8_t			hdgst_enable : 1;
			uint8_t			ddgst_enable : 1;
			uint8_t			reserved : 6;
		} bits;
	} dgst;
	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
	uint32_t				maxh2cdata;
	uint8_t					reserved16[112];
};
_Static_assert(sizeof(struct nvme_tcp_ic_resp) == 128, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_ic_resp, pfv) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_resp, cpda) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_ic_resp, maxh2cdata) == 12, "Incorrect offset");

/**
 * TermReq
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_H2C_TERM_REQ or
 * NVME_TCP_PDU_TYPE_C2H_TERM_REQ
 */
struct nvme_tcp_term_req_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				fes;
	uint8_t					fei[4];
	uint8_t					reserved14[10];
};

_Static_assert(sizeof(struct nvme_tcp_term_req_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_term_req_hdr, fes) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_term_req_hdr, fei) == 10, "Incorrect offset");

enum nvme_tcp_term_req_fes {
	NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD			= 0x01,
	NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR			= 0x02,
	NVME_TCP_TERM_REQ_FES_HDGST_ERROR				= 0x03,
	NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE		= 0x04,
	NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED		= 0x05,
	/* Shares the 0x05 encoding with DATA_TRANSFER_LIMIT_EXCEEDED. */
	NVME_TCP_TERM_REQ_FES_R2T_LIMIT_EXCEEDED			= 0x05,
	NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER	= 0x06,
};

/* The total length of a term req PDU (PDU header plus data) must not exceed 152 bytes. */
#define NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE	128
#define NVME_TCP_TERM_REQ_PDU_MAX_SIZE		(NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE + sizeof(struct nvme_tcp_term_req_hdr))
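
/*
 * Illustrative sketch (not part of the protocol definitions): a controller
 * building a C2HTermReq header after detecting a fatal error.  'err_datal'
 * is the number of bytes of the offending PDU header that will follow as
 * PDU data, capped at NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE; copying that
 * data and tearing down the connection are left to the caller.  A
 * little-endian host is assumed.
 */
static inline void
nvmf_example_build_c2h_term_req(struct nvme_tcp_term_req_hdr *hdr,
    uint16_t fes, uint32_t fei, uint32_t err_datal)
{
	if (err_datal > NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE)
		err_datal = NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
	*hdr = (struct nvme_tcp_term_req_hdr){
		.common.pdu_type = NVME_TCP_PDU_TYPE_C2H_TERM_REQ,
		.common.hlen = sizeof(struct nvme_tcp_term_req_hdr),
		.common.plen = sizeof(struct nvme_tcp_term_req_hdr) + err_datal,
		.fes = fes,
	};
	/* FEI is a 4-byte error information value; its meaning depends on FES. */
	hdr->fei[0] = (uint8_t)(fei & 0xff);
	hdr->fei[1] = (uint8_t)((fei >> 8) & 0xff);
	hdr->fei[2] = (uint8_t)((fei >> 16) & 0xff);
	hdr->fei[3] = (uint8_t)((fei >> 24) & 0xff);
}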

/**
 * CapsuleCmd
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_CAPSULE_CMD
 */
struct nvme_tcp_cmd {
	struct nvme_tcp_common_pdu_hdr	common;
	struct nvme_command		ccsqe;
	/**< icdoff hdgst padding + in-capsule data + ddgst (if enabled) */
};
_Static_assert(sizeof(struct nvme_tcp_cmd) == 72, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_cmd, ccsqe) == 8, "Incorrect offset");

/**
 * CapsuleResp
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_CAPSULE_RESP
 */
struct nvme_tcp_rsp {
	struct nvme_tcp_common_pdu_hdr	common;
	struct nvme_completion		rccqe;
};
_Static_assert(sizeof(struct nvme_tcp_rsp) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_rsp, rccqe) == 8, "Incorrect offset");

/**
 * H2CData
 *
 * hdr.pdu_type == NVME_TCP_PDU_TYPE_H2C_DATA
 */
struct nvme_tcp_h2c_data_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				cccid;
	uint16_t				ttag;
	uint32_t				datao;
	uint32_t				datal;
	uint8_t					reserved20[4];
};
_Static_assert(sizeof(struct nvme_tcp_h2c_data_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, cccid) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, ttag) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, datao) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_h2c_data_hdr, datal) == 16, "Incorrect offset");

#define NVME_TCP_H2C_DATA_FLAGS_LAST_PDU	(1u << 2)
#define NVME_TCP_H2C_DATA_FLAGS_SUCCESS		(1u << 3)
#define NVME_TCP_H2C_DATA_PDO_MULT		8u

/**
 * C2HData
 *
 * hdr.pdu_type == NVME_TCP_PDU_TYPE_C2H_DATA
 */
struct nvme_tcp_c2h_data_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				cccid;
	uint8_t					reserved10[2];
	uint32_t				datao;
	uint32_t				datal;
	uint8_t					reserved20[4];
};
_Static_assert(sizeof(struct nvme_tcp_c2h_data_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_c2h_data_hdr, cccid) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_c2h_data_hdr, datao) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_c2h_data_hdr, datal) == 16, "Incorrect offset");

#define NVME_TCP_C2H_DATA_FLAGS_SUCCESS		(1u << 3)
#define NVME_TCP_C2H_DATA_FLAGS_LAST_PDU	(1u << 2)
#define NVME_TCP_C2H_DATA_PDO_MULT		8u

/**
 * R2T
 *
 * common.pdu_type == NVME_TCP_PDU_TYPE_R2T
 */
struct nvme_tcp_r2t_hdr {
	struct nvme_tcp_common_pdu_hdr	common;
	uint16_t				cccid;
	uint16_t				ttag;
	uint32_t				r2to;
	uint32_t				r2tl;
	uint8_t					reserved20[4];
};
_Static_assert(sizeof(struct nvme_tcp_r2t_hdr) == 24, "Incorrect size");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, cccid) == 8, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, ttag) == 10, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, r2to) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, r2tl) == 16, "Incorrect offset");
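
/*
 * Illustrative sketch (not part of the protocol definitions): a host turning
 * a received R2T into the header of a single H2CData PDU carrying the whole
 * requested range.  Digests and PDU data alignment (CPDA) are ignored, so
 * the data begins immediately after the 24-byte header; splitting the range
 * across multiple PDUs to honor MAXH2CDATA is left to the caller.  A
 * little-endian host is assumed.
 */
static inline void
nvmf_example_h2c_data_from_r2t(struct nvme_tcp_h2c_data_hdr *h2c,
    const struct nvme_tcp_r2t_hdr *r2t)
{
	*h2c = (struct nvme_tcp_h2c_data_hdr){
		.common.pdu_type = NVME_TCP_PDU_TYPE_H2C_DATA,
		.common.flags = NVME_TCP_H2C_DATA_FLAGS_LAST_PDU,
		.common.hlen = sizeof(struct nvme_tcp_h2c_data_hdr),
		.common.pdo = sizeof(struct nvme_tcp_h2c_data_hdr),
		.common.plen = sizeof(struct nvme_tcp_h2c_data_hdr) + r2t->r2tl,
		.cccid = r2t->cccid,
		.ttag = r2t->ttag,
		.datao = r2t->r2to,
		.datal = r2t->r2tl,
	};
}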

#endif /* __NVMF_PROTO_H__ */