1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Definitions for the NVM Express interface
4 * Copyright (c) 2011-2014, Intel Corporation.
5 */
6
7 #ifndef _LINUX_NVME_H
8 #define _LINUX_NVME_H
9
10 #include <linux/bits.h>
11 #include <linux/types.h>
12 #include <linux/uuid.h>
13
/* NQN names in command fields are specified with one fixed size */
#define NVMF_NQN_FIELD_LEN	256

/* However, the maximum length of a qualified name itself is smaller */
#define NVMF_NQN_SIZE		223
19
20 #define NVMF_TRSVCID_SIZE 32
21 #define NVMF_TRADDR_SIZE 256
22 #define NVMF_TSAS_SIZE 256
23
24 #define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
25
26 #define NVME_NSID_ALL 0xffffffff
27
28 /* Special NSSR value, 'NVMe' */
29 #define NVME_SUBSYS_RESET 0x4E564D65
30
31 enum nvme_subsys_type {
32 /* Referral to another discovery type target subsystem */
33 NVME_NQN_DISC = 1,
34
35 /* NVME type target subsystem */
36 NVME_NQN_NVME = 2,
37
38 /* Current discovery type target subsystem */
39 NVME_NQN_CURR = 3,
40 };
41
42 enum nvme_ctrl_type {
43 NVME_CTRL_IO = 1, /* I/O controller */
44 NVME_CTRL_DISC = 2, /* Discovery controller */
45 NVME_CTRL_ADMIN = 3, /* Administrative controller */
46 };
47
48 enum nvme_dctype {
49 NVME_DCTYPE_NOT_REPORTED = 0,
50 NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
51 NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
52 };
53
54 /* Address Family codes for Discovery Log Page entry ADRFAM field */
55 enum {
56 NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
57 NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */
58 NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
59 NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
60 NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
61 NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */
62 NVMF_ADDR_FAMILY_MAX,
63 };
64
65 /* Transport Type codes for Discovery Log Page entry TRTYPE field */
66 enum {
67 NVMF_TRTYPE_PCI = 0, /* PCI */
68 NVMF_TRTYPE_RDMA = 1, /* RDMA */
69 NVMF_TRTYPE_FC = 2, /* Fibre Channel */
70 NVMF_TRTYPE_TCP = 3, /* TCP/IP */
71 NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
72 NVMF_TRTYPE_MAX,
73 };
74
75 /* Transport Requirements codes for Discovery Log Page entry TREQ field */
76 enum {
77 NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
78 NVMF_TREQ_REQUIRED = 1, /* Required */
79 NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
80 #define NVME_TREQ_SECURE_CHANNEL_MASK \
81 (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
82
83 NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
84 };
85
86 /* RDMA QP Service Type codes for Discovery Log Page entry TSAS
87 * RDMA_QPTYPE field
88 */
89 enum {
90 NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
91 NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
92 NVMF_RDMA_QPTYPE_INVALID = 0xff,
93 };
94
95 /* RDMA Provider Type codes for Discovery Log Page entry TSAS
96 * RDMA_PRTYPE field
97 */
98 enum {
99 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
100 NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
101 NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
102 NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
103 NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
104 };
105
106 /* RDMA Connection Management Service Type codes for Discovery Log Page
107 * entry TSAS RDMA_CMS field
108 */
109 enum {
110 NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
111 };
112
113 /* TSAS SECTYPE for TCP transport */
114 enum {
115 NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
116 NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
117 NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
118 NVMF_TCP_SECTYPE_INVALID = 0xff,
119 };
120
121 #define NVME_AQ_DEPTH 32
122 #define NVME_NR_AEN_COMMANDS 1
123 #define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
124
125 /*
126 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
127 * NVM-Express 1.2 specification, section 4.1.2.
128 */
129 #define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1)
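/*
 * Worked example with the defaults above: NVME_AQ_BLK_MQ_DEPTH = 32 - 1 = 31
 * tags handed to blk-mq, and NVME_AQ_MQ_TAG_DEPTH = 31 - 1 = 30 so that one
 * admin submission queue entry always remains empty.
 */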
130
131 enum {
132 NVME_REG_CAP = 0x0000, /* Controller Capabilities */
133 NVME_REG_VS = 0x0008, /* Version */
134 NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
135 NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
136 NVME_REG_CC = 0x0014, /* Controller Configuration */
137 NVME_REG_CSTS = 0x001c, /* Controller Status */
138 NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
139 NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
140 NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
141 NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
142 NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
143 NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
144 NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
145 NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
146 NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
147 * Location
148 */
149 NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
150 * Space Control
151 */
152 NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
153 NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
154 NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
155 NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
156 NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
157 * Buffer Size
158 */
159 NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
160 * Write Throughput
161 */
162 NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
163 };
164
165 #define NVME_CAP_MQES(cap) ((cap) & 0xffff)
166 #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
167 #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
168 #define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
169 #define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
170 #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
171 #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
172 #define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
173
174 #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
175 #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
176
177 #define NVME_CRTO_CRIMT(crto) ((crto) >> 16)
178 #define NVME_CRTO_CRWMT(crto) ((crto) & 0xffff)
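/*
 * Illustrative sketch (helper name made up for this example, not part of the
 * ABI): deriving the doorbell stride in bytes from a raw CAP value read at
 * NVME_REG_CAP. DSTRD encodes the stride as 2^(2 + DSTRD) bytes, i.e. 4 bytes
 * when DSTRD is 0.
 */
static inline __u32 nvme_example_db_stride_bytes(__u64 cap)
{
	return 4U << NVME_CAP_STRIDE(cap);
}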
179
180 enum {
181 NVME_CMBSZ_SQS = 1 << 0,
182 NVME_CMBSZ_CQS = 1 << 1,
183 NVME_CMBSZ_LISTS = 1 << 2,
184 NVME_CMBSZ_RDS = 1 << 3,
185 NVME_CMBSZ_WDS = 1 << 4,
186
187 NVME_CMBSZ_SZ_SHIFT = 12,
188 NVME_CMBSZ_SZ_MASK = 0xfffff,
189
190 NVME_CMBSZ_SZU_SHIFT = 8,
191 NVME_CMBSZ_SZU_MASK = 0xf,
192 };
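/*
 * Illustrative sketch (assumed helper, not part of the ABI): computing the
 * Controller Memory Buffer size in bytes from a raw CMBSZ value. SZU selects
 * the granularity (4 KiB << (4 * SZU)) and SZ is the size in those units.
 */
static inline __u64 nvme_example_cmb_size_bytes(__u32 cmbsz)
{
	__u64 unit = 4096ULL << (4 * ((cmbsz >> NVME_CMBSZ_SZU_SHIFT) &
				      NVME_CMBSZ_SZU_MASK));

	return unit * ((cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK);
}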
193
194 /*
195 * Submission and Completion Queue Entry Sizes for the NVM command set.
196 * (In bytes and specified as a power of two (2^n)).
197 */
198 #define NVME_ADM_SQES 6
199 #define NVME_NVM_IOSQES 6
200 #define NVME_NVM_IOCQES 4
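/*
 * Worked example: an admin or NVM I/O submission queue entry is
 * 2^NVME_ADM_SQES = 2^6 = 64 bytes, and an NVM I/O completion queue entry is
 * 2^NVME_NVM_IOCQES = 2^4 = 16 bytes.
 */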
201
202 /*
203 * Controller Configuration (CC) register (Offset 14h)
204 */
205 enum {
206 /* Enable (EN): bit 0 */
207 NVME_CC_ENABLE = 1 << 0,
208 NVME_CC_EN_SHIFT = 0,
209
210 /* Bits 03:01 are reserved (NVMe Base Specification rev 2.1) */
211
212 /* I/O Command Set Selected (CSS): bits 06:04 */
213 NVME_CC_CSS_SHIFT = 4,
214 NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
215 NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
216 NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
217
218 /* Memory Page Size (MPS): bits 10:07 */
219 NVME_CC_MPS_SHIFT = 7,
220 NVME_CC_MPS_MASK = 0xf << NVME_CC_MPS_SHIFT,
221
222 /* Arbitration Mechanism Selected (AMS): bits 13:11 */
223 NVME_CC_AMS_SHIFT = 11,
224 NVME_CC_AMS_MASK = 7 << NVME_CC_AMS_SHIFT,
225 NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
226 NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
227 NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
228
229 /* Shutdown Notification (SHN): bits 15:14 */
230 NVME_CC_SHN_SHIFT = 14,
231 NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
232 NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
233 NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
234 NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
235
236 /* I/O Submission Queue Entry Size (IOSQES): bits 19:16 */
237 NVME_CC_IOSQES_SHIFT = 16,
238 NVME_CC_IOSQES_MASK = 0xf << NVME_CC_IOSQES_SHIFT,
239 NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
240
241 /* I/O Completion Queue Entry Size (IOCQES): bits 23:20 */
242 NVME_CC_IOCQES_SHIFT = 20,
243 NVME_CC_IOCQES_MASK = 0xf << NVME_CC_IOCQES_SHIFT,
244 NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
245
246 /* Controller Ready Independent of Media Enable (CRIME): bit 24 */
247 NVME_CC_CRIME = 1 << 24,
248
	/* Bits 31:25 are reserved (NVMe Base Specification rev 2.1) */
250 };
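/*
 * Illustrative sketch (assumption, not the driver's actual initialization
 * code): one possible CC value a host could write to enable the controller,
 * selecting the NVM command set, 4 KiB memory pages (MPS = 0), round-robin
 * arbitration and the queue entry sizes defined above.
 */
static inline __u32 nvme_example_cc_enable_value(void)
{
	return NVME_CC_CSS_NVM | (0 << NVME_CC_MPS_SHIFT) | NVME_CC_AMS_RR |
	       NVME_CC_IOSQES | NVME_CC_IOCQES | NVME_CC_ENABLE;
}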
251
252 enum {
253 NVME_CSTS_RDY = 1 << 0,
254 NVME_CSTS_CFS = 1 << 1,
255 NVME_CSTS_NSSRO = 1 << 4,
256 NVME_CSTS_PP = 1 << 5,
257 NVME_CSTS_SHST_NORMAL = 0 << 2,
258 NVME_CSTS_SHST_OCCUR = 1 << 2,
259 NVME_CSTS_SHST_CMPLT = 2 << 2,
260 NVME_CSTS_SHST_MASK = 3 << 2,
261 };
262
263 enum {
264 NVME_CMBMSC_CRE = 1 << 0,
265 NVME_CMBMSC_CMSE = 1 << 1,
266 };
267
268 enum {
269 NVME_CAP_CSS_NVM = 1 << 0,
270 NVME_CAP_CSS_CSI = 1 << 6,
271 };
272
273 enum {
274 NVME_CAP_CRMS_CRWMS = 1ULL << 59,
275 NVME_CAP_CRMS_CRIMS = 1ULL << 60,
276 };
277
278 struct nvme_id_power_state {
279 __le16 max_power; /* centiwatts */
280 __u8 rsvd2;
281 __u8 flags;
282 __le32 entry_lat; /* microseconds */
283 __le32 exit_lat; /* microseconds */
284 __u8 read_tput;
285 __u8 read_lat;
286 __u8 write_tput;
287 __u8 write_lat;
288 __le16 idle_power;
289 __u8 idle_scale;
290 __u8 rsvd19;
291 __le16 active_power;
292 __u8 active_work_scale;
293 __u8 rsvd23[9];
294 };
295
296 enum {
297 NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
298 NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
299 };
300
301 enum nvme_ctrl_attr {
302 NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
303 NVME_CTRL_ATTR_TBKAS = (1 << 6),
304 NVME_CTRL_ATTR_ELBAS = (1 << 15),
305 NVME_CTRL_ATTR_RHII = (1 << 18),
306 NVME_CTRL_ATTR_FDPS = (1 << 19),
307 };
308
309 struct nvme_id_ctrl {
310 __le16 vid;
311 __le16 ssvid;
312 char sn[20];
313 char mn[40];
314 char fr[8];
315 __u8 rab;
316 __u8 ieee[3];
317 __u8 cmic;
318 __u8 mdts;
319 __le16 cntlid;
320 __le32 ver;
321 __le32 rtd3r;
322 __le32 rtd3e;
323 __le32 oaes;
324 __le32 ctratt;
325 __u8 rsvd100[11];
326 __u8 cntrltype;
327 __u8 fguid[16];
328 __le16 crdt1;
329 __le16 crdt2;
330 __le16 crdt3;
331 __u8 rsvd134[122];
332 __le16 oacs;
333 __u8 acl;
334 __u8 aerl;
335 __u8 frmw;
336 __u8 lpa;
337 __u8 elpe;
338 __u8 npss;
339 __u8 avscc;
340 __u8 apsta;
341 __le16 wctemp;
342 __le16 cctemp;
343 __le16 mtfa;
344 __le32 hmpre;
345 __le32 hmmin;
346 __u8 tnvmcap[16];
347 __u8 unvmcap[16];
348 __le32 rpmbs;
349 __le16 edstt;
350 __u8 dsto;
351 __u8 fwug;
352 __le16 kas;
353 __le16 hctma;
354 __le16 mntmt;
355 __le16 mxtmt;
356 __le32 sanicap;
357 __le32 hmminds;
358 __le16 hmmaxd;
359 __le16 nvmsetidmax;
360 __le16 endgidmax;
361 __u8 anatt;
362 __u8 anacap;
363 __le32 anagrpmax;
364 __le32 nanagrpid;
365 __u8 rsvd352[160];
366 __u8 sqes;
367 __u8 cqes;
368 __le16 maxcmd;
369 __le32 nn;
370 __le16 oncs;
371 __le16 fuses;
372 __u8 fna;
373 __u8 vwc;
374 __le16 awun;
375 __le16 awupf;
376 __u8 nvscc;
377 __u8 nwpc;
378 __le16 acwu;
379 __u8 rsvd534[2];
380 __le32 sgls;
381 __le32 mnan;
382 __u8 rsvd544[224];
383 char subnqn[256];
384 __u8 rsvd1024[768];
385 __le32 ioccsz;
386 __le32 iorcsz;
387 __le16 icdoff;
388 __u8 ctrattr;
389 __u8 msdbd;
390 __u8 rsvd1804[2];
391 __u8 dctype;
392 __u8 rsvd1807[241];
393 struct nvme_id_power_state psd[32];
394 __u8 vs[1024];
395 };
396
397 enum {
398 NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
399 NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
400 NVME_CTRL_CMIC_ANA = 1 << 3,
401 NVME_CTRL_ONCS_COMPARE = 1 << 0,
402 NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
403 NVME_CTRL_ONCS_DSM = 1 << 2,
404 NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
405 NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
406 NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
407 NVME_CTRL_VWC_PRESENT = 1 << 0,
408 NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
409 NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
410 NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
411 NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
412 NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
413 NVME_CTRL_CTRATT_128_ID = 1 << 0,
414 NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
415 NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
416 NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
417 NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
418 NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
419 NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
420 NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
421 NVME_CTRL_SGLS_BYTE_ALIGNED = 1,
422 NVME_CTRL_SGLS_DWORD_ALIGNED = 2,
423 NVME_CTRL_SGLS_KSDBDS = 1 << 2,
424 NVME_CTRL_SGLS_MSDS = 1 << 19,
425 NVME_CTRL_SGLS_SAOS = 1 << 20,
426 };
427
428 struct nvme_lbaf {
429 __le16 ms;
430 __u8 ds;
431 __u8 rp;
432 };
433
434 struct nvme_id_ns {
435 __le64 nsze;
436 __le64 ncap;
437 __le64 nuse;
438 __u8 nsfeat;
439 __u8 nlbaf;
440 __u8 flbas;
441 __u8 mc;
442 __u8 dpc;
443 __u8 dps;
444 __u8 nmic;
445 __u8 rescap;
446 __u8 fpi;
447 __u8 dlfeat;
448 __le16 nawun;
449 __le16 nawupf;
450 __le16 nacwu;
451 __le16 nabsn;
452 __le16 nabo;
453 __le16 nabspf;
454 __le16 noiob;
455 __u8 nvmcap[16];
456 __le16 npwg;
457 __le16 npwa;
458 __le16 npdg;
459 __le16 npda;
460 __le16 nows;
461 __u8 rsvd74[18];
462 __le32 anagrpid;
463 __u8 rsvd96[3];
464 __u8 nsattr;
465 __le16 nvmsetid;
466 __le16 endgid;
467 __u8 nguid[16];
468 __u8 eui64[8];
469 struct nvme_lbaf lbaf[64];
470 __u8 vs[3712];
471 };
472
473 /* I/O Command Set Independent Identify Namespace Data Structure */
474 struct nvme_id_ns_cs_indep {
475 __u8 nsfeat;
476 __u8 nmic;
477 __u8 rescap;
478 __u8 fpi;
479 __le32 anagrpid;
480 __u8 nsattr;
481 __u8 rsvd9;
482 __le16 nvmsetid;
483 __le16 endgid;
484 __u8 nstat;
485 __u8 rsvd15[4081];
486 };
487
488 struct nvme_zns_lbafe {
489 __le64 zsze;
490 __u8 zdes;
491 __u8 rsvd9[7];
492 };
493
494 struct nvme_id_ns_zns {
495 __le16 zoc;
496 __le16 ozcs;
497 __le32 mar;
498 __le32 mor;
499 __le32 rrl;
500 __le32 frl;
501 __u8 rsvd20[2796];
502 struct nvme_zns_lbafe lbafe[64];
503 __u8 vs[256];
504 };
505
506 struct nvme_id_ctrl_zns {
507 __u8 zasl;
508 __u8 rsvd1[4095];
509 };
510
511 struct nvme_id_ns_nvm {
512 __le64 lbstm;
513 __u8 pic;
514 __u8 rsvd9[3];
515 __le32 elbaf[64];
516 __u8 rsvd268[3828];
517 };
518
519 enum {
520 NVME_ID_NS_NVM_STS_MASK = 0x7f,
521 NVME_ID_NS_NVM_GUARD_SHIFT = 7,
522 NVME_ID_NS_NVM_GUARD_MASK = 0x3,
523 NVME_ID_NS_NVM_QPIF_SHIFT = 9,
524 NVME_ID_NS_NVM_QPIF_MASK = 0xf,
525 NVME_ID_NS_NVM_QPIFS = 1 << 3,
526 };
527
static inline __u8 nvme_elbaf_sts(__u32 elbaf)
{
	return elbaf & NVME_ID_NS_NVM_STS_MASK;
}

static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
{
	return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
}

static inline __u8 nvme_elbaf_qualified_guard_type(__u32 elbaf)
{
	return (elbaf >> NVME_ID_NS_NVM_QPIF_SHIFT) & NVME_ID_NS_NVM_QPIF_MASK;
}
542
543 struct nvme_id_ctrl_nvm {
544 __u8 vsl;
545 __u8 wzsl;
546 __u8 wusl;
547 __u8 dmrl;
548 __le32 dmrsl;
549 __le64 dmsl;
550 __u8 rsvd16[4080];
551 };
552
553 enum {
554 NVME_ID_CNS_NS = 0x00,
555 NVME_ID_CNS_CTRL = 0x01,
556 NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
557 NVME_ID_CNS_NS_DESC_LIST = 0x03,
558 NVME_ID_CNS_CS_NS = 0x05,
559 NVME_ID_CNS_CS_CTRL = 0x06,
560 NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07,
561 NVME_ID_CNS_NS_CS_INDEP = 0x08,
562 NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
563 NVME_ID_CNS_NS_PRESENT = 0x11,
564 NVME_ID_CNS_CTRL_NS_LIST = 0x12,
565 NVME_ID_CNS_CTRL_LIST = 0x13,
566 NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
567 NVME_ID_CNS_NS_GRANULARITY = 0x16,
568 NVME_ID_CNS_UUID_LIST = 0x17,
569 NVME_ID_CNS_ENDGRP_LIST = 0x19,
570 };
571
572 enum {
573 NVME_CSI_NVM = 0,
574 NVME_CSI_ZNS = 2,
575 };
576
577 enum {
578 NVME_DIR_IDENTIFY = 0x00,
579 NVME_DIR_STREAMS = 0x01,
580 NVME_DIR_SND_ID_OP_ENABLE = 0x01,
581 NVME_DIR_SND_ST_OP_REL_ID = 0x01,
582 NVME_DIR_SND_ST_OP_REL_RSC = 0x02,
583 NVME_DIR_RCV_ID_OP_PARAM = 0x01,
584 NVME_DIR_RCV_ST_OP_PARAM = 0x01,
585 NVME_DIR_RCV_ST_OP_STATUS = 0x02,
586 NVME_DIR_RCV_ST_OP_RESOURCE = 0x03,
587 NVME_DIR_ENDIR = 0x01,
588 };
589
590 enum {
591 NVME_NS_FEAT_THIN = 1 << 0,
592 NVME_NS_FEAT_ATOMICS = 1 << 1,
593 NVME_NS_FEAT_IO_OPT = 1 << 4,
594 NVME_NS_ATTR_RO = 1 << 0,
595 NVME_NS_FLBAS_LBA_MASK = 0xf,
596 NVME_NS_FLBAS_LBA_UMASK = 0x60,
597 NVME_NS_FLBAS_LBA_SHIFT = 1,
598 NVME_NS_FLBAS_META_EXT = 0x10,
599 NVME_NS_NMIC_SHARED = 1 << 0,
600 NVME_NS_ROTATIONAL = 1 << 4,
601 NVME_NS_VWC_NOT_PRESENT = 1 << 5,
602 NVME_LBAF_RP_BEST = 0,
603 NVME_LBAF_RP_BETTER = 1,
604 NVME_LBAF_RP_GOOD = 2,
605 NVME_LBAF_RP_DEGRADED = 3,
606 NVME_NS_DPC_PI_LAST = 1 << 4,
607 NVME_NS_DPC_PI_FIRST = 1 << 3,
608 NVME_NS_DPC_PI_TYPE3 = 1 << 2,
609 NVME_NS_DPC_PI_TYPE2 = 1 << 1,
610 NVME_NS_DPC_PI_TYPE1 = 1 << 0,
611 NVME_NS_DPS_PI_FIRST = 1 << 3,
612 NVME_NS_DPS_PI_MASK = 0x7,
613 NVME_NS_DPS_PI_TYPE1 = 1,
614 NVME_NS_DPS_PI_TYPE2 = 2,
615 NVME_NS_DPS_PI_TYPE3 = 3,
616 };
617
618 enum {
619 NVME_NSTAT_NRDY = 1 << 0,
620 };
621
622 enum {
623 NVME_NVM_NS_16B_GUARD = 0,
624 NVME_NVM_NS_32B_GUARD = 1,
625 NVME_NVM_NS_64B_GUARD = 2,
626 NVME_NVM_NS_QTYPE_GUARD = 3,
627 };
628
static inline __u8 nvme_lbaf_index(__u8 flbas)
{
	return (flbas & NVME_NS_FLBAS_LBA_MASK) |
		((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT);
}
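/*
 * Worked example: FLBAS 0x21 selects extended LBA format index 17: the low
 * nibble contributes 1 and bits 6:5 (0x20 >> 1 = 0x10) contribute 16.
 */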
634
635 /* Identify Namespace Metadata Capabilities (MC): */
636 enum {
637 NVME_MC_EXTENDED_LBA = (1 << 0),
638 NVME_MC_METADATA_PTR = (1 << 1),
639 };
640
641 struct nvme_ns_id_desc {
642 __u8 nidt;
643 __u8 nidl;
644 __le16 reserved;
645 };
646
647 #define NVME_NIDT_EUI64_LEN 8
648 #define NVME_NIDT_NGUID_LEN 16
649 #define NVME_NIDT_UUID_LEN 16
650 #define NVME_NIDT_CSI_LEN 1
651
652 enum {
653 NVME_NIDT_EUI64 = 0x01,
654 NVME_NIDT_NGUID = 0x02,
655 NVME_NIDT_UUID = 0x03,
656 NVME_NIDT_CSI = 0x04,
657 };
658
659 struct nvme_endurance_group_log {
660 __u8 egcw;
661 __u8 egfeat;
662 __u8 rsvd2;
663 __u8 avsp;
664 __u8 avspt;
665 __u8 pused;
666 __le16 did;
667 __u8 rsvd8[24];
668 __u8 ee[16];
669 __u8 dur[16];
670 __u8 duw[16];
671 __u8 muw[16];
672 __u8 hrc[16];
673 __u8 hwc[16];
674 __u8 mdie[16];
675 __u8 neile[16];
676 __u8 tegcap[16];
677 __u8 uegcap[16];
678 __u8 rsvd192[320];
679 };
680
681 struct nvme_rotational_media_log {
682 __le16 endgid;
683 __le16 numa;
684 __le16 nrs;
685 __u8 rsvd6[2];
686 __le32 spinc;
687 __le32 fspinc;
688 __le32 ldc;
689 __le32 fldc;
690 __u8 rsvd24[488];
691 };
692
693 struct nvme_fdp_config {
694 __u8 flags;
695 #define FDPCFG_FDPE (1U << 0)
696 __u8 fdpcidx;
697 __le16 reserved;
698 };
699
700 struct nvme_fdp_ruh_desc {
701 __u8 ruht;
702 __u8 reserved[3];
703 };
704
705 struct nvme_fdp_config_desc {
706 __le16 dsze;
707 __u8 fdpa;
708 __u8 vss;
709 __le32 nrg;
710 __le16 nruh;
711 __le16 maxpids;
712 __le32 nns;
713 __le64 runs;
714 __le32 erutl;
715 __u8 rsvd28[36];
716 struct nvme_fdp_ruh_desc ruhs[];
717 };
718
719 struct nvme_fdp_config_log {
720 __le16 numfdpc;
721 __u8 ver;
722 __u8 rsvd3;
723 __le32 sze;
724 __u8 rsvd8[8];
725 /*
726 * This is followed by variable number of nvme_fdp_config_desc
727 * structures, but sparse doesn't like nested variable sized arrays.
728 */
729 };
730
731 struct nvme_smart_log {
732 __u8 critical_warning;
733 __u8 temperature[2];
734 __u8 avail_spare;
735 __u8 spare_thresh;
736 __u8 percent_used;
737 __u8 endu_grp_crit_warn_sumry;
738 __u8 rsvd7[25];
739 __u8 data_units_read[16];
740 __u8 data_units_written[16];
741 __u8 host_reads[16];
742 __u8 host_writes[16];
743 __u8 ctrl_busy_time[16];
744 __u8 power_cycles[16];
745 __u8 power_on_hours[16];
746 __u8 unsafe_shutdowns[16];
747 __u8 media_errors[16];
748 __u8 num_err_log_entries[16];
749 __le32 warning_temp_time;
750 __le32 critical_comp_time;
751 __le16 temp_sensor[8];
752 __le32 thm_temp1_trans_count;
753 __le32 thm_temp2_trans_count;
754 __le32 thm_temp1_total_time;
755 __le32 thm_temp2_total_time;
756 __u8 rsvd232[280];
757 };
758
759 struct nvme_fw_slot_info_log {
760 __u8 afi;
761 __u8 rsvd1[7];
762 __le64 frs[7];
763 __u8 rsvd64[448];
764 };
765
766 enum {
767 NVME_CMD_EFFECTS_CSUPP = 1 << 0,
768 NVME_CMD_EFFECTS_LBCC = 1 << 1,
769 NVME_CMD_EFFECTS_NCC = 1 << 2,
770 NVME_CMD_EFFECTS_NIC = 1 << 3,
771 NVME_CMD_EFFECTS_CCC = 1 << 4,
772 NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
773 NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
774 NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
775 NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
776 };
777
778 struct nvme_effects_log {
779 __le32 acs[256];
780 __le32 iocs[256];
781 __u8 resv[2048];
782 };
783
784 enum nvme_ana_state {
785 NVME_ANA_OPTIMIZED = 0x01,
786 NVME_ANA_NONOPTIMIZED = 0x02,
787 NVME_ANA_INACCESSIBLE = 0x03,
788 NVME_ANA_PERSISTENT_LOSS = 0x04,
789 NVME_ANA_CHANGE = 0x0f,
790 };
791
792 struct nvme_ana_group_desc {
793 __le32 grpid;
794 __le32 nnsids;
795 __le64 chgcnt;
796 __u8 state;
797 __u8 rsvd17[15];
798 __le32 nsids[];
799 };
800
801 /* flag for the log specific field of the ANA log */
802 #define NVME_ANA_LOG_RGO (1 << 0)
803
804 struct nvme_ana_rsp_hdr {
805 __le64 chgcnt;
806 __le16 ngrps;
807 __le16 rsvd10[3];
808 };
809
810 struct nvme_zone_descriptor {
811 __u8 zt;
812 __u8 zs;
813 __u8 za;
814 __u8 rsvd3[5];
815 __le64 zcap;
816 __le64 zslba;
817 __le64 wp;
818 __u8 rsvd32[32];
819 };
820
821 enum {
822 NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2,
823 };
824
825 struct nvme_zone_report {
826 __le64 nr_zones;
827 __u8 resv8[56];
828 struct nvme_zone_descriptor entries[];
829 };
830
831 enum {
832 NVME_SMART_CRIT_SPARE = 1 << 0,
833 NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
834 NVME_SMART_CRIT_RELIABILITY = 1 << 2,
835 NVME_SMART_CRIT_MEDIA = 1 << 3,
836 NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
837 };
838
839 enum {
840 NVME_AER_ERROR = 0,
841 NVME_AER_SMART = 1,
842 NVME_AER_NOTICE = 2,
843 NVME_AER_CSS = 6,
844 NVME_AER_VS = 7,
845 };
846
847 enum {
848 NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
849 };
850
851 enum {
852 NVME_AER_NOTICE_NS_CHANGED = 0x00,
853 NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
854 NVME_AER_NOTICE_ANA = 0x03,
855 NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
856 };
857
858 enum {
859 NVME_AEN_BIT_NS_ATTR = 8,
860 NVME_AEN_BIT_FW_ACT = 9,
861 NVME_AEN_BIT_ANA_CHANGE = 11,
862 NVME_AEN_BIT_DISC_CHANGE = 31,
863 };
864
865 enum {
866 NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
867 NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
868 NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
869 NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
870 };
871
872 struct nvme_lba_range_type {
873 __u8 type;
874 __u8 attributes;
875 __u8 rsvd2[14];
876 __le64 slba;
877 __le64 nlb;
878 __u8 guid[16];
879 __u8 rsvd48[16];
880 };
881
882 enum {
883 NVME_LBART_TYPE_FS = 0x01,
884 NVME_LBART_TYPE_RAID = 0x02,
885 NVME_LBART_TYPE_CACHE = 0x03,
886 NVME_LBART_TYPE_SWAP = 0x04,
887
888 NVME_LBART_ATTRIB_TEMP = 1 << 0,
889 NVME_LBART_ATTRIB_HIDE = 1 << 1,
890 };
891
892 enum nvme_pr_type {
893 NVME_PR_WRITE_EXCLUSIVE = 1,
894 NVME_PR_EXCLUSIVE_ACCESS = 2,
895 NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
896 NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
897 NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
898 NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
899 };
900
901 enum nvme_eds {
902 NVME_EXTENDED_DATA_STRUCT = 0x1,
903 };
904
905 struct nvme_registered_ctrl {
906 __le16 cntlid;
907 __u8 rcsts;
908 __u8 rsvd3[5];
909 __le64 hostid;
910 __le64 rkey;
911 };
912
913 struct nvme_reservation_status {
914 __le32 gen;
915 __u8 rtype;
916 __u8 regctl[2];
917 __u8 resv5[2];
918 __u8 ptpls;
919 __u8 resv10[14];
920 struct nvme_registered_ctrl regctl_ds[];
921 };
922
923 struct nvme_registered_ctrl_ext {
924 __le16 cntlid;
925 __u8 rcsts;
926 __u8 rsvd3[5];
927 __le64 rkey;
928 __u8 hostid[16];
929 __u8 rsvd32[32];
930 };
931
932 struct nvme_reservation_status_ext {
933 __le32 gen;
934 __u8 rtype;
935 __u8 regctl[2];
936 __u8 resv5[2];
937 __u8 ptpls;
938 __u8 resv10[14];
939 __u8 rsvd24[40];
940 struct nvme_registered_ctrl_ext regctl_eds[];
941 };
942
943 /* I/O commands */
944
945 enum nvme_opcode {
946 nvme_cmd_flush = 0x00,
947 nvme_cmd_write = 0x01,
948 nvme_cmd_read = 0x02,
949 nvme_cmd_write_uncor = 0x04,
950 nvme_cmd_compare = 0x05,
951 nvme_cmd_write_zeroes = 0x08,
952 nvme_cmd_dsm = 0x09,
953 nvme_cmd_verify = 0x0c,
954 nvme_cmd_resv_register = 0x0d,
955 nvme_cmd_resv_report = 0x0e,
956 nvme_cmd_resv_acquire = 0x11,
957 nvme_cmd_io_mgmt_recv = 0x12,
958 nvme_cmd_resv_release = 0x15,
959 nvme_cmd_zone_mgmt_send = 0x79,
960 nvme_cmd_zone_mgmt_recv = 0x7a,
961 nvme_cmd_zone_append = 0x7d,
962 nvme_cmd_vendor_start = 0x80,
963 };
964
965 #define nvme_opcode_name(opcode) { opcode, #opcode }
966 #define show_nvm_opcode_name(val) \
967 __print_symbolic(val, \
968 nvme_opcode_name(nvme_cmd_flush), \
969 nvme_opcode_name(nvme_cmd_write), \
970 nvme_opcode_name(nvme_cmd_read), \
971 nvme_opcode_name(nvme_cmd_write_uncor), \
972 nvme_opcode_name(nvme_cmd_compare), \
973 nvme_opcode_name(nvme_cmd_write_zeroes), \
974 nvme_opcode_name(nvme_cmd_dsm), \
975 nvme_opcode_name(nvme_cmd_verify), \
976 nvme_opcode_name(nvme_cmd_resv_register), \
977 nvme_opcode_name(nvme_cmd_resv_report), \
978 nvme_opcode_name(nvme_cmd_resv_acquire), \
979 nvme_opcode_name(nvme_cmd_io_mgmt_recv), \
980 nvme_opcode_name(nvme_cmd_resv_release), \
981 nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
982 nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
983 nvme_opcode_name(nvme_cmd_zone_append))
984
985
986
987 /*
988 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
989 *
990 * @NVME_SGL_FMT_ADDRESS: absolute address of the data block
991 * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
992 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
993 * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
994 * request subtype
995 */
996 enum {
997 NVME_SGL_FMT_ADDRESS = 0x00,
998 NVME_SGL_FMT_OFFSET = 0x01,
999 NVME_SGL_FMT_TRANSPORT_A = 0x0A,
1000 NVME_SGL_FMT_INVALIDATE = 0x0f,
1001 };
1002
1003 /*
1004 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
1005 *
1006 * For struct nvme_sgl_desc:
1007 * @NVME_SGL_FMT_DATA_DESC: data block descriptor
1008 * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor
1009 * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
1010 *
1011 * For struct nvme_keyed_sgl_desc:
1012 * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
1013 *
1014 * Transport-specific SGL types:
 * @NVME_TRANSPORT_SGL_DATA_DESC:	Transport SGL data block descriptor
1016 */
1017 enum {
1018 NVME_SGL_FMT_DATA_DESC = 0x00,
1019 NVME_SGL_FMT_SEG_DESC = 0x02,
1020 NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
1021 NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
1022 NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
1023 };
1024
1025 struct nvme_sgl_desc {
1026 __le64 addr;
1027 __le32 length;
1028 __u8 rsvd[3];
1029 __u8 type;
1030 };
1031
1032 struct nvme_keyed_sgl_desc {
1033 __le64 addr;
1034 __u8 length[3];
1035 __u8 key[4];
1036 __u8 type;
1037 };
1038
1039 union nvme_data_ptr {
1040 struct {
1041 __le64 prp1;
1042 __le64 prp2;
1043 };
1044 struct nvme_sgl_desc sgl;
1045 struct nvme_keyed_sgl_desc ksgl;
1046 };
1047
1048 /*
1049 * Lowest two bits of our flags field (FUSE field in the spec):
1050 *
1051 * @NVME_CMD_FUSE_FIRST: Fused Operation, first command
1052 * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
1053 *
1054 * Highest two bits in our flags field (PSDT field in the spec):
1055 *
1056 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer,
1057 * If used, MPTR contains addr of single physical buffer (byte aligned).
1058 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer,
1059 * If used, MPTR contains an address of an SGL segment containing
1060 * exactly 1 SGL descriptor (qword aligned).
1061 */
1062 enum {
1063 NVME_CMD_FUSE_FIRST = (1 << 0),
1064 NVME_CMD_FUSE_SECOND = (1 << 1),
1065
1066 NVME_CMD_SGL_METABUF = (1 << 6),
1067 NVME_CMD_SGL_METASEG = (1 << 7),
1068 NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
1069 };
1070
1071 struct nvme_common_command {
1072 __u8 opcode;
1073 __u8 flags;
1074 __u16 command_id;
1075 __le32 nsid;
1076 __le32 cdw2[2];
1077 __le64 metadata;
1078 union nvme_data_ptr dptr;
1079 struct_group(cdws,
1080 __le32 cdw10;
1081 __le32 cdw11;
1082 __le32 cdw12;
1083 __le32 cdw13;
1084 __le32 cdw14;
1085 __le32 cdw15;
1086 );
1087 };
1088
1089 struct nvme_rw_command {
1090 __u8 opcode;
1091 __u8 flags;
1092 __u16 command_id;
1093 __le32 nsid;
1094 __le32 cdw2;
1095 __le32 cdw3;
1096 __le64 metadata;
1097 union nvme_data_ptr dptr;
1098 __le64 slba;
1099 __le16 length;
1100 __le16 control;
1101 __le32 dsmgmt;
1102 __le32 reftag;
1103 __le16 lbat;
1104 __le16 lbatm;
1105 };
1106
1107 enum {
1108 NVME_RW_LR = 1 << 15,
1109 NVME_RW_FUA = 1 << 14,
1110 NVME_RW_APPEND_PIREMAP = 1 << 9,
1111 NVME_RW_DSM_FREQ_UNSPEC = 0,
1112 NVME_RW_DSM_FREQ_TYPICAL = 1,
1113 NVME_RW_DSM_FREQ_RARE = 2,
1114 NVME_RW_DSM_FREQ_READS = 3,
1115 NVME_RW_DSM_FREQ_WRITES = 4,
1116 NVME_RW_DSM_FREQ_RW = 5,
1117 NVME_RW_DSM_FREQ_ONCE = 6,
1118 NVME_RW_DSM_FREQ_PREFETCH = 7,
1119 NVME_RW_DSM_FREQ_TEMP = 8,
1120 NVME_RW_DSM_LATENCY_NONE = 0 << 4,
1121 NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
1122 NVME_RW_DSM_LATENCY_NORM = 2 << 4,
1123 NVME_RW_DSM_LATENCY_LOW = 3 << 4,
1124 NVME_RW_DSM_SEQ_REQ = 1 << 6,
1125 NVME_RW_DSM_COMPRESSED = 1 << 7,
1126 NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
1127 NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
1128 NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
1129 NVME_RW_PRINFO_PRACT = 1 << 13,
1130 NVME_RW_DTYPE_STREAMS = 1 << 4,
1131 NVME_RW_DTYPE_DPLCMT = 2 << 4,
1132 NVME_WZ_DEAC = 1 << 9,
1133 };
1134
1135 struct nvme_dsm_cmd {
1136 __u8 opcode;
1137 __u8 flags;
1138 __u16 command_id;
1139 __le32 nsid;
1140 __u64 rsvd2[2];
1141 union nvme_data_ptr dptr;
1142 __le32 nr;
1143 __le32 attributes;
1144 __u32 rsvd12[4];
1145 };
1146
1147 enum {
1148 NVME_DSMGMT_IDR = 1 << 0,
1149 NVME_DSMGMT_IDW = 1 << 1,
1150 NVME_DSMGMT_AD = 1 << 2,
1151 };
1152
1153 #define NVME_DSM_MAX_RANGES 256
1154
1155 struct nvme_dsm_range {
1156 __le32 cattr;
1157 __le32 nlb;
1158 __le64 slba;
1159 };
1160
1161 struct nvme_write_zeroes_cmd {
1162 __u8 opcode;
1163 __u8 flags;
1164 __u16 command_id;
1165 __le32 nsid;
1166 __u64 rsvd2;
1167 __le64 metadata;
1168 union nvme_data_ptr dptr;
1169 __le64 slba;
1170 __le16 length;
1171 __le16 control;
1172 __le32 dsmgmt;
1173 __le32 reftag;
1174 __le16 lbat;
1175 __le16 lbatm;
1176 };
1177
1178 enum nvme_zone_mgmt_action {
1179 NVME_ZONE_CLOSE = 0x1,
1180 NVME_ZONE_FINISH = 0x2,
1181 NVME_ZONE_OPEN = 0x3,
1182 NVME_ZONE_RESET = 0x4,
1183 NVME_ZONE_OFFLINE = 0x5,
1184 NVME_ZONE_SET_DESC_EXT = 0x10,
1185 };
1186
1187 struct nvme_zone_mgmt_send_cmd {
1188 __u8 opcode;
1189 __u8 flags;
1190 __u16 command_id;
1191 __le32 nsid;
1192 __le32 cdw2[2];
1193 __le64 metadata;
1194 union nvme_data_ptr dptr;
1195 __le64 slba;
1196 __le32 cdw12;
1197 __u8 zsa;
1198 __u8 select_all;
1199 __u8 rsvd13[2];
1200 __le32 cdw14[2];
1201 };
1202
1203 struct nvme_zone_mgmt_recv_cmd {
1204 __u8 opcode;
1205 __u8 flags;
1206 __u16 command_id;
1207 __le32 nsid;
1208 __le64 rsvd2[2];
1209 union nvme_data_ptr dptr;
1210 __le64 slba;
1211 __le32 numd;
1212 __u8 zra;
1213 __u8 zrasf;
1214 __u8 pr;
1215 __u8 rsvd13;
1216 __le32 cdw14[2];
1217 };
1218
1219 struct nvme_io_mgmt_recv_cmd {
1220 __u8 opcode;
1221 __u8 flags;
1222 __u16 command_id;
1223 __le32 nsid;
1224 __le64 rsvd2[2];
1225 union nvme_data_ptr dptr;
1226 __u8 mo;
1227 __u8 rsvd11;
1228 __u16 mos;
1229 __le32 numd;
1230 __le32 cdw12[4];
1231 };
1232
1233 enum {
1234 NVME_IO_MGMT_RECV_MO_RUHS = 1,
1235 };
1236
1237 struct nvme_fdp_ruh_status_desc {
1238 __le16 pid;
1239 __le16 ruhid;
1240 __le32 earutr;
1241 __le64 ruamw;
1242 __u8 reserved[16];
1243 };
1244
1245 struct nvme_fdp_ruh_status {
1246 __u8 rsvd0[14];
1247 __le16 nruhsd;
1248 struct nvme_fdp_ruh_status_desc ruhsd[];
1249 };
1250
1251 enum {
1252 NVME_ZRA_ZONE_REPORT = 0,
1253 NVME_ZRASF_ZONE_REPORT_ALL = 0,
1254 NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
1255 NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
1256 NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
1257 NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
1258 NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
1259 NVME_ZRASF_ZONE_STATE_FULL = 0x06,
1260 NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
1261 NVME_REPORT_ZONE_PARTIAL = 1,
1262 };
1263
1264 /* Features */
1265
1266 enum {
1267 NVME_TEMP_THRESH_MASK = 0xffff,
1268 NVME_TEMP_THRESH_SELECT_SHIFT = 16,
1269 NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
1270 };
1271
1272 struct nvme_feat_auto_pst {
1273 __le64 entries[32];
1274 };
1275
1276 enum {
1277 NVME_HOST_MEM_ENABLE = (1 << 0),
1278 NVME_HOST_MEM_RETURN = (1 << 1),
1279 };
1280
1281 struct nvme_feat_host_behavior {
1282 __u8 acre;
1283 __u8 etdas;
1284 __u8 lbafee;
1285 __u8 resv1[509];
1286 };
1287
1288 enum {
1289 NVME_ENABLE_ACRE = 1,
1290 NVME_ENABLE_LBAFEE = 1,
1291 };
1292
1293 /* Admin commands */
1294
1295 enum nvme_admin_opcode {
1296 nvme_admin_delete_sq = 0x00,
1297 nvme_admin_create_sq = 0x01,
1298 nvme_admin_get_log_page = 0x02,
1299 nvme_admin_delete_cq = 0x04,
1300 nvme_admin_create_cq = 0x05,
1301 nvme_admin_identify = 0x06,
1302 nvme_admin_abort_cmd = 0x08,
1303 nvme_admin_set_features = 0x09,
1304 nvme_admin_get_features = 0x0a,
1305 nvme_admin_async_event = 0x0c,
1306 nvme_admin_ns_mgmt = 0x0d,
1307 nvme_admin_activate_fw = 0x10,
1308 nvme_admin_download_fw = 0x11,
1309 nvme_admin_dev_self_test = 0x14,
1310 nvme_admin_ns_attach = 0x15,
1311 nvme_admin_keep_alive = 0x18,
1312 nvme_admin_directive_send = 0x19,
1313 nvme_admin_directive_recv = 0x1a,
1314 nvme_admin_virtual_mgmt = 0x1c,
1315 nvme_admin_nvme_mi_send = 0x1d,
1316 nvme_admin_nvme_mi_recv = 0x1e,
1317 nvme_admin_dbbuf = 0x7C,
1318 nvme_admin_format_nvm = 0x80,
1319 nvme_admin_security_send = 0x81,
1320 nvme_admin_security_recv = 0x82,
1321 nvme_admin_sanitize_nvm = 0x84,
1322 nvme_admin_get_lba_status = 0x86,
1323 nvme_admin_vendor_start = 0xC0,
1324 };
1325
1326 #define nvme_admin_opcode_name(opcode) { opcode, #opcode }
1327 #define show_admin_opcode_name(val) \
1328 __print_symbolic(val, \
1329 nvme_admin_opcode_name(nvme_admin_delete_sq), \
1330 nvme_admin_opcode_name(nvme_admin_create_sq), \
1331 nvme_admin_opcode_name(nvme_admin_get_log_page), \
1332 nvme_admin_opcode_name(nvme_admin_delete_cq), \
1333 nvme_admin_opcode_name(nvme_admin_create_cq), \
1334 nvme_admin_opcode_name(nvme_admin_identify), \
1335 nvme_admin_opcode_name(nvme_admin_abort_cmd), \
1336 nvme_admin_opcode_name(nvme_admin_set_features), \
1337 nvme_admin_opcode_name(nvme_admin_get_features), \
1338 nvme_admin_opcode_name(nvme_admin_async_event), \
1339 nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
1340 nvme_admin_opcode_name(nvme_admin_activate_fw), \
1341 nvme_admin_opcode_name(nvme_admin_download_fw), \
1342 nvme_admin_opcode_name(nvme_admin_dev_self_test), \
1343 nvme_admin_opcode_name(nvme_admin_ns_attach), \
1344 nvme_admin_opcode_name(nvme_admin_keep_alive), \
1345 nvme_admin_opcode_name(nvme_admin_directive_send), \
1346 nvme_admin_opcode_name(nvme_admin_directive_recv), \
1347 nvme_admin_opcode_name(nvme_admin_virtual_mgmt), \
1348 nvme_admin_opcode_name(nvme_admin_nvme_mi_send), \
1349 nvme_admin_opcode_name(nvme_admin_nvme_mi_recv), \
1350 nvme_admin_opcode_name(nvme_admin_dbbuf), \
1351 nvme_admin_opcode_name(nvme_admin_format_nvm), \
1352 nvme_admin_opcode_name(nvme_admin_security_send), \
1353 nvme_admin_opcode_name(nvme_admin_security_recv), \
1354 nvme_admin_opcode_name(nvme_admin_sanitize_nvm), \
1355 nvme_admin_opcode_name(nvme_admin_get_lba_status))
1356
1357 enum {
1358 NVME_QUEUE_PHYS_CONTIG = (1 << 0),
1359 NVME_CQ_IRQ_ENABLED = (1 << 1),
1360 NVME_SQ_PRIO_URGENT = (0 << 1),
1361 NVME_SQ_PRIO_HIGH = (1 << 1),
1362 NVME_SQ_PRIO_MEDIUM = (2 << 1),
1363 NVME_SQ_PRIO_LOW = (3 << 1),
1364 NVME_FEAT_ARBITRATION = 0x01,
1365 NVME_FEAT_POWER_MGMT = 0x02,
1366 NVME_FEAT_LBA_RANGE = 0x03,
1367 NVME_FEAT_TEMP_THRESH = 0x04,
1368 NVME_FEAT_ERR_RECOVERY = 0x05,
1369 NVME_FEAT_VOLATILE_WC = 0x06,
1370 NVME_FEAT_NUM_QUEUES = 0x07,
1371 NVME_FEAT_IRQ_COALESCE = 0x08,
1372 NVME_FEAT_IRQ_CONFIG = 0x09,
1373 NVME_FEAT_WRITE_ATOMIC = 0x0a,
1374 NVME_FEAT_ASYNC_EVENT = 0x0b,
1375 NVME_FEAT_AUTO_PST = 0x0c,
1376 NVME_FEAT_HOST_MEM_BUF = 0x0d,
1377 NVME_FEAT_TIMESTAMP = 0x0e,
1378 NVME_FEAT_KATO = 0x0f,
1379 NVME_FEAT_HCTM = 0x10,
1380 NVME_FEAT_NOPSC = 0x11,
1381 NVME_FEAT_RRL = 0x12,
1382 NVME_FEAT_PLM_CONFIG = 0x13,
1383 NVME_FEAT_PLM_WINDOW = 0x14,
1384 NVME_FEAT_HOST_BEHAVIOR = 0x16,
1385 NVME_FEAT_SANITIZE = 0x17,
1386 NVME_FEAT_FDP = 0x1d,
1387 NVME_FEAT_SW_PROGRESS = 0x80,
1388 NVME_FEAT_HOST_ID = 0x81,
1389 NVME_FEAT_RESV_MASK = 0x82,
1390 NVME_FEAT_RESV_PERSIST = 0x83,
1391 NVME_FEAT_WRITE_PROTECT = 0x84,
1392 NVME_FEAT_VENDOR_START = 0xC0,
1393 NVME_FEAT_VENDOR_END = 0xFF,
1394 NVME_LOG_SUPPORTED = 0x00,
1395 NVME_LOG_ERROR = 0x01,
1396 NVME_LOG_SMART = 0x02,
1397 NVME_LOG_FW_SLOT = 0x03,
1398 NVME_LOG_CHANGED_NS = 0x04,
1399 NVME_LOG_CMD_EFFECTS = 0x05,
1400 NVME_LOG_DEVICE_SELF_TEST = 0x06,
1401 NVME_LOG_TELEMETRY_HOST = 0x07,
1402 NVME_LOG_TELEMETRY_CTRL = 0x08,
1403 NVME_LOG_ENDURANCE_GROUP = 0x09,
1404 NVME_LOG_ANA = 0x0c,
1405 NVME_LOG_FEATURES = 0x12,
1406 NVME_LOG_RMI = 0x16,
1407 NVME_LOG_FDP_CONFIGS = 0x20,
1408 NVME_LOG_DISC = 0x70,
1409 NVME_LOG_RESERVATION = 0x80,
1410 NVME_FWACT_REPL = (0 << 3),
1411 NVME_FWACT_REPL_ACTV = (1 << 3),
1412 NVME_FWACT_ACTV = (2 << 3),
1413 };
1414
1415 struct nvme_supported_log {
1416 __le32 lids[256];
1417 };
1418
1419 enum {
1420 NVME_LIDS_LSUPP = 1 << 0,
1421 };
1422
1423 struct nvme_supported_features_log {
1424 __le32 fis[256];
1425 };
1426
1427 enum {
1428 NVME_FIS_FSUPP = 1 << 0,
1429 NVME_FIS_NSCPE = 1 << 20,
1430 NVME_FIS_CSCPE = 1 << 21,
1431 };
1432
1433 /* NVMe Namespace Write Protect State */
1434 enum {
1435 NVME_NS_NO_WRITE_PROTECT = 0,
1436 NVME_NS_WRITE_PROTECT,
1437 NVME_NS_WRITE_PROTECT_POWER_CYCLE,
1438 NVME_NS_WRITE_PROTECT_PERMANENT,
1439 };
1440
1441 #define NVME_MAX_CHANGED_NAMESPACES 1024
1442
1443 struct nvme_identify {
1444 __u8 opcode;
1445 __u8 flags;
1446 __u16 command_id;
1447 __le32 nsid;
1448 __u64 rsvd2[2];
1449 union nvme_data_ptr dptr;
1450 __u8 cns;
1451 __u8 rsvd3;
1452 __le16 ctrlid;
1453 __le16 cnssid;
1454 __u8 rsvd11;
1455 __u8 csi;
1456 __u32 rsvd12[4];
1457 };
1458
1459 #define NVME_IDENTIFY_DATA_SIZE 4096
1460
1461 struct nvme_features {
1462 __u8 opcode;
1463 __u8 flags;
1464 __u16 command_id;
1465 __le32 nsid;
1466 __u64 rsvd2[2];
1467 union nvme_data_ptr dptr;
1468 __le32 fid;
1469 __le32 dword11;
1470 __le32 dword12;
1471 __le32 dword13;
1472 __le32 dword14;
1473 __le32 dword15;
1474 };
1475
1476 struct nvme_host_mem_buf_desc {
1477 __le64 addr;
1478 __le32 size;
1479 __u32 rsvd;
1480 };
1481
1482 struct nvme_create_cq {
1483 __u8 opcode;
1484 __u8 flags;
1485 __u16 command_id;
1486 __u32 rsvd1[5];
1487 __le64 prp1;
1488 __u64 rsvd8;
1489 __le16 cqid;
1490 __le16 qsize;
1491 __le16 cq_flags;
1492 __le16 irq_vector;
1493 __u32 rsvd12[4];
1494 };
1495
1496 struct nvme_create_sq {
1497 __u8 opcode;
1498 __u8 flags;
1499 __u16 command_id;
1500 __u32 rsvd1[5];
1501 __le64 prp1;
1502 __u64 rsvd8;
1503 __le16 sqid;
1504 __le16 qsize;
1505 __le16 sq_flags;
1506 __le16 cqid;
1507 __u32 rsvd12[4];
1508 };
1509
1510 struct nvme_delete_queue {
1511 __u8 opcode;
1512 __u8 flags;
1513 __u16 command_id;
1514 __u32 rsvd1[9];
1515 __le16 qid;
1516 __u16 rsvd10;
1517 __u32 rsvd11[5];
1518 };
1519
1520 struct nvme_abort_cmd {
1521 __u8 opcode;
1522 __u8 flags;
1523 __u16 command_id;
1524 __u32 rsvd1[9];
1525 __le16 sqid;
1526 __u16 cid;
1527 __u32 rsvd11[5];
1528 };
1529
1530 struct nvme_download_firmware {
1531 __u8 opcode;
1532 __u8 flags;
1533 __u16 command_id;
1534 __u32 rsvd1[5];
1535 union nvme_data_ptr dptr;
1536 __le32 numd;
1537 __le32 offset;
1538 __u32 rsvd12[4];
1539 };
1540
1541 struct nvme_format_cmd {
1542 __u8 opcode;
1543 __u8 flags;
1544 __u16 command_id;
1545 __le32 nsid;
1546 __u64 rsvd2[4];
1547 __le32 cdw10;
1548 __u32 rsvd11[5];
1549 };
1550
1551 struct nvme_get_log_page_command {
1552 __u8 opcode;
1553 __u8 flags;
1554 __u16 command_id;
1555 __le32 nsid;
1556 __u64 rsvd2[2];
1557 union nvme_data_ptr dptr;
1558 __u8 lid;
1559 __u8 lsp; /* upper 4 bits reserved */
1560 __le16 numdl;
1561 __le16 numdu;
1562 __le16 lsi;
1563 union {
1564 struct {
1565 __le32 lpol;
1566 __le32 lpou;
1567 };
1568 __le64 lpo;
1569 };
1570 __u8 rsvd14[3];
1571 __u8 csi;
1572 __u32 rsvd15;
1573 };
1574
1575 struct nvme_directive_cmd {
1576 __u8 opcode;
1577 __u8 flags;
1578 __u16 command_id;
1579 __le32 nsid;
1580 __u64 rsvd2[2];
1581 union nvme_data_ptr dptr;
1582 __le32 numd;
1583 __u8 doper;
1584 __u8 dtype;
1585 __le16 dspec;
1586 __u8 endir;
1587 __u8 tdtype;
1588 __u16 rsvd15;
1589
1590 __u32 rsvd16[3];
1591 };
1592
1593 /*
1594 * Fabrics subcommands.
1595 */
1596 enum nvmf_fabrics_opcode {
1597 nvme_fabrics_command = 0x7f,
1598 };
1599
1600 enum nvmf_capsule_command {
1601 nvme_fabrics_type_property_set = 0x00,
1602 nvme_fabrics_type_connect = 0x01,
1603 nvme_fabrics_type_property_get = 0x04,
1604 nvme_fabrics_type_auth_send = 0x05,
1605 nvme_fabrics_type_auth_receive = 0x06,
1606 };
1607
1608 #define nvme_fabrics_type_name(type) { type, #type }
1609 #define show_fabrics_type_name(type) \
1610 __print_symbolic(type, \
1611 nvme_fabrics_type_name(nvme_fabrics_type_property_set), \
1612 nvme_fabrics_type_name(nvme_fabrics_type_connect), \
1613 nvme_fabrics_type_name(nvme_fabrics_type_property_get), \
1614 nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \
1615 nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))
1616
1617 /*
1618 * If not fabrics command, fctype will be ignored.
1619 */
1620 #define show_opcode_name(qid, opcode, fctype) \
1621 ((opcode) == nvme_fabrics_command ? \
1622 show_fabrics_type_name(fctype) : \
1623 ((qid) ? \
1624 show_nvm_opcode_name(opcode) : \
1625 show_admin_opcode_name(opcode)))
1626
1627 struct nvmf_common_command {
1628 __u8 opcode;
1629 __u8 resv1;
1630 __u16 command_id;
1631 __u8 fctype;
1632 __u8 resv2[35];
1633 __u8 ts[24];
1634 };
1635
1636 /*
1637 * The legal cntlid range a NVMe Target will provide.
1638 * Note that cntlid of value 0 is considered illegal in the fabrics world.
1639 * Devices based on earlier specs did not have the subsystem concept;
1640 * therefore, those devices had their cntlid value set to 0 as a result.
1641 */
1642 #define NVME_CNTLID_MIN 1
1643 #define NVME_CNTLID_MAX 0xffef
1644 #define NVME_CNTLID_DYNAMIC 0xffff
1645
1646 #define MAX_DISC_LOGS 255
1647
1648 /* Discovery log page entry flags (EFLAGS): */
1649 enum {
1650 NVME_DISC_EFLAGS_EPCSD = (1 << 1),
1651 NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
1652 };
1653
1654 /* Discovery log page entry */
1655 struct nvmf_disc_rsp_page_entry {
1656 __u8 trtype;
1657 __u8 adrfam;
1658 __u8 subtype;
1659 __u8 treq;
1660 __le16 portid;
1661 __le16 cntlid;
1662 __le16 asqsz;
1663 __le16 eflags;
1664 __u8 resv10[20];
1665 char trsvcid[NVMF_TRSVCID_SIZE];
1666 __u8 resv64[192];
1667 char subnqn[NVMF_NQN_FIELD_LEN];
1668 char traddr[NVMF_TRADDR_SIZE];
1669 union tsas {
1670 char common[NVMF_TSAS_SIZE];
1671 struct rdma {
1672 __u8 qptype;
1673 __u8 prtype;
1674 __u8 cms;
1675 __u8 resv3[5];
1676 __u16 pkey;
1677 __u8 resv10[246];
1678 } rdma;
1679 struct tcp {
1680 __u8 sectype;
1681 } tcp;
1682 } tsas;
1683 };
1684
1685 /* Discovery log page header */
1686 struct nvmf_disc_rsp_page_hdr {
1687 __le64 genctr;
1688 __le64 numrec;
1689 __le16 recfmt;
1690 __u8 resv14[1006];
1691 struct nvmf_disc_rsp_page_entry entries[];
1692 };
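/*
 * Layout note (derived from the structures above): the discovery log page
 * header and each nvmf_disc_rsp_page_entry are 1024 bytes, so record i of
 * numrec starts at byte offset 1024 * (i + 1) within the log page.
 */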
1693
1694 enum {
1695 NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
1696 };
1697
1698 struct nvmf_connect_command {
1699 __u8 opcode;
1700 __u8 resv1;
1701 __u16 command_id;
1702 __u8 fctype;
1703 __u8 resv2[19];
1704 union nvme_data_ptr dptr;
1705 __le16 recfmt;
1706 __le16 qid;
1707 __le16 sqsize;
1708 __u8 cattr;
1709 __u8 resv3;
1710 __le32 kato;
1711 __u8 resv4[12];
1712 };
1713
1714 enum {
1715 NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
1716 NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
1717 };
1718
1719 struct nvmf_connect_data {
1720 uuid_t hostid;
1721 __le16 cntlid;
1722 char resv4[238];
1723 char subsysnqn[NVMF_NQN_FIELD_LEN];
1724 char hostnqn[NVMF_NQN_FIELD_LEN];
1725 char resv5[256];
1726 };
1727
1728 struct nvmf_property_set_command {
1729 __u8 opcode;
1730 __u8 resv1;
1731 __u16 command_id;
1732 __u8 fctype;
1733 __u8 resv2[35];
1734 __u8 attrib;
1735 __u8 resv3[3];
1736 __le32 offset;
1737 __le64 value;
1738 __u8 resv4[8];
1739 };
1740
1741 struct nvmf_property_get_command {
1742 __u8 opcode;
1743 __u8 resv1;
1744 __u16 command_id;
1745 __u8 fctype;
1746 __u8 resv2[35];
1747 __u8 attrib;
1748 __u8 resv3[3];
1749 __le32 offset;
1750 __u8 resv4[16];
1751 };
1752
1753 struct nvmf_auth_common_command {
1754 __u8 opcode;
1755 __u8 resv1;
1756 __u16 command_id;
1757 __u8 fctype;
1758 __u8 resv2[19];
1759 union nvme_data_ptr dptr;
1760 __u8 resv3;
1761 __u8 spsp0;
1762 __u8 spsp1;
1763 __u8 secp;
1764 __le32 al_tl;
1765 __u8 resv4[16];
1766 };
1767
1768 struct nvmf_auth_send_command {
1769 __u8 opcode;
1770 __u8 resv1;
1771 __u16 command_id;
1772 __u8 fctype;
1773 __u8 resv2[19];
1774 union nvme_data_ptr dptr;
1775 __u8 resv3;
1776 __u8 spsp0;
1777 __u8 spsp1;
1778 __u8 secp;
1779 __le32 tl;
1780 __u8 resv4[16];
1781 };
1782
1783 struct nvmf_auth_receive_command {
1784 __u8 opcode;
1785 __u8 resv1;
1786 __u16 command_id;
1787 __u8 fctype;
1788 __u8 resv2[19];
1789 union nvme_data_ptr dptr;
1790 __u8 resv3;
1791 __u8 spsp0;
1792 __u8 spsp1;
1793 __u8 secp;
1794 __le32 al;
1795 __u8 resv4[16];
1796 };
1797
1798 /* Value for secp */
1799 enum {
1800 NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
1801 };
1802
1803 /* Defined value for auth_type */
1804 enum {
1805 NVME_AUTH_COMMON_MESSAGES = 0x00,
1806 NVME_AUTH_DHCHAP_MESSAGES = 0x01,
1807 };
1808
1809 /* Defined messages for auth_id */
1810 enum {
1811 NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
1812 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
1813 NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
1814 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
1815 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
1816 NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
1817 NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
1818 };
1819
1820 struct nvmf_auth_dhchap_protocol_descriptor {
1821 __u8 authid;
1822 __u8 rsvd;
1823 __u8 halen;
1824 __u8 dhlen;
1825 __u8 idlist[60];
1826 };
1827
1828 enum {
1829 NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
1830 };
1831
1832 /* Defined hash functions for DH-HMAC-CHAP authentication */
1833 enum {
1834 NVME_AUTH_HASH_SHA256 = 0x01,
1835 NVME_AUTH_HASH_SHA384 = 0x02,
1836 NVME_AUTH_HASH_SHA512 = 0x03,
1837 NVME_AUTH_HASH_INVALID = 0xff,
1838 };
1839
1840 /* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
1841 enum {
1842 NVME_AUTH_DHGROUP_NULL = 0x00,
1843 NVME_AUTH_DHGROUP_2048 = 0x01,
1844 NVME_AUTH_DHGROUP_3072 = 0x02,
1845 NVME_AUTH_DHGROUP_4096 = 0x03,
1846 NVME_AUTH_DHGROUP_6144 = 0x04,
1847 NVME_AUTH_DHGROUP_8192 = 0x05,
1848 NVME_AUTH_DHGROUP_INVALID = 0xff,
1849 };
1850
1851 enum {
1852 NVME_AUTH_SECP_NOSC = 0x00,
1853 NVME_AUTH_SECP_SC = 0x01,
1854 NVME_AUTH_SECP_NEWTLSPSK = 0x02,
1855 NVME_AUTH_SECP_REPLACETLSPSK = 0x03,
1856 };
1857
1858 union nvmf_auth_protocol {
1859 struct nvmf_auth_dhchap_protocol_descriptor dhchap;
1860 };
1861
1862 struct nvmf_auth_dhchap_negotiate_data {
1863 __u8 auth_type;
1864 __u8 auth_id;
1865 __le16 rsvd;
1866 __le16 t_id;
1867 __u8 sc_c;
1868 __u8 napd;
1869 union nvmf_auth_protocol auth_protocol[];
1870 };
1871
1872 struct nvmf_auth_dhchap_challenge_data {
1873 __u8 auth_type;
1874 __u8 auth_id;
1875 __u16 rsvd1;
1876 __le16 t_id;
1877 __u8 hl;
1878 __u8 rsvd2;
1879 __u8 hashid;
1880 __u8 dhgid;
1881 __le16 dhvlen;
1882 __le32 seqnum;
1883 /* 'hl' bytes of challenge value */
1884 __u8 cval[];
1885 /* followed by 'dhvlen' bytes of DH value */
1886 };
1887
1888 struct nvmf_auth_dhchap_reply_data {
1889 __u8 auth_type;
1890 __u8 auth_id;
1891 __le16 rsvd1;
1892 __le16 t_id;
1893 __u8 hl;
1894 __u8 rsvd2;
1895 __u8 cvalid;
1896 __u8 rsvd3;
1897 __le16 dhvlen;
1898 __le32 seqnum;
1899 /* 'hl' bytes of response data */
1900 __u8 rval[];
1901 /* followed by 'hl' bytes of Challenge value */
1902 /* followed by 'dhvlen' bytes of DH value */
1903 };
1904
1905 enum {
1906 NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
1907 };
1908
1909 struct nvmf_auth_dhchap_success1_data {
1910 __u8 auth_type;
1911 __u8 auth_id;
1912 __le16 rsvd1;
1913 __le16 t_id;
1914 __u8 hl;
1915 __u8 rsvd2;
1916 __u8 rvalid;
1917 __u8 rsvd3[7];
1918 /* 'hl' bytes of response value */
1919 __u8 rval[];
1920 };
1921
1922 struct nvmf_auth_dhchap_success2_data {
1923 __u8 auth_type;
1924 __u8 auth_id;
1925 __le16 rsvd1;
1926 __le16 t_id;
1927 __u8 rsvd2[10];
1928 };
1929
1930 struct nvmf_auth_dhchap_failure_data {
1931 __u8 auth_type;
1932 __u8 auth_id;
1933 __le16 rsvd1;
1934 __le16 t_id;
1935 __u8 rescode;
1936 __u8 rescode_exp;
1937 };
1938
1939 enum {
1940 NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
1941 };
1942
1943 enum {
1944 NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
1945 NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
1946 NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
1947 NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
1948 NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
1949 NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
1950 NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
1951 };
1952
1953
1954 struct nvme_dbbuf {
1955 __u8 opcode;
1956 __u8 flags;
1957 __u16 command_id;
1958 __u32 rsvd1[5];
1959 __le64 prp1;
1960 __le64 prp2;
1961 __u32 rsvd12[6];
1962 };
1963
1964 struct streams_directive_params {
1965 __le16 msl;
1966 __le16 nssa;
1967 __le16 nsso;
1968 __u8 rsvd[10];
1969 __le32 sws;
1970 __le16 sgs;
1971 __le16 nsa;
1972 __le16 nso;
1973 __u8 rsvd2[6];
1974 };
1975
1976 struct nvme_command {
1977 union {
1978 struct nvme_common_command common;
1979 struct nvme_rw_command rw;
1980 struct nvme_identify identify;
1981 struct nvme_features features;
1982 struct nvme_create_cq create_cq;
1983 struct nvme_create_sq create_sq;
1984 struct nvme_delete_queue delete_queue;
1985 struct nvme_download_firmware dlfw;
1986 struct nvme_format_cmd format;
1987 struct nvme_dsm_cmd dsm;
1988 struct nvme_write_zeroes_cmd write_zeroes;
1989 struct nvme_zone_mgmt_send_cmd zms;
1990 struct nvme_zone_mgmt_recv_cmd zmr;
1991 struct nvme_abort_cmd abort;
1992 struct nvme_get_log_page_command get_log_page;
1993 struct nvmf_common_command fabrics;
1994 struct nvmf_connect_command connect;
1995 struct nvmf_property_set_command prop_set;
1996 struct nvmf_property_get_command prop_get;
1997 struct nvmf_auth_common_command auth_common;
1998 struct nvmf_auth_send_command auth_send;
1999 struct nvmf_auth_receive_command auth_receive;
2000 struct nvme_dbbuf dbbuf;
2001 struct nvme_directive_cmd directive;
2002 struct nvme_io_mgmt_recv_cmd imr;
2003 };
2004 };
2005
static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
	return cmd->common.opcode == nvme_fabrics_command;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const char *nvme_get_error_status_str(u16 status);
const char *nvme_get_opcode_str(u8 opcode);
const char *nvme_get_admin_opcode_str(u8 opcode);
const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}

static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
	return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */

static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
	return qid ? nvme_get_opcode_str(opcode) :
		nvme_get_admin_opcode_str(opcode);
}

static inline const char *nvme_fabrics_opcode_str(
		int qid, const struct nvme_command *cmd)
{
	if (nvme_is_fabrics(cmd))
		return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);

	return nvme_opcode_str(qid, cmd->common.opcode);
}
2050
2051 struct nvme_error_slot {
2052 __le64 error_count;
2053 __le16 sqid;
2054 __le16 cmdid;
2055 __le16 status_field;
2056 __le16 param_error_location;
2057 __le64 lba;
2058 __le32 nsid;
2059 __u8 vs;
2060 __u8 resv[3];
2061 __le64 cs;
2062 __u8 resv2[24];
2063 };
2064
static inline bool nvme_is_write(const struct nvme_command *cmd)
{
	/*
	 * What a mess...
	 *
	 * Why can't we simply have a Fabrics In and Fabrics out command?
	 */
	if (unlikely(nvme_is_fabrics(cmd)))
		return cmd->fabrics.fctype & 1;
	return cmd->common.opcode & 1;
}
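/*
 * Worked example: nvme_cmd_write (0x01) has opcode bit 0 set and is treated
 * as a host-to-controller data transfer, while nvme_cmd_read (0x02) is not;
 * for Fabrics commands the same bit-0 convention is applied to fctype.
 */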
2076
2077 enum {
2078 /*
2079 * Generic Command Status:
2080 */
2081 NVME_SCT_GENERIC = 0x0,
2082 NVME_SC_SUCCESS = 0x0,
2083 NVME_SC_INVALID_OPCODE = 0x1,
2084 NVME_SC_INVALID_FIELD = 0x2,
2085 NVME_SC_CMDID_CONFLICT = 0x3,
2086 NVME_SC_DATA_XFER_ERROR = 0x4,
2087 NVME_SC_POWER_LOSS = 0x5,
2088 NVME_SC_INTERNAL = 0x6,
2089 NVME_SC_ABORT_REQ = 0x7,
2090 NVME_SC_ABORT_QUEUE = 0x8,
2091 NVME_SC_FUSED_FAIL = 0x9,
2092 NVME_SC_FUSED_MISSING = 0xa,
2093 NVME_SC_INVALID_NS = 0xb,
2094 NVME_SC_CMD_SEQ_ERROR = 0xc,
2095 NVME_SC_SGL_INVALID_LAST = 0xd,
2096 NVME_SC_SGL_INVALID_COUNT = 0xe,
2097 NVME_SC_SGL_INVALID_DATA = 0xf,
2098 NVME_SC_SGL_INVALID_METADATA = 0x10,
2099 NVME_SC_SGL_INVALID_TYPE = 0x11,
2100 NVME_SC_CMB_INVALID_USE = 0x12,
2101 NVME_SC_PRP_INVALID_OFFSET = 0x13,
2102 NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
2103 NVME_SC_OP_DENIED = 0x15,
2104 NVME_SC_SGL_INVALID_OFFSET = 0x16,
2105 NVME_SC_RESERVED = 0x17,
2106 NVME_SC_HOST_ID_INCONSIST = 0x18,
2107 NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
2108 NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
2109 NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
2110 NVME_SC_SANITIZE_FAILED = 0x1C,
2111 NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
2112 NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
2113 NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
2114 NVME_SC_NS_WRITE_PROTECTED = 0x20,
2115 NVME_SC_CMD_INTERRUPTED = 0x21,
2116 NVME_SC_TRANSIENT_TR_ERR = 0x22,
2117 NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
2118 NVME_SC_INVALID_IO_CMD_SET = 0x2C,
2119
2120 NVME_SC_LBA_RANGE = 0x80,
2121 NVME_SC_CAP_EXCEEDED = 0x81,
2122 NVME_SC_NS_NOT_READY = 0x82,
2123 NVME_SC_RESERVATION_CONFLICT = 0x83,
2124 NVME_SC_FORMAT_IN_PROGRESS = 0x84,
2125
2126 /*
2127 * Command Specific Status:
2128 */
2129 NVME_SCT_COMMAND_SPECIFIC = 0x100,
2130 NVME_SC_CQ_INVALID = 0x100,
2131 NVME_SC_QID_INVALID = 0x101,
2132 NVME_SC_QUEUE_SIZE = 0x102,
2133 NVME_SC_ABORT_LIMIT = 0x103,
2134 NVME_SC_ABORT_MISSING = 0x104,
2135 NVME_SC_ASYNC_LIMIT = 0x105,
2136 NVME_SC_FIRMWARE_SLOT = 0x106,
2137 NVME_SC_FIRMWARE_IMAGE = 0x107,
2138 NVME_SC_INVALID_VECTOR = 0x108,
2139 NVME_SC_INVALID_LOG_PAGE = 0x109,
2140 NVME_SC_INVALID_FORMAT = 0x10a,
2141 NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
2142 NVME_SC_INVALID_QUEUE = 0x10c,
2143 NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
2144 NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
2145 NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
2146 NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
2147 NVME_SC_FW_NEEDS_RESET = 0x111,
2148 NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
2149 NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113,
2150 NVME_SC_OVERLAPPING_RANGE = 0x114,
2151 NVME_SC_NS_INSUFFICIENT_CAP = 0x115,
2152 NVME_SC_NS_ID_UNAVAILABLE = 0x116,
2153 NVME_SC_NS_ALREADY_ATTACHED = 0x118,
2154 NVME_SC_NS_IS_PRIVATE = 0x119,
2155 NVME_SC_NS_NOT_ATTACHED = 0x11a,
2156 NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
2157 NVME_SC_CTRL_LIST_INVALID = 0x11c,
2158 NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
2159 NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
2160 NVME_SC_CTRL_ID_INVALID = 0x11f,
2161 NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
2162 NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
2163 NVME_SC_RES_ID_INVALID = 0x122,
2164 NVME_SC_PMR_SAN_PROHIBITED = 0x123,
2165 NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
2166 NVME_SC_ANA_ATTACH_FAILED = 0x125,
2167
2168 /*
2169 * I/O Command Set Specific - NVM commands:
2170 */
2171 NVME_SC_BAD_ATTRIBUTES = 0x180,
2172 NVME_SC_INVALID_PI = 0x181,
2173 NVME_SC_READ_ONLY = 0x182,
2174 NVME_SC_CMD_SIZE_LIM_EXCEEDED = 0x183,
2175
2176 /*
2177 * I/O Command Set Specific - Fabrics commands:
2178 */
2179 NVME_SC_CONNECT_FORMAT = 0x180,
2180 NVME_SC_CONNECT_CTRL_BUSY = 0x181,
2181 NVME_SC_CONNECT_INVALID_PARAM = 0x182,
2182 NVME_SC_CONNECT_RESTART_DISC = 0x183,
2183 NVME_SC_CONNECT_INVALID_HOST = 0x184,
2184
2185 NVME_SC_DISCOVERY_RESTART = 0x190,
2186 NVME_SC_AUTH_REQUIRED = 0x191,
2187
2188 /*
2189 * I/O Command Set Specific - Zoned commands:
2190 */
2191 NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8,
2192 NVME_SC_ZONE_FULL = 0x1b9,
2193 NVME_SC_ZONE_READ_ONLY = 0x1ba,
2194 NVME_SC_ZONE_OFFLINE = 0x1bb,
2195 NVME_SC_ZONE_INVALID_WRITE = 0x1bc,
2196 NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd,
2197 NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be,
2198 NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf,
2199
2200 /*
2201 * Media and Data Integrity Errors:
2202 */
2203 NVME_SCT_MEDIA_ERROR = 0x200,
2204 NVME_SC_WRITE_FAULT = 0x280,
2205 NVME_SC_READ_ERROR = 0x281,
2206 NVME_SC_GUARD_CHECK = 0x282,
2207 NVME_SC_APPTAG_CHECK = 0x283,
2208 NVME_SC_REFTAG_CHECK = 0x284,
2209 NVME_SC_COMPARE_FAILED = 0x285,
2210 NVME_SC_ACCESS_DENIED = 0x286,
2211 NVME_SC_UNWRITTEN_BLOCK = 0x287,
2212
2213 /*
2214 * Path-related Errors:
2215 */
2216 NVME_SCT_PATH = 0x300,
2217 NVME_SC_INTERNAL_PATH_ERROR = 0x300,
2218 NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
2219 NVME_SC_ANA_INACCESSIBLE = 0x302,
2220 NVME_SC_ANA_TRANSITION = 0x303,
2221 NVME_SC_CTRL_PATH_ERROR = 0x360,
2222 NVME_SC_HOST_PATH_ERROR = 0x370,
2223 NVME_SC_HOST_ABORTED_CMD = 0x371,
2224
2225 NVME_SC_MASK = 0x00ff, /* Status Code */
2226 NVME_SCT_MASK = 0x0700, /* Status Code Type */
2227 NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,
2228
2229 NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */
2230 NVME_STATUS_MORE = 0x2000,
2231 NVME_STATUS_DNR = 0x4000, /* Do Not Retry */
2232 };
2233
2234 #define NVME_SCT(status) ((status) >> 8 & 7)
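/*
 * Illustrative sketch (assumed helper name): classifying a completion status
 * with the masks above. The caller is assumed to have already stripped the
 * phase bit, e.g. status = le16_to_cpu(cqe->status) >> 1.
 */
static inline bool nvme_example_is_path_error(__u16 status)
{
	return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}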
2235
2236 struct nvme_completion {
2237 /*
2238 * Used by Admin and Fabrics commands to return data:
2239 */
2240 union nvme_result {
2241 __le16 u16;
2242 __le32 u32;
2243 __le64 u64;
2244 } result;
2245 __le16 sq_head; /* how much of this queue may be reclaimed */
2246 __le16 sq_id; /* submission queue that generated this entry */
2247 __u16 command_id; /* of the command which completed */
2248 __le16 status; /* did the command fail, and if so, why? */
2249 };
2250
2251 #define NVME_VS(major, minor, tertiary) \
2252 (((major) << 16) | ((minor) << 8) | (tertiary))
2253
2254 #define NVME_MAJOR(ver) ((ver) >> 16)
2255 #define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
2256 #define NVME_TERTIARY(ver) ((ver) & 0xff)
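/*
 * Worked example: NVME_VS(1, 4, 0) is 0x00010400, the Version register value
 * for an NVMe 1.4.0 controller; NVME_MAJOR(), NVME_MINOR() and
 * NVME_TERTIARY() recover 1, 4 and 0 from a value read at NVME_REG_VS.
 */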
2257
2258 enum {
2259 NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00,
2260 };
2261
2262 enum {
2263 NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00,
2264 NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01,
2265 NVME_PR_LOG_RESERVATION_RELEASED = 0x02,
2266 NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03,
2267 };
2268
2269 enum {
2270 NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1,
2271 NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2,
2272 NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3,
2273 };
2274
2275 struct nvme_pr_log {
2276 __le64 count;
2277 __u8 type;
2278 __u8 nr_pages;
2279 __u8 rsvd1[2];
2280 __le32 nsid;
2281 __u8 rsvd2[48];
2282 };
2283
2284 struct nvmet_pr_register_data {
2285 __le64 crkey;
2286 __le64 nrkey;
2287 };
2288
2289 struct nvmet_pr_acquire_data {
2290 __le64 crkey;
2291 __le64 prkey;
2292 };
2293
2294 struct nvmet_pr_release_data {
2295 __le64 crkey;
2296 };
2297
2298 enum nvme_pr_capabilities {
2299 NVME_PR_SUPPORT_PTPL = 1,
2300 NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1,
2301 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2,
2302 NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3,
2303 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4,
2304 NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5,
2305 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6,
2306 NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7,
2307 };
2308
2309 enum nvme_pr_register_action {
2310 NVME_PR_REGISTER_ACT_REG = 0,
2311 NVME_PR_REGISTER_ACT_UNREG = 1,
2312 NVME_PR_REGISTER_ACT_REPLACE = 1 << 1,
2313 };
2314
2315 enum nvme_pr_acquire_action {
2316 NVME_PR_ACQUIRE_ACT_ACQUIRE = 0,
2317 NVME_PR_ACQUIRE_ACT_PREEMPT = 1,
2318 NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1,
2319 };
2320
2321 enum nvme_pr_release_action {
2322 NVME_PR_RELEASE_ACT_RELEASE = 0,
2323 NVME_PR_RELEASE_ACT_CLEAR = 1,
2324 };
2325
2326 enum nvme_pr_change_ptpl {
2327 NVME_PR_CPTPL_NO_CHANGE = 0,
2328 NVME_PR_CPTPL_RESV = 1 << 30,
2329 NVME_PR_CPTPL_CLEARED = 2 << 30,
2330 NVME_PR_CPTPL_PERSIST = 3 << 30,
2331 };
2332
2333 #define NVME_PR_IGNORE_KEY (1 << 3)
2334
2335 #endif /* _LINUX_NVME_H */
2336