/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>
#include <linux/ratelimit_types.h>

#include <trace/events/block.h>

extern const struct pr_ops nvme_pr_ops;

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
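
/*
 * For example, a 16 KiB data transfer always spans four controller
 * pages (16384 >> NVME_CTRL_PAGE_SHIFT == 4), regardless of the
 * kernel's own PAGE_SIZE.
 */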

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
extern struct mutex nvme_subsystems_lock;

/*
 * List of workarounds for devices that require behavior not specified
 * in the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE = (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than
	 * 0 or 1 correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS = (1 << 1),

	/*
	 * The controller deterministically returns zeroes when reading
	 * logical blocks that have been deallocated.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),

	/*
	 * The controller needs a delay before the host starts checking
	 * device readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST = (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),

	/*
	 * Problems seen with concurrent commands
	 */
	NVME_QUIRK_QDEPTH_ONE = (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR = (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES = (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS = (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID = (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),

	/*
	 * Disables simple suspend/resume path.
	 */
	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),

	/*
	 * MSI (but not MSI-X) interrupts are broken and never fire.
	 */
	NVME_QUIRK_BROKEN_MSI = (1 << 21),

	/*
	 * Align dma pool segment size to 512 bytes
	 */
	NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
};

static inline char *nvme_quirk_name(enum nvme_quirks q)
{
	switch (q) {
	case NVME_QUIRK_STRIPE_SIZE:
		return "stripe_size";
	case NVME_QUIRK_IDENTIFY_CNS:
		return "identify_cns";
	case NVME_QUIRK_DEALLOCATE_ZEROES:
		return "deallocate_zeroes";
	case NVME_QUIRK_DELAY_BEFORE_CHK_RDY:
		return "delay_before_chk_rdy";
	case NVME_QUIRK_NO_APST:
		return "no_apst";
	case NVME_QUIRK_NO_DEEPEST_PS:
		return "no_deepest_ps";
	case NVME_QUIRK_QDEPTH_ONE:
		return "qdepth_one";
	case NVME_QUIRK_MEDIUM_PRIO_SQ:
		return "medium_prio_sq";
	case NVME_QUIRK_IGNORE_DEV_SUBNQN:
		return "ignore_dev_subnqn";
	case NVME_QUIRK_DISABLE_WRITE_ZEROES:
		return "disable_write_zeroes";
	case NVME_QUIRK_SIMPLE_SUSPEND:
		return "simple_suspend";
	case NVME_QUIRK_SINGLE_VECTOR:
		return "single_vector";
	case NVME_QUIRK_128_BYTES_SQES:
		return "128_bytes_sqes";
	case NVME_QUIRK_SHARED_TAGS:
		return "shared_tags";
	case NVME_QUIRK_NO_TEMP_THRESH_CHANGE:
		return "no_temp_thresh_change";
	case NVME_QUIRK_NO_NS_DESC_LIST:
		return "no_ns_desc_list";
	case NVME_QUIRK_DMA_ADDRESS_BITS_48:
		return "dma_address_bits_48";
	case NVME_QUIRK_SKIP_CID_GEN:
		return "skip_cid_gen";
	case NVME_QUIRK_BOGUS_NID:
		return "bogus_nid";
	case NVME_QUIRK_NO_SECONDARY_TEMP_THRESH:
		return "no_secondary_temp_thresh";
	case NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND:
		return "force_no_simple_suspend";
	case NVME_QUIRK_BROKEN_MSI:
		return "broken_msi";
	case NVME_QUIRK_DMAPOOL_ALIGN_512:
		return "dmapool_align_512";
	}

	return "unknown";
}

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command *cmd;
	union nvme_result result;
	u8 genctr;
	u8 retries;
	u8 flags;
	u16 status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long start_time;
#endif
	struct nvme_ctrl *ctrl;
};
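
/*
 * A minimal sketch of how a transport driver embeds this structure in
 * its request-private data (the name "foo_request" is hypothetical,
 * for illustration only):
 *
 *	struct foo_request {
 *		struct nvme_request req;	// must be the first member
 *		u32 foo_state;			// transport-specific data
 *	};
 *
 * With that layout, nvme_req() below recovers the struct nvme_request
 * from any struct request via blk_mq_rq_to_pdu().
 */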

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED = (1 << 0),
	NVME_REQ_USERCMD = (1 << 1),
	NVME_MPATH_IO_STATS = (1 << 2),
	NVME_MPATH_CNT_ACTIVE = (1 << 3),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
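
/*
 * Example: the admin queue has no queuedata, so its requests map to
 * qid 0; a request on the first I/O hardware context (queue_num 0)
 * maps to qid 1, matching NVMe's 1-based I/O queue identifiers.
 */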

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				immediately disabled or failed. This state is
 *				entered after all async event processing has
 *				completed and before namespace removal and
 *				controller deletion progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};
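
/*
 * An illustrative (not exhaustive) sketch of common transitions,
 * assuming the usual reset and teardown flows:
 *
 *	NEW -> CONNECTING -> LIVE -> RESETTING -> CONNECTING -> LIVE
 *	LIVE -> DELETING -> DELETING_NOIO
 *
 * DEAD is entered when a controller is unresponsive during removal;
 * nvme_change_ctrl_state() enforces the actual legal transitions.
 */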

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED = 0,
	NVME_CTRL_ADMIN_Q_STOPPED = 1,
	NVME_CTRL_STARTED_ONCE = 2,
	NVME_CTRL_STOPPED = 3,
	NVME_CTRL_SKIP_ID_CNS_CS = 4,
	NVME_CTRL_DIRTY_CAPABILITY = 5,
	NVME_CTRL_FROZEN = 6,
};

struct nvme_ctrl {
	bool comp_seen;
	bool identified;
	bool passthru_err_log_enabled;
	enum nvme_ctrl_state state;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct mutex namespaces_lock;
	struct srcu_struct srcu;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	u16 cntlid;

	u16 mtfa;
	u32 ctrl_config;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u8 dmrl;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
	atomic_t nr_active;
#endif

#ifdef CONFIG_NVME_HOST_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif
	key_serial_t tls_pskid;

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u16 hmmaxd;
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;

	u16 awupf;	/* 0's based value. */
};

static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
	NVME_IOPOLICY_QD,
};

struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	enum nvme_subsys_type subtype;
	u16 vendor_id;
	struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
	u8 csi;
};

/*
 * Anchor structure for namespaces. There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it. For private
 * namespaces there is a 1:1 relation to our namespace structures, that
 * is ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	struct nvme_ns_ids ids;
	u8 lba_shift;
	u16 ms;
	u16 pi_size;
	u8 pi_type;
	u8 guard_type;
	struct list_head entry;
	struct kref ref;
	bool shared;
	bool rotational;
	bool passthru_err_log_enabled;
	struct nvme_effects_log *effects;
	u64 nuse;
	unsigned ns_id;
	int instance;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;

	struct ratelimit_state rs_nuse;

	struct cdev cdev;
	struct device cdev_device;

	struct gendisk *disk;

	u16 nr_plids;
	u16 *plids;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
	struct work_struct partition_scan_work;
	struct mutex lock;
	unsigned long flags;
	struct delayed_work remove_work;
	unsigned int delayed_removal_secs;
#define NVME_NSHEAD_DISK_LIVE		0
#define NVME_NSHEAD_QUEUE_IF_NO_PATH	1
	struct nvme_ns __rcu *current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0,		/* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1,	/* support getting generated md */
	NVME_NS_DEAC = 1 << 2,			/* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4
#define NVME_NS_SYSFS_ATTR_LINK	5

	struct cdev cdev;
	struct device cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
{
	return head->pi_type && head->ms == head->pi_size;
}

static inline unsigned long nvme_get_virt_boundary(struct nvme_ctrl *ctrl,
		bool is_admin)
{
	return NVME_CTRL_PAGE_SIZE - 1;
}

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	int (*subsystem_reset)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
	unsigned long (*get_virt_boundary)(struct nvme_ctrl *ctrl, bool is_admin);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)
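
/*
 * For example, with genctr 0x5 and blk-mq tag 0x2a the command_id is
 * 0x502a: nvme_genctr_from_cid(0x502a) recovers 0x5 and
 * nvme_tag_from_cid(0x502a) recovers 0x2a. The 4-bit generation
 * counter lets stale completions be detected across tag reuse.
 */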

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}
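
/*
 * Example: Identify strings are space padded, so for the 8-byte field
 * "fw1.0   " nvme_strlen() returns 5.
 */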

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
		return -ENOTTY;
	return ctrl->ops->subsystem_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
{
	return sector >> (head->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
{
	return lba << (head->lba_shift - SECTOR_SHIFT);
}
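
/*
 * Example: for a namespace formatted with 4 KiB logical blocks
 * (lba_shift == 12), sector 8 maps to LBA 1 and LBA 1 maps back to
 * sector 8, since 4096 / 512 == 8.
 */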

/*
 * Convert byte length to NVMe's 0-based number of dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
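
/*
 * Example: a 4096-byte buffer yields (4096 >> 2) - 1 == 1023, i.e.
 * 1024 dwords encoded in a command's 0-based NUMD field.
 */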

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so. If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	/* strip the phase tag, which occupies bit 0 of the CQE status word */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
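
/*
 * Async event commands are not tracked by blk-mq: the admin tag space
 * at and above NVME_AQ_BLK_MQ_DEPTH on qid 0 is reserved for them,
 * which is what the check above relies on.
 */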

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
int nvme_add_ctrl(struct nvme_ctrl *ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (likely(state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live, state);
}

/*
 * The NSID shall be unique for all shared namespaces, and for private
 * namespaces if at least one of the following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

/*
 * Flags for __nvme_submit_sync_cmd()
 */
typedef __u32 __bitwise nvme_submit_flags_t;

enum {
	/* Insert request at the head of the queue */
	NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
	/* Set BLK_MQ_REQ_NOWAIT when allocating request */
	NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
	/* Set BLK_MQ_REQ_RESERVED when allocating request */
	NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
	/* Retry command when NVME_STATUS_DNR is not set in the result */
	NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
};

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags);
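
/*
 * A minimal usage sketch (illustrative only): issue a command on the
 * admin queue, inserted at the head and allocated with NOWAIT:
 *
 *	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0,
 *				     NVME_QID_ANY,
 *				     NVME_SUBMIT_AT_HEAD | NVME_SUBMIT_NOWAIT);
 */
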
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		void *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		void *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_attr_groups[];
extern const struct attribute_group nvme_ns_mpath_attr_group;
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
extern const struct attribute_group *nvme_subsys_attrs_groups[];
extern const struct attribute_group *nvme_dev_attr_groups[];
extern const struct block_device_operations nvme_bdev_ops;

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns);
void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_put_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute dev_attr_queue_depth;
extern struct device_attribute dev_attr_numa_nodes;
extern struct device_attribute dev_attr_delayed_removal_secs;
extern struct device_attribute subsys_attr_iopolicy;

static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return disk->fops == &nvme_ns_head_ops;
}
static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head)
{
	if (test_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags))
		return true;
	return false;
}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_put_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_request(struct request *rq)
{
}
static inline void nvme_mpath_end_request(struct request *rq)
{
}
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return false;
}
static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head)
{
	return false;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
		enum blk_unique_id type);

struct nvme_zone_info {
	u64 zone_size;
	unsigned int max_open_zones;
	unsigned int max_active_zones;
};

int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, struct blk_report_zones_args *args);
int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
		struct nvme_zone_info *zi);
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
		struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	WARN_ON(nvme_disk_is_ns_head(disk));
	return disk->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline void nvme_start_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
	blk_mq_start_request(rq);
}

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & (NVME_CTRL_SGLS_BYTE_ALIGNED |
			     NVME_CTRL_SGLS_DWORD_ALIGNED);
}

static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS)
		return true;
	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;
}

#ifdef CONFIG_NVME_HOST_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline int __init nvme_init_auth(void)
{
	return 0;
}
static inline void __exit nvme_exit_auth(void)
{
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
static inline void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#endif /* _NVME_H */