/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>
#include <linux/ratelimit_types.h>

#include <trace/events/block.h>

extern const struct pr_ops nvme_pr_ops;

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
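
/*
 * Worked example (illustrative, not used by the driver itself): with the
 * fixed 4K controller page size above, a hypothetical 10 KiB transfer
 * spans DIV_ROUND_UP(10240, NVME_CTRL_PAGE_SIZE) == 3 controller pages,
 * independent of the kernel's own PAGE_SIZE.
 */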

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
extern struct mutex nvme_subsystems_lock;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads from
	 * logical blocks that Deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Problems seen with concurrent commands.
	 */
	NVME_QUIRK_QDEPTH_ONE			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation.
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues.
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues.
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature.
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),

	/*
	 * Disables simple suspend/resume path.
	 */
	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),

	/*
	 * MSI (but not MSI-X) interrupts are broken and never fire.
	 */
	NVME_QUIRK_BROKEN_MSI			= (1 << 21),

	/*
	 * Align dma pool segment size to 512 bytes.
	 */
	NVME_QUIRK_DMAPOOL_ALIGN_512		= (1 << 22),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long		start_time;
#endif
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
	NVME_MPATH_IO_STATS		= (1 << 2),
	NVME_MPATH_CNT_ACTIVE		= (1 << 3),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
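
/*
 * Worked example (illustrative): the admin queue has no queuedata, so
 * nvme_req_qid() returns qid 0 for admin commands; for I/O queues,
 * hardware context N maps to NVMe queue id N + 1, e.g. a request on
 * hctx 0 is reported as qid 1.
 */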

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state is
 *				entered after all async event processing has
 *				taken place and before namespace removal and
 *				controller deletion proceed
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as it has no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
	NVME_CTRL_STOPPED		= 3,
	NVME_CTRL_SKIP_ID_CNS_CS	= 4,
	NVME_CTRL_DIRTY_CAPABILITY	= 5,
	NVME_CTRL_FROZEN		= 6,
};

struct nvme_ctrl {
	bool comp_seen;
	bool identified;
	bool passthru_err_log_enabled;
	enum nvme_ctrl_state state;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct mutex namespaces_lock;
	struct srcu_struct srcu;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	u16 cntlid;

	u16 mtfa;
	u32 ctrl_config;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u8 dmrl;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
	atomic_t nr_active;
#endif

#ifdef CONFIG_NVME_HOST_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif
	key_serial_t tls_pskid;

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u16 hmmaxd;
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};

static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
	NVME_IOPOLICY_QD,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	enum nvme_subsys_type	subtype;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	struct nvme_ns_ids	ids;
	u8			lba_shift;
	u16			ms;
	u16			pi_size;
	u8			pi_type;
	u8			guard_type;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	bool			rotational;
	bool			passthru_err_log_enabled;
	struct nvme_effects_log *effects;
	u64			nuse;
	unsigned		ns_id;
	int			instance;
#ifdef CONFIG_BLK_DEV_ZONED
	u64			zsze;
#endif
	unsigned long		features;

	struct ratelimit_state	rs_nuse;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct work_struct	partition_scan_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
	NVME_NS_DEAC = 1 << 2,		/* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	unsigned long flags;
#define NVME_NS_REMOVING		0
#define NVME_NS_ANA_PENDING		2
#define NVME_NS_FORCE_RO		3
#define NVME_NS_READY			4
#define NVME_NS_SYSFS_ATTR_LINK	5

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
{
	return head->pi_type && head->ms == head->pi_size;
}
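
/*
 * Illustrative example: with 8 bytes of metadata per block (ms == 8) and
 * a protection type set (pi_type != 0) whose PI tuple is also 8 bytes
 * (pi_size == 8), the metadata region holds nothing but the protection
 * information, so the controller can generate and strip it and
 * nvme_ns_has_pi() returns true.  A format with ms == 16 and an 8-byte
 * PI tuple leaves extra metadata beyond the PI, and the helper returns
 * false.
 */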

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	int (*subsystem_reset)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)
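
/*
 * Worked example (illustrative): a request with tag 0x007 whose genctr
 * is 0x15 encodes as nvme_cid_install_genctr(0x15) | 0x007 == 0x5007,
 * since only the low 4 bits of the generation counter survive the mask.
 * Decoding 0x5007 yields nvme_genctr_from_cid() == 0x5 and
 * nvme_tag_from_cid() == 0x007.
 */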

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}
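
/*
 * Worked example (illustrative): for the fixed-width field "ABC     "
 * with len == 8, nvme_strlen() returns 3.  Note that an all-space field
 * would run past the start of the buffer, so callers are expected to
 * pass fields containing at least one non-space byte.
 */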

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
		return -ENOTTY;
	return ctrl->ops->subsystem_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
{
	return sector >> (head->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
{
	return lba << (head->lba_shift - SECTOR_SHIFT);
}
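
/*
 * Worked example (illustrative): for a namespace formatted with 4K
 * logical blocks (lba_shift == 12), nvme_sect_to_lba() shifts right by
 * 12 - 9 == 3, so 512B sector 8 maps to LBA 1, and nvme_lba_to_sect()
 * maps LBA 1 back to sector 8.
 */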

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
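
/*
 * Worked example (illustrative): a 4096-byte buffer is
 * (4096 >> 2) - 1 == 1023 in NVMe's 0-based dword count,
 * i.e. 1024 dwords.
 */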

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if so do
 * it.  If not, let the caller complete the request without an indirect
 * function call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}
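
/*
 * Usage sketch (illustrative; the completion function name is
 * hypothetical): a transport's CQE handler fills in status/result via
 * this helper and only completes the request directly when blk-mq did
 * not take the remote-completion path:
 *
 *	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 *		my_transport_complete_rq(req);
 */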

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
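
/*
 * Worked example (illustrative): AEN requests are carved out of the top
 * of the admin queue's tag space, so on admin qid 0 a command_id whose
 * tag portion is >= NVME_AQ_BLK_MQ_DEPTH identifies an async event
 * completion rather than a regular tagged request.
 */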

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
int nvme_add_ctrl(struct nvme_ctrl *ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (likely(state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live, state);
}

/*
 * The NSID shall be unique for all shared namespaces, or if at least one of
 * the following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

/*
 * Flags for __nvme_submit_sync_cmd()
 */
typedef __u32 __bitwise nvme_submit_flags_t;

enum {
	/* Insert request at the head of the queue */
	NVME_SUBMIT_AT_HEAD  = (__force nvme_submit_flags_t)(1 << 0),
	/* Set BLK_MQ_REQ_NOWAIT when allocating request */
	NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
	/* Set BLK_MQ_REQ_RESERVED when allocating request */
	NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
	/* Retry command when NVME_STATUS_DNR is not set in the result */
	NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
};

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_attr_groups[];
extern const struct attribute_group nvme_ns_mpath_attr_group;
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
extern const struct attribute_group *nvme_subsys_attrs_groups[];
extern const struct attribute_group *nvme_dev_attr_groups[];
extern const struct block_device_operations nvme_bdev_ops;

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns);
void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute dev_attr_queue_depth;
extern struct device_attribute dev_attr_numa_nodes;
extern struct device_attribute subsys_attr_iopolicy;

static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return disk->fops == &nvme_ns_head_ops;
}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_request(struct request *rq)
{
}
static inline void nvme_mpath_end_request(struct request *rq)
{
}
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return false;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
		enum blk_unique_id type);

struct nvme_zone_info {
	u64 zone_size;
	unsigned int max_open_zones;
	unsigned int max_active_zones;
};

int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
		struct nvme_zone_info *zi);
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
		struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	WARN_ON(nvme_disk_is_ns_head(disk));
	return disk->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline void nvme_start_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
	blk_mq_start_request(rq);
}

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & (NVME_CTRL_SGLS_BYTE_ALIGNED |
			     NVME_CTRL_SGLS_DWORD_ALIGNED);
}

static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS)
		return true;
	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;
}

#ifdef CONFIG_NVME_HOST_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline int __init nvme_init_auth(void)
{
	return 0;
}
static inline void __exit nvme_exit_auth(void)
{
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
static inline void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#endif /* _NVME_H */