/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, indicating that the offending
 * offset lies in the data section of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
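
/*
 * Illustrative sketch (not part of this header): how a connect handler
 * can use these helpers to point the host at the offending field. The
 * surrounding function is hypothetical; see fabrics-cmd.c for the real
 * users.
 *
 *	static u16 check_cntlid(struct nvmet_req *req, struct nvmf_connect_data *d)
 *	{
 *		if (d->cntlid != cpu_to_le16(0xffff)) {
 *			// report the offset of the bad field in the data block
 *			req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *			return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *		}
 *		return NVME_SC_SUCCESS;
 *	}
 */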

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u8			dhchap_status;
	u8			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};
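
/*
 * Illustrative configfs view of a port (a sketch of the standard nvmet
 * configfs layout; "1" is an arbitrary port ID chosen by the admin):
 *
 *	/sys/kernel/config/nvmet/ports/1/addr_trtype	// part of disc_addr
 *	/sys/kernel/config/nvmet/ports/1/subsystems/	// subsystems list
 *	/sys/kernel/config/nvmet/ports/1/referrals/	// referrals list
 *	/sys/kernel/config/nvmet/ports/1/ana_groups/	// ana_groups_group
 */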

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
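
/*
 * Illustrative sketch (hypothetical transport, not in this file): the
 * minimal shape of a fabrics driver plugging into the target core. The
 * foo_* callbacks are assumptions for illustration; the in-tree loop,
 * RDMA, TCP and FC transports are the real examples.
 *
 *	static const struct nvmet_fabrics_ops foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.msdbd		= 1,
 *		.queue_response	= foo_queue_response,
 *		.add_port	= foo_add_port,
 *		.remove_port	= foo_remove_port,
 *		.delete_ctrl	= foo_delete_ctrl,
 *	};
 *
 *	// from module init/exit:
 *	ret = nvmet_register_transport(&foo_ops);
 *	nvmet_unregister_transport(&foo_ops);
 */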

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

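/*
 * Illustrative sketch (hypothetical transport code; dev and count are
 * assumed locals): a DMA-capable transport would typically pass
 * nvmet_data_dir() when mapping the request's scatterlist:
 *
 *	count = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 *	if (!count)
 *		return NVME_SC_INTERNAL;
 */
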
struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

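/*
 * If the host did not set the Retain Asynchronous Event (RAE) bit in the
 * Get Log Page command, reading the log page re-arms the masked AEN.
 */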
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

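/*
 * Illustrative request flow in a transport (a sketch modelled on the
 * in-tree transports; foo_ops is hypothetical):
 *
 *	if (!nvmet_req_init(req, cq, sq, &foo_ops))
 *		return;			// error response already queued
 *	// ... map or copy the data described by the command's SGL ...
 *	req->execute(req);		// completes via nvmet_req_complete()
 */
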
void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES((ctrl)->cap) + 1)

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

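/* KAS is reported in units of 100 ms (NVMe Identify Controller KAS field) */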
#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

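/* The rw command's NLB field is a 0's based value, hence the "+ 1". */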
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
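
/*
 * Illustrative values (sketch): to0based() clamps its input to the valid
 * 16-bit 0's based range:
 *
 *	to0based(0)	 == cpu_to_le16(0)	// clamped up from 0
 *	to0based(16)	 == cpu_to_le16(15)
 *	to0based(100000) == cpu_to_le16(65535)	// clamped to 1 << 16
 */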

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
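
/*
 * Illustrative sketch: for a namespace with 4096-byte blocks
 * (blksize_shift == 12), namespace LBA 1 maps to 512-byte sector 8:
 *
 *	sector_t sect = nvmet_lba_to_sect(ns, cpu_to_le64(1));	// == 8
 *	__le64 lba = nvmet_sect_to_lba(ns, sect);		// == cpu_to_le64(1)
 */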

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */