xref: /linux/drivers/nvme/target/nvmet.h (revision 202779456dc5b75d07b214064161ef6a2421e8be)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe status is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, meaning the offending offset
 * lies in the data portion of the Connect command rather than in its SQE.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
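
/*
 * Example (illustrative call site): when failing a Connect command because
 * of a bad field in struct nvmf_connect_data, report the field's byte
 * offset back to the host in the completion result:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	nvmet_req_complete(req, NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR);
 */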

struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct delayed_work	auth_expired_work;
	bool			authenticated;
	u16			dhchap_tid;
	u16			dhchap_status;
	int			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
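
/*
 * Example (sketch; callback names and values are hypothetical, not from
 * any in-tree driver): a transport fills in a struct nvmet_fabrics_ops
 * and hands it to nvmet_register_transport() from its module init, then
 * undoes it with nvmet_unregister_transport() on exit:
 *
 *	static const struct nvmet_fabrics_ops example_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.msdbd		= 1,
 *		.queue_response	= example_queue_response,
 *		.add_port	= example_add_port,
 *		.remove_port	= example_remove_port,
 *		.delete_ctrl	= example_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&example_ops);
 */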

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
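
/*
 * Example (sketch; assumes a transport with a real struct device): map
 * the request SGL in the direction implied by the command, so a host
 * Write becomes a device-to-memory DMA on the target:
 *
 *	nents = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 */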

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
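
/*
 * How these pair up (sketch): an event source first checks
 * nvmet_aen_bit_disabled(), which also masks the bit so each event class
 * is reported at most once until the host reads the matching log page;
 * the log page handler then calls nvmet_clear_aen_bit(), which re-arms
 * the event unless the host set Retain Asynchronous Event (RAE):
 *
 *	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		return;
 *	nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 *			      NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS);
 */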

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
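
/*
 * Worked example: NLB in the rw command is 0's based, so length == 7 on
 * a 4K-block namespace (blksize_shift == 12) means (7 + 1) << 12 ==
 * 32768 bytes of data to transfer.
 */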

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
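
/*
 * Worked example: the DSM range count is likewise 0's based, so nr == 0
 * still describes one 16-byte struct nvme_dsm_range to transfer.
 */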

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
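
/*
 * Worked examples: to0based() clamps to the representable 0's based
 * range, so 0 -> 0 (treated as 1), 1024 -> 1023, and anything above
 * 65536 saturates to 0xffff.
 */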

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
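
/*
 * Worked example: kernel sectors are 512 bytes (SECTOR_SHIFT == 9), so
 * on a 4K-block namespace (blksize_shift == 12) LBA 1 maps to sector 8
 * and sector 8 maps back to LBA 1; the two helpers are exact inverses
 * for block-aligned values.
 */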

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
{
	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */