/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#include <linux/kfifo.h>

#define NVMET_DEFAULT_VS		NVME_VS(2, 1, 0)

#define NVMET_NS_ENABLED		XA_MARK_1
#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8
#define NVMET_PR_LOG_QUEUE_SIZE		64

#define nvmet_for_each_ns(xa, index, entry) \
	xa_for_each(xa, index, entry)

#define nvmet_for_each_enabled_ns(xa, index, entry) \
	xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
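
/*
 * Illustrative use of the iterators above (a sketch only; do_something()
 * is a hypothetical callback, and locking requirements are omitted):
 *
 *	unsigned long idx;
 *	struct nvmet_ns *ns;
 *
 *	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns)
 *		do_something(ns);
 */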

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/* Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
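
/*
 * Illustrative use: a connect command carrying a bad cntlid in its data
 * payload could be failed with NVME_SC_CONNECT_INVALID_PARAM and
 * something like
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *
 * i.e. bit 16 (IATTR) set, plus offsetof(struct nvmf_connect_data, cntlid).
 */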

struct nvmet_pr_registrant {
	u64			rkey;
	uuid_t			hostid;
	enum nvme_pr_type	rtype;
	struct list_head	entry;
	struct rcu_head		rcu;
};

struct nvmet_pr {
	bool			enable;
	unsigned long		notify_mask;
	atomic_t		generation;
	struct nvmet_pr_registrant __rcu *holder;
	/*
	 * Reservation commands require mutual exclusion for their whole
	 * execution.  The 'preempt and abort' path, however, waits
	 * asynchronously for the per-controller percpu_refs to be
	 * released before it completes, so the lock may be released
	 * from a different context than the one that acquired it.  A
	 * semaphore is therefore used instead of a mutex.
	 */
	struct semaphore	pr_sem;
	struct list_head	registrant_list;
};
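
/*
 * Sketch of the locking pattern the comment above describes (inferred
 * from that description, not copied from the reservation code):
 *
 *	down(&pr->pr_sem);
 *	... run the reservation command, possibly waiting for the
 *	    per-controller percpu_refs to drop ...
 *	up(&pr->pr_sem);	<- may happen from a different context
 *				   than the down(), which a mutex forbids
 */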

struct nvmet_pr_per_ctrl_ref {
	struct percpu_ref	ref;
	struct completion	free_done;
	struct completion	confirm_done;
	uuid_t			hostid;
};

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
	struct nvmet_pr		pr;
	struct xarray		pr_per_ctrl_refs;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u8			dhchap_status;
	u8			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_pr_log_mgr {
	struct mutex		lock;
	u64			lost_count;
	u64			counter;
	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
};

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
	struct nvmet_pr_log_mgr pr_log_mgr;
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio      inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb            iocb;
			struct bio_vec          *bvec;
			struct work_struct      work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct      work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
		struct {
			struct work_struct	abort_work;
		} r;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side:
 * a host Write pulls data off the wire into target memory
 * (DMA_FROM_DEVICE), while a host Read pushes data out (DMA_TO_DEVICE).
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
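
/*
 * Intended gating pattern (a sketch inferred from the two helpers above):
 * before emitting an AEN for bit @bn, callers check
 * nvmet_aen_bit_disabled(); its test_and_set_bit() masks the bit so that
 * duplicate events are suppressed until the host reads the matching log
 * page without the Retain Asynchronous Event bit set, at which point
 * nvmet_clear_aen_bit() unmasks it.
 */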

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
		uuid_t *hostid);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)
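/* NVME_CAP_MQES() is 0's based, hence the +1 above. */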

/*
 * Nice round number that makes a list of nsids fit into a page
 * (1024 nsids * sizeof(__le32) == 4096 bytes).
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

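/*
 * Worked example for nvmet_rw_data_len() below: rw.length is 0's based,
 * so length == 7 on a 4K-block namespace (blksize_shift == 12) yields
 * (7 + 1) << 12 == 32768 bytes.
 */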
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
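
/*
 * e.g. dsm.nr == 0 (0's based) describes a single range:
 * 1 * sizeof(struct nvme_dsm_range) == 16 bytes.
 */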

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
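
/*
 * e.g. to0based(1) yields 0, to0based(0) also yields 0 (the value is
 * clamped up to 1 first), and to0based(1 << 20) yields 0xffff (clamped
 * to the 16-bit maximum).
 */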

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
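
/*
 * Worked example: on a 4K-block namespace (blksize_shift == 12),
 * nvmet_sect_to_lba(ns, 8) == 1 and nvmet_lba_to_sect(ns, 1) == 8,
 * since eight 512-byte sectors make up one 4K logical block.
 */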

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

int nvmet_pr_init_ns(struct nvmet_ns *ns);
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
	percpu_ref_put(&pc_ref->ref);
}
#endif /* _NVMET_H */