/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#include <linux/kfifo.h>

#define NVMET_DEFAULT_VS		NVME_VS(2, 1, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8
#define NVMET_PR_LOG_QUEUE_SIZE		64

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which indicates that the
 * offending offset is in the data portion of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
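
/*
 * Sketch of typical usage (field name illustrative): a Connect command
 * rejected because of a bad cntlid in the connect data would be failed
 * roughly like this:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */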

struct nvmet_pr_registrant {
	u64			rkey;
	uuid_t			hostid;
	enum nvme_pr_type	rtype;
	struct list_head	entry;
	struct rcu_head		rcu;
};

struct nvmet_pr {
	bool			enable;
	unsigned long		notify_mask;
	atomic_t		generation;
	struct nvmet_pr_registrant __rcu *holder;
	/*
	 * Reservation commands require mutual exclusion for the whole of
	 * their execution. 'Preempt and abort' waits asynchronously for the
	 * per-controller percpu_refs to drop before it completes, so the
	 * lock may be released from a different context than the one that
	 * acquired it; a semaphore is therefore used here instead of a
	 * mutex, which must be released by its owner.
	 */
	struct semaphore	pr_sem;
	struct list_head	registrant_list;
};

struct nvmet_pr_per_ctrl_ref {
	struct percpu_ref	ref;
	struct completion	free_done;
	struct completion	confirm_done;
	uuid_t			hostid;
};

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
	struct nvmet_pr		pr;
	struct xarray		pr_per_ctrl_refs;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u8			dhchap_status;
	u8			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

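/* The low two bits of TREQ encode the secure channel requirement. */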
static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_pr_log_mgr {
	struct mutex		lock;
	u64			lost_count;
	u64			counter;
	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
};

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
	struct nvmet_pr_log_mgr pr_log_mgr;
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

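/*
 * Sketch of how a transport driver typically uses this (the demo_* names
 * are hypothetical): fill in the callbacks and register the ops at module
 * load time, e.g.:
 *
 *	static const struct nvmet_fabrics_ops demo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.queue_response	= demo_queue_response,
 *		.add_port	= demo_add_port,
 *		.remove_port	= demo_remove_port,
 *		.delete_ctrl	= demo_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&demo_ops);
 */
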
#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio      inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb            iocb;
			struct bio_vec          *bvec;
			struct work_struct      work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct      work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
		struct {
			struct work_struct	abort_work;
		} r;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side:
 * a host Write moves data from the fabric into target memory
 * (DMA_FROM_DEVICE), while a host Read moves data out of target memory
 * (DMA_TO_DEVICE).
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

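/*
 * CDW10 bit 15 of a Get Log Page command is the RAE (Retain Asynchronous
 * Event) bit: only clear the AEN mask bit when the host did not ask us
 * to retain the event.
 */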
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

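/*
 * Returns true if this AEN must be suppressed: either the host has not
 * enabled the event, or one is already outstanding for this bit (which
 * test_and_set_bit() marks as such atomically).
 */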
static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
		uuid_t *hostid);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES((ctrl)->cap) + 1)

/*
 * Nice round number that makes a list of nsids fit into a page:
 * 1024 nsids * sizeof(__le32) = 4096 bytes, i.e. exactly one 4K page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

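/*
 * NLB in the rw command and NR in the dsm command are 0's based values,
 * hence the "+ 1" in the length calculations below.
 */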
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/*
 * Convert a 32-bit number to a 16-bit 0's based number, clamping to the
 * representable range: to0based(0) == 0, to0based(1) == 0,
 * to0based(0x10000) == 0xffff, and to0based(0x20000) == 0xffff.
 */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

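/*
 * Example: for a namespace with 4096-byte blocks (blksize_shift == 12)
 * and 512-byte kernel sectors (SECTOR_SHIFT == 9), sector 8 maps to
 * LBA 1, and LBA 1 maps back to sector 8.
 */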
static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}

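/*
 * Small transfers (at most NVMET_MAX_INLINE_BIOVEC segments and
 * NVMET_MAX_INLINE_DATA_LEN bytes, i.e. 8 pages) can use the bio_vec
 * array embedded in struct nvmet_req instead of allocating one.
 */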
static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
{
}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq)
{
}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif


int nvmet_pr_init_ns(struct nvmet_ns *ns);
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
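/*
 * A successful nvmet_pr_get_ns_pc_ref() is expected to be balanced by a
 * matching nvmet_pr_put_ns_pc_ref() on the acquired reference.
 */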
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
	percpu_ref_put(&pc_ref->ref);
}
#endif /* _NVMET_H */