xref: /linux/drivers/nvme/target/nvmet.h (revision a4165ffc835fcf738c2ff41ce8305b04454c07d0)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#include <linux/kfifo.h>

#define NVMET_DEFAULT_VS		NVME_VS(2, 1, 0)

#define NVMET_NS_ENABLED		XA_MARK_1
#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8
#define NVMET_PR_LOG_QUEUE_SIZE		64

#define nvmet_for_each_ns(xa, index, entry) \
	xa_for_each(xa, index, entry)

#define nvmet_for_each_enabled_ns(xa, index, entry) \
	xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
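
/*
 * Example (a minimal sketch): walking the namespaces of a subsystem,
 * assuming the caller already serializes against namespace removal:
 *
 *	struct nvmet_ns *ns;
 *	unsigned long idx;
 *
 *	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns)
 *		pr_debug("nsid %u is enabled\n", ns->nsid);
 */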

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which indicates that the
 * offending offset lies in the data portion of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
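
/*
 * Example (a sketch of typical use in a Connect handler): to report an
 * invalid cntlid in the connect data, a handler would set the status to
 * NVME_SC_CONNECT_INVALID_PARAM and fill the completion result with:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */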

struct nvmet_pr_registrant {
	u64			rkey;
	uuid_t			hostid;
	enum nvme_pr_type	rtype;
	struct list_head	entry;
	struct rcu_head		rcu;
};

struct nvmet_pr {
	bool			enable;
	unsigned long		notify_mask;
	atomic_t		generation;
	struct nvmet_pr_registrant __rcu *holder;
	/*
	 * Reservation commands need mutual exclusion across their entire
	 * execution. The 'preempt and abort' command waits asynchronously
	 * for the per-controller percpu_ref to be released before it
	 * completes, so a semaphore is used to provide that mutual
	 * exclusion instead of a mutex.
	 */
	struct semaphore	pr_sem;
	struct list_head	registrant_list;
};
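
/*
 * Example (a hedged sketch, not the exact call sites): a reservation
 * command handler serializes on pr_sem for its whole execution. Unlike
 * a mutex, the semaphore may be released from a different context than
 * the one that acquired it:
 *
 *	down(&ns->pr.pr_sem);
 *	... execute the reservation command, possibly sleeping ...
 *	up(&ns->pr.pr_sem);
 */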

struct nvmet_pr_per_ctrl_ref {
	struct percpu_ref	ref;
	struct completion	free_done;
	struct completion	confirm_done;
	uuid_t			hostid;
};

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
	struct nvmet_pr		pr;
	struct xarray		pr_per_ctrl_refs;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	struct nvmet_ctrl	*ctrl;
	u16			qid;
	u16			size;
	refcount_t		ref;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	struct nvmet_cq		*cq;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u8			sc_c;
	u8			dhchap_status;
	u8			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key		*tls_key;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_pr_log_mgr {
	struct mutex		lock;
	u64			lost_count;
	u64			counter;
	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
};

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;
	struct nvmet_cq		**cqs;

	void			*drvdata;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
	bool			concat;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key		*tls_key;
#endif
	struct nvmet_pr_log_mgr pr_log_mgr;
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	u16			vendor_id;
	u16			subsys_vendor_id;
	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);

	/* Operations mandatory for PCI target controllers */
	u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags,
			 u16 qsize, u64 prp1);
	u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
	u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
			 u16 qsize, u64 prp1, u16 irq_vector);
	u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
	u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
			   void *feat_data);
	u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
			   void *feat_data);
};
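
/*
 * Example (a hedged sketch, not a real transport; the "foo" names are
 * hypothetical): a fabrics driver fills in nvmet_fabrics_ops and hands
 * it to nvmet_register_transport(), declared further below:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.msdbd		= 1,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_foo_ops);
 *	...
 *	nvmet_unregister_transport(&nvmet_foo_ops);
 */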

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN (NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
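
/*
 * With 4 KiB pages (PAGE_SIZE is architecture-dependent, so this is
 * only an example) the limit works out to 8 * 4096 = 32 KiB of inline
 * data.
 */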

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {		/* block device backend */
			struct bio	inline_bio;
		} b;
		struct {		/* file backend */
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {		/* passthru */
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {		/* zoned block device backend */
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
		struct {
			struct work_struct	abort_work;
		} r;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

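/*
 * RAE is the Retain Asynchronous Event bit (bit 15 of CDW10 in Get Log
 * Page). When the host reads a log page with RAE cleared, the
 * corresponding AEN is re-armed by clearing its bit in ->aen_masked.
 */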
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
		const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
void nvmet_cq_init(struct nvmet_cq *cq);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_cq_destroy(struct nvmet_cq *cq);
bool nvmet_cq_get(struct nvmet_cq *cq);
void nvmet_cq_put(struct nvmet_cq *cq);
bool nvmet_cq_in_use(struct nvmet_cq *cq);
u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
	struct nvmet_cq *cq, u16 qid, u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);

struct nvmet_alloc_ctrl_args {
	struct nvmet_port	*port;
	struct nvmet_sq		*sq;
	char			*subsysnqn;
	char			*hostnqn;
	uuid_t			*hostid;
	const struct nvmet_fabrics_ops *ops;
	struct device		*p2p_client;
	u32			kato;
	__le32			result;
	u16			error_loc;
	u16			status;
};
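
/*
 * Example (a hedged sketch; "d" and "c" stand for the parsed connect
 * data and command, and error handling is elided): a transport's
 * Connect handler fills the args and calls nvmet_alloc_ctrl():
 *
 *	struct nvmet_alloc_ctrl_args args = {
 *		.port		= req->port,
 *		.ops		= req->ops,
 *		.subsysnqn	= d->subsysnqn,
 *		.hostnqn	= d->hostnqn,
 *		.hostid		= &d->hostid,
 *		.kato		= le32_to_cpu(c->kato),
 *	};
 *	struct nvmet_ctrl *ctrl = nvmet_alloc_ctrl(&args);
 */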

struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)
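/* CAP.MQES is a zero's based value, hence the +1 to get a command count. */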

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
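
/*
 * Worked example: rw.length is a zero's based block count, so
 * length == 7 on a namespace with blksize_shift == 9 (512-byte blocks)
 * yields (7 + 1) << 9 = 4096 bytes.
 */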

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
{
	return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT;
}
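
/*
 * Example (per the NVMe specification, not a helper defined here): the
 * host memory page size encoded in CC.MPS is 2^(12 + MPS) bytes, so
 * nvmet_cc_mps(cc) == 0 corresponds to 4 KiB pages.
 */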

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(clamp(a, 1U, 1U << 16) - 1);
}
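
/*
 * Worked example: to0based(1) == 0 and to0based(65536) == 0xffff; the
 * input is clamped to [1, 65536] first, so 0 maps to 0 instead of
 * underflowing.
 */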

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
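
/*
 * Worked example: with 4096-byte blocks (blksize_shift == 12) the
 * shift is 12 - SECTOR_SHIFT == 3, so 512-byte sector 8 maps to LBA 1
 * and LBA 1 maps back to sector 8.
 */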

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

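/*
 * The inline_bio is embedded in the request rather than allocated from
 * a bio_set, so it must only be uninitialized, never freed.
 */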
static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
	else
		bio_uninit(bio);
}

#ifdef CONFIG_NVME_TARGET_TCP_TLS
static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq)
{
	return sq->tls_key ? key_serial(sq->tls_key) : 0;
}
static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq)
{
	if (sq->tls_key) {
		key_put(sq->tls_key);
		sq->tls_key = NULL;
	}
}
#else
static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; }
static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {}
#endif
#ifdef CONFIG_NVME_TARGET_AUTH
u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
	return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq);
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
void nvmet_auth_insert_psk(struct nvmet_sq *sq);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
				  struct nvmet_sq *sq)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl,
				  struct nvmet_sq *sq)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {}
#endif

int nvmet_pr_init_ns(struct nvmet_ns *ns);
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
	percpu_ref_put(&pc_ref->ref);
}

/*
 * Data for the get_feature() and set_feature() operations of PCI target
 * controllers.
 */
struct nvmet_feat_irq_coalesce {
	u8		thr;
	u8		time;
};

struct nvmet_feat_irq_config {
	u16		iv;
	bool		cd;
};

struct nvmet_feat_arbitration {
	u8		hpw;
	u8		mpw;
	u8		lpw;
	u8		ab;
};
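
/*
 * Example (a hedged sketch; the "foo" name is hypothetical): a PCI
 * target's get_feature() op receives one of the structs above through
 * the feat_data pointer and fills it in:
 *
 *	static u16 foo_get_feature(const struct nvmet_ctrl *ctrl,
 *				   u8 feat, void *feat_data)
 *	{
 *		struct nvmet_feat_irq_coalesce *irqc = feat_data;
 *
 *		if (feat != NVME_FEAT_IRQ_COALESCE)
 *			return NVME_SC_INVALID_FIELD;
 *		irqc->thr = 0;
 *		irqc->time = 0;
 *		return NVME_SC_SUCCESS;
 *	}
 */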

#endif /* _NVMET_H */