xref: /linux/drivers/nvme/target/core.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"
#include "debugfs.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

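/*
 * Translate a Linux errno reported by a backend into an NVMe status code,
 * recording the location of the offending command field in req->error_loc.
 */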
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
	}
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}

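/*
 * Walk the subsystem's namespaces xarray in ascending nsid order; the last
 * entry visited is the highest nsid currently in use (0 if the xarray is
 * empty).
 */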
static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

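/*
 * Pair queued AEN payloads with outstanding Asynchronous Event Request
 * commands and complete them.  ctrl->lock is dropped around
 * nvmet_req_complete() to avoid holding it across the transport's
 * queue_response handler.
 */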
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

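/*
 * Record a changed namespace ID for the Changed Namespace List log page.
 * Once more IDs change than NVME_MAX_CHANGED_NAMESPACES can hold, the list
 * is collapsed to the single entry 0xffffffff.
 */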
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

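/*
 * Enable a port: look up the transport by trtype, auto-loading the
 * nvmet-transport-<N> module if needed (nvmet_config_sem is dropped and
 * re-taken around request_module()), validate T10-PI support, call the
 * transport's ->add_port() and then apply inline_data_size and
 * max_queue_size defaults.
 */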
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	/*
	 * If the transport didn't set the max_queue_size properly, then clamp
	 * it to the target limits. Also set default values in case the
	 * transport didn't set it at all.
	 */
	if (port->max_queue_size < 0)
		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
	else
		port->max_queue_size = clamp_t(int, port->max_queue_size,
					       NVMET_MIN_QUEUE_SIZE,
					       NVMET_MAX_QUEUE_SIZE);

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

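/*
 * Keep-alive timer expiry.  If any command was received since the timer was
 * last armed (Traffic Based Keep Alive), just re-arm it; otherwise declare a
 * fatal error on the controller.
 */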
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

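/*
 * Resolve the command's NSID to a namespace and take a percpu reference on
 * it.  An NSID that is known to the subsystem but not currently enabled is
 * reported as a (retryable) internal path error rather than an invalid
 * namespace.
 */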
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);

	req->ns = xa_load(&subsys->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		if (nvmet_subsys_nsid_exists(subsys, nsid))
			return NVME_SC_INTERNAL_PATH_ERROR;
		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}

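/*
 * Enable a namespace: attach the block device or file backend, set up
 * peer-to-peer memory for already connected controllers if requested,
 * publish the namespace in the subsystem's xarray and emit a namespace
 * attribute changed AEN.
 */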
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}

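/*
 * Advance the submission queue head pointer reported in the completion.
 * Uses a lock-free try_cmpxchg() loop since completions may run concurrently
 * on the same queue.
 */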
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

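/*
 * Record a command error: set the status field in the CQE, append an entry
 * to the controller's circular error log (reported via the Error Information
 * log page) and set the MORE bit for this request.
 */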
static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

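/*
 * Tear down a submission queue: fail outstanding AER commands if this is the
 * admin queue, kill the queue's percpu reference and wait for all in-flight
 * requests to drain, then drop the controller reference that was taken when
 * the queue was connected.
 */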
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	/*
	 * we must reference the ctrl again after waiting for inflight IO
	 * to complete. Because admin connect may have sneaked in after we
	 * store sq->ctrl locally, but before we killed the percpu_ref. the
	 * admin connect allocates and assigns sq->ctrl, which now needs a
	 * final ref put, as this ctrl is going away.
	 */
	ctrl = sq->ctrl;

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

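/*
 * Parse an I/O command: handle fabrics commands first, check authentication
 * and controller readiness, route passthru subsystems to the passthru
 * handler, then look up the namespace, apply ANA and write-protect checks,
 * and dispatch on the namespace's command set (NVM or ZNS).
 */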
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			return nvmet_file_parse_io_cmd(req);
		return nvmet_bdev_parse_io_cmd(req);
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return nvmet_bdev_zns_parse_io_cmd(req);
		return NVME_SC_INVALID_IO_CMD_SET;
	default:
		return NVME_SC_INVALID_IO_CMD_SET;
	}
}

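/*
 * Initialize a request as it arrives from the transport: reset per-request
 * state, reject fused commands and unsupported SGL descriptor types, parse
 * the command (connect, admin or I/O), and take a reference on the
 * submission queue for the duration of the request.
 */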
nvmet_req_init(struct nvmet_req * req,struct nvmet_cq * cq,struct nvmet_sq * sq,const struct nvmet_fabrics_ops * ops)946a07b4970SChristoph Hellwig bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
947e929f06dSChristoph Hellwig 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
948a07b4970SChristoph Hellwig {
949a07b4970SChristoph Hellwig 	u8 flags = req->cmd->common.flags;
950a07b4970SChristoph Hellwig 	u16 status;
951a07b4970SChristoph Hellwig 
952a07b4970SChristoph Hellwig 	req->cq = cq;
953a07b4970SChristoph Hellwig 	req->sq = sq;
954a07b4970SChristoph Hellwig 	req->ops = ops;
955a07b4970SChristoph Hellwig 	req->sg = NULL;
956c6e3f133SIsrael Rukshin 	req->metadata_sg = NULL;
957a07b4970SChristoph Hellwig 	req->sg_cnt = 0;
958c6e3f133SIsrael Rukshin 	req->metadata_sg_cnt = 0;
9595e62d5c9SChristoph Hellwig 	req->transfer_len = 0;
960c6e3f133SIsrael Rukshin 	req->metadata_len = 0;
961cd0c1b8eSDaniel Wagner 	req->cqe->result.u64 = 0;
962fc6c9730SMax Gurtovoy 	req->cqe->status = 0;
963fc6c9730SMax Gurtovoy 	req->cqe->sq_head = 0;
964423b4487SSagi Grimberg 	req->ns = NULL;
9655698b805SChaitanya Kulkarni 	req->error_loc = NVMET_NO_ERROR_LOC;
966e4a97625SChaitanya Kulkarni 	req->error_slba = 0;
967a07b4970SChristoph Hellwig 
968a07b4970SChristoph Hellwig 	/* no support for fused commands yet */
969a07b4970SChristoph Hellwig 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
970e81446afSChaitanya Kulkarni 		req->error_loc = offsetof(struct nvme_common_command, flags);
971dd0b0a4aSWeiwen Hu 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
972a07b4970SChristoph Hellwig 		goto fail;
973a07b4970SChristoph Hellwig 	}
974a07b4970SChristoph Hellwig 
975bffd2b61SMax Gurtovoy 	/*
976bffd2b61SMax Gurtovoy 	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
977bffd2b61SMax Gurtovoy 	 * contains an address of a single contiguous physical buffer that is
978bffd2b61SMax Gurtovoy 	 * byte aligned.
979bffd2b61SMax Gurtovoy 	 */
980bffd2b61SMax Gurtovoy 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
981e81446afSChaitanya Kulkarni 		req->error_loc = offsetof(struct nvme_common_command, flags);
982dd0b0a4aSWeiwen Hu 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
983a07b4970SChristoph Hellwig 		goto fail;
984a07b4970SChristoph Hellwig 	}
985a07b4970SChristoph Hellwig 
986a07b4970SChristoph Hellwig 	if (unlikely(!req->sq->ctrl))
987d84dd8cdSChristoph Hellwig 		/* will return an error for any non-connect command: */
988a07b4970SChristoph Hellwig 		status = nvmet_parse_connect_cmd(req);
989a07b4970SChristoph Hellwig 	else if (likely(req->sq->qid != 0))
990a07b4970SChristoph Hellwig 		status = nvmet_parse_io_cmd(req);
991a07b4970SChristoph Hellwig 	else
992a07b4970SChristoph Hellwig 		status = nvmet_parse_admin_cmd(req);
993a07b4970SChristoph Hellwig 
994a07b4970SChristoph Hellwig 	if (status)
995a07b4970SChristoph Hellwig 		goto fail;
996a07b4970SChristoph Hellwig 
9973c3751f2SChaitanya Kulkarni 	trace_nvmet_req_init(req, req->cmd);
9983c3751f2SChaitanya Kulkarni 
999a07b4970SChristoph Hellwig 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
1000dd0b0a4aSWeiwen Hu 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1001a07b4970SChristoph Hellwig 		goto fail;
1002a07b4970SChristoph Hellwig 	}
1003a07b4970SChristoph Hellwig 
1004c09305aeSSagi Grimberg 	if (sq->ctrl)
1005aaeadd70SSagi Grimberg 		sq->ctrl->reset_tbkas = true;
1006c09305aeSSagi Grimberg 
1007a07b4970SChristoph Hellwig 	return true;
1008a07b4970SChristoph Hellwig 
1009a07b4970SChristoph Hellwig fail:
1010a07b4970SChristoph Hellwig 	__nvmet_req_complete(req, status);
1011a07b4970SChristoph Hellwig 	return false;
1012a07b4970SChristoph Hellwig }
1013a07b4970SChristoph Hellwig EXPORT_SYMBOL_GPL(nvmet_req_init);
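/*
 * Rough usage sketch for a transport driver (simplified; names such as
 * my_transport_ops are illustrative only): after receiving a command
 * capsule, the transport initializes the request and, once data buffers
 * have been set up, invokes the handler selected by the parse step.
 *
 *	if (!nvmet_req_init(req, cq, sq, &my_transport_ops))
 *		return;		/* request already completed with an error */
 *	/* ...map or allocate data, e.g. via nvmet_req_alloc_sgls()... */
 *	req->execute(req);	/* completes via nvmet_req_complete() */
 */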
1014a07b4970SChristoph Hellwig 
nvmet_req_uninit(struct nvmet_req * req)1015549f01aeSVijay Immanuel void nvmet_req_uninit(struct nvmet_req *req)
1016549f01aeSVijay Immanuel {
1017549f01aeSVijay Immanuel 	percpu_ref_put(&req->sq->ref);
1018423b4487SSagi Grimberg 	if (req->ns)
1019423b4487SSagi Grimberg 		nvmet_put_namespace(req->ns);
1020549f01aeSVijay Immanuel }
1021549f01aeSVijay Immanuel EXPORT_SYMBOL_GPL(nvmet_req_uninit);
1022549f01aeSVijay Immanuel 
nvmet_check_transfer_len(struct nvmet_req * req,size_t len)1023136cc1ffSIsrael Rukshin bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
10245e62d5c9SChristoph Hellwig {
1025136cc1ffSIsrael Rukshin 	if (unlikely(len != req->transfer_len)) {
1026e81446afSChaitanya Kulkarni 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1027dd0b0a4aSWeiwen Hu 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
1028e9061c39SChristoph Hellwig 		return false;
1029e9061c39SChristoph Hellwig 	}
1030e9061c39SChristoph Hellwig 
1031e9061c39SChristoph Hellwig 	return true;
1032e9061c39SChristoph Hellwig }
1033136cc1ffSIsrael Rukshin EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
1034e9061c39SChristoph Hellwig 
nvmet_check_data_len_lte(struct nvmet_req * req,size_t data_len)1035b716e688SSagi Grimberg bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
1036b716e688SSagi Grimberg {
1037b716e688SSagi Grimberg 	if (unlikely(data_len > req->transfer_len)) {
1038b716e688SSagi Grimberg 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1039dd0b0a4aSWeiwen Hu 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
1040b716e688SSagi Grimberg 		return false;
1041b716e688SSagi Grimberg 	}
1042b716e688SSagi Grimberg 
1043b716e688SSagi Grimberg 	return true;
1044b716e688SSagi Grimberg }
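/*
 * Note the difference between the two length checks above:
 * nvmet_check_transfer_len() requires the expected length to match
 * req->transfer_len exactly, while nvmet_check_data_len_lte() only requires
 * that it not exceed req->transfer_len, for handlers whose required length
 * may be smaller than what the host transferred.
 */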
1045b716e688SSagi Grimberg 
nvmet_data_transfer_len(struct nvmet_req * req)1046c6e3f133SIsrael Rukshin static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
10475b2322e4SLogan Gunthorpe {
1048c6e3f133SIsrael Rukshin 	return req->transfer_len - req->metadata_len;
1049c6e3f133SIsrael Rukshin }
1050c6925093SLogan Gunthorpe 
nvmet_req_alloc_p2pmem_sgls(struct pci_dev * p2p_dev,struct nvmet_req * req)1051bcd9a079SMax Gurtovoy static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1052bcd9a079SMax Gurtovoy 		struct nvmet_req *req)
1053c6e3f133SIsrael Rukshin {
1054bcd9a079SMax Gurtovoy 	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1055c6e3f133SIsrael Rukshin 			nvmet_data_transfer_len(req));
1056c6e3f133SIsrael Rukshin 	if (!req->sg)
1057c6e3f133SIsrael Rukshin 		goto out_err;
1058c6e3f133SIsrael Rukshin 
1059c6e3f133SIsrael Rukshin 	if (req->metadata_len) {
1060bcd9a079SMax Gurtovoy 		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1061c6e3f133SIsrael Rukshin 				&req->metadata_sg_cnt, req->metadata_len);
1062c6e3f133SIsrael Rukshin 		if (!req->metadata_sg)
1063c6e3f133SIsrael Rukshin 			goto out_free_sg;
1064c6e3f133SIsrael Rukshin 	}
1065bcd9a079SMax Gurtovoy 
1066bcd9a079SMax Gurtovoy 	req->p2p_dev = p2p_dev;
1067bcd9a079SMax Gurtovoy 
1068c6e3f133SIsrael Rukshin 	return 0;
1069c6e3f133SIsrael Rukshin out_free_sg:
1070c6e3f133SIsrael Rukshin 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1071c6e3f133SIsrael Rukshin out_err:
1072c6e3f133SIsrael Rukshin 	return -ENOMEM;
1073c6e3f133SIsrael Rukshin }
1074c6e3f133SIsrael Rukshin 
nvmet_req_find_p2p_dev(struct nvmet_req * req)1075bcd9a079SMax Gurtovoy static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1076c6e3f133SIsrael Rukshin {
1077bcd9a079SMax Gurtovoy 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1078bcd9a079SMax Gurtovoy 	    !req->sq->ctrl || !req->sq->qid || !req->ns)
1079bcd9a079SMax Gurtovoy 		return NULL;
1080bcd9a079SMax Gurtovoy 	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1081c6925093SLogan Gunthorpe }
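/*
 * P2P memory is only considered when CONFIG_PCI_P2PDMA is enabled, for I/O
 * queue commands on an established controller that target a namespace; the
 * per-namespace device mapping consulted here is populated at connect time
 * by nvmet_setup_p2p_ns_map() below.
 */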
1082c6925093SLogan Gunthorpe 
nvmet_req_alloc_sgls(struct nvmet_req * req)1083c6e3f133SIsrael Rukshin int nvmet_req_alloc_sgls(struct nvmet_req *req)
10845b2322e4SLogan Gunthorpe {
1085bcd9a079SMax Gurtovoy 	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1086bcd9a079SMax Gurtovoy 
1087bcd9a079SMax Gurtovoy 	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1088c6e3f133SIsrael Rukshin 		return 0;
1089c6e3f133SIsrael Rukshin 
1090c6e3f133SIsrael Rukshin 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1091c6e3f133SIsrael Rukshin 			    &req->sg_cnt);
1092c6e3f133SIsrael Rukshin 	if (unlikely(!req->sg))
1093c6e3f133SIsrael Rukshin 		goto out;
1094c6e3f133SIsrael Rukshin 
1095c6e3f133SIsrael Rukshin 	if (req->metadata_len) {
1096c6e3f133SIsrael Rukshin 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1097c6e3f133SIsrael Rukshin 					     &req->metadata_sg_cnt);
1098c6e3f133SIsrael Rukshin 		if (unlikely(!req->metadata_sg))
1099c6e3f133SIsrael Rukshin 			goto out_free;
1100c6e3f133SIsrael Rukshin 	}
1101c6e3f133SIsrael Rukshin 
1102c6e3f133SIsrael Rukshin 	return 0;
1103c6e3f133SIsrael Rukshin out_free:
11045b2322e4SLogan Gunthorpe 	sgl_free(req->sg);
1105c6e3f133SIsrael Rukshin out:
1106c6e3f133SIsrael Rukshin 	return -ENOMEM;
1107c6e3f133SIsrael Rukshin }
1108c6e3f133SIsrael Rukshin EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1109c6e3f133SIsrael Rukshin 
nvmet_req_free_sgls(struct nvmet_req * req)1110c6e3f133SIsrael Rukshin void nvmet_req_free_sgls(struct nvmet_req *req)
1111c6e3f133SIsrael Rukshin {
1112c6e3f133SIsrael Rukshin 	if (req->p2p_dev) {
1113c6e3f133SIsrael Rukshin 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1114c6e3f133SIsrael Rukshin 		if (req->metadata_sg)
1115c6e3f133SIsrael Rukshin 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1116bcd9a079SMax Gurtovoy 		req->p2p_dev = NULL;
1117c6e3f133SIsrael Rukshin 	} else {
1118c6e3f133SIsrael Rukshin 		sgl_free(req->sg);
1119c6e3f133SIsrael Rukshin 		if (req->metadata_sg)
1120c6e3f133SIsrael Rukshin 			sgl_free(req->metadata_sg);
1121c6e3f133SIsrael Rukshin 	}
1122c6925093SLogan Gunthorpe 
11235b2322e4SLogan Gunthorpe 	req->sg = NULL;
1124c6e3f133SIsrael Rukshin 	req->metadata_sg = NULL;
11255b2322e4SLogan Gunthorpe 	req->sg_cnt = 0;
1126c6e3f133SIsrael Rukshin 	req->metadata_sg_cnt = 0;
11275b2322e4SLogan Gunthorpe }
1128c6e3f133SIsrael Rukshin EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
11295b2322e4SLogan Gunthorpe 
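/*
 * The helpers below extract individual fields from the 32-bit Controller
 * Configuration (CC) property written by the host: enable, command set
 * selected, memory page size, arbitration mechanism, shutdown notification,
 * and the I/O submission/completion queue entry sizes.
 */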
nvmet_cc_en(u32 cc)1130a07b4970SChristoph Hellwig static inline bool nvmet_cc_en(u32 cc)
1131a07b4970SChristoph Hellwig {
1132ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1133a07b4970SChristoph Hellwig }
1134a07b4970SChristoph Hellwig 
nvmet_cc_css(u32 cc)1135a07b4970SChristoph Hellwig static inline u8 nvmet_cc_css(u32 cc)
1136a07b4970SChristoph Hellwig {
1137ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1138a07b4970SChristoph Hellwig }
1139a07b4970SChristoph Hellwig 
nvmet_cc_mps(u32 cc)1140a07b4970SChristoph Hellwig static inline u8 nvmet_cc_mps(u32 cc)
1141a07b4970SChristoph Hellwig {
1142ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1143a07b4970SChristoph Hellwig }
1144a07b4970SChristoph Hellwig 
nvmet_cc_ams(u32 cc)1145a07b4970SChristoph Hellwig static inline u8 nvmet_cc_ams(u32 cc)
1146a07b4970SChristoph Hellwig {
1147ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1148a07b4970SChristoph Hellwig }
1149a07b4970SChristoph Hellwig 
nvmet_cc_shn(u32 cc)1150a07b4970SChristoph Hellwig static inline u8 nvmet_cc_shn(u32 cc)
1151a07b4970SChristoph Hellwig {
1152ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1153a07b4970SChristoph Hellwig }
1154a07b4970SChristoph Hellwig 
nvmet_cc_iosqes(u32 cc)1155a07b4970SChristoph Hellwig static inline u8 nvmet_cc_iosqes(u32 cc)
1156a07b4970SChristoph Hellwig {
1157ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1158a07b4970SChristoph Hellwig }
1159a07b4970SChristoph Hellwig 
nvmet_cc_iocqes(u32 cc)1160a07b4970SChristoph Hellwig static inline u8 nvmet_cc_iocqes(u32 cc)
1161a07b4970SChristoph Hellwig {
1162ad4e05b2SMax Gurtovoy 	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1163a07b4970SChristoph Hellwig }
1164a07b4970SChristoph Hellwig 
nvmet_css_supported(u8 cc_css)1165ab5d0b38SChaitanya Kulkarni static inline bool nvmet_css_supported(u8 cc_css)
1166ab5d0b38SChaitanya Kulkarni {
116763bc732cSColin Ian King 	switch (cc_css << NVME_CC_CSS_SHIFT) {
1168ab5d0b38SChaitanya Kulkarni 	case NVME_CC_CSS_NVM:
1169ab5d0b38SChaitanya Kulkarni 	case NVME_CC_CSS_CSI:
1170ab5d0b38SChaitanya Kulkarni 		return true;
1171ab5d0b38SChaitanya Kulkarni 	default:
1172ab5d0b38SChaitanya Kulkarni 		return false;
1173ab5d0b38SChaitanya Kulkarni 	}
1174ab5d0b38SChaitanya Kulkarni }
1175ab5d0b38SChaitanya Kulkarni 
nvmet_start_ctrl(struct nvmet_ctrl * ctrl)1176a07b4970SChristoph Hellwig static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1177a07b4970SChristoph Hellwig {
1178a07b4970SChristoph Hellwig 	lockdep_assert_held(&ctrl->lock);
1179a07b4970SChristoph Hellwig 
1180d218a8a3SSagi Grimberg 	/*
1181d218a8a3SSagi Grimberg 	 * Only I/O controllers should verify iosqes,iocqes.
1182d218a8a3SSagi Grimberg 	 * Strictly speaking, the spec says a discovery controller
1183d218a8a3SSagi Grimberg 	 * should verify iosqes,iocqes are zeroed; however, that
1184d218a8a3SSagi Grimberg 	 * would break backwards compatibility, so don't enforce it.
1185d218a8a3SSagi Grimberg 	 */
1186a294711eSHannes Reinecke 	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
1187d218a8a3SSagi Grimberg 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1188d218a8a3SSagi Grimberg 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1189d218a8a3SSagi Grimberg 		ctrl->csts = NVME_CSTS_CFS;
1190d218a8a3SSagi Grimberg 		return;
1191d218a8a3SSagi Grimberg 	}
1192d218a8a3SSagi Grimberg 
1193d218a8a3SSagi Grimberg 	if (nvmet_cc_mps(ctrl->cc) != 0 ||
1194a07b4970SChristoph Hellwig 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1195ab5d0b38SChaitanya Kulkarni 	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1196a07b4970SChristoph Hellwig 		ctrl->csts = NVME_CSTS_CFS;
1197a07b4970SChristoph Hellwig 		return;
1198a07b4970SChristoph Hellwig 	}
1199a07b4970SChristoph Hellwig 
1200a07b4970SChristoph Hellwig 	ctrl->csts = NVME_CSTS_RDY;
1201d68a90e1SMax Gurtuvoy 
1202d68a90e1SMax Gurtuvoy 	/*
1203d68a90e1SMax Gurtuvoy 	 * Controllers that are not yet enabled should not really enforce the
1204d68a90e1SMax Gurtuvoy 	 * keep alive timeout, but we still want to track a timeout and clean up
1205d68a90e1SMax Gurtuvoy 	 * in case a host died before it enabled the controller.  Hence, simply
1206d68a90e1SMax Gurtuvoy 	 * reset the keep alive timer when the controller is enabled.
1207d68a90e1SMax Gurtuvoy 	 */
120885bd23f3Szhenwei pi 	if (ctrl->kato)
1209ddd2b8deSSagi Grimberg 		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
1210a07b4970SChristoph Hellwig }
1211a07b4970SChristoph Hellwig 
nvmet_clear_ctrl(struct nvmet_ctrl * ctrl)1212a07b4970SChristoph Hellwig static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1213a07b4970SChristoph Hellwig {
1214a07b4970SChristoph Hellwig 	lockdep_assert_held(&ctrl->lock);
1215a07b4970SChristoph Hellwig 
1216a07b4970SChristoph Hellwig 	/* XXX: tear down queues? */
1217a07b4970SChristoph Hellwig 	ctrl->csts &= ~NVME_CSTS_RDY;
1218a07b4970SChristoph Hellwig 	ctrl->cc = 0;
1219a07b4970SChristoph Hellwig }
1220a07b4970SChristoph Hellwig 
nvmet_update_cc(struct nvmet_ctrl * ctrl,u32 new)1221a07b4970SChristoph Hellwig void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1222a07b4970SChristoph Hellwig {
1223a07b4970SChristoph Hellwig 	u32 old;
1224a07b4970SChristoph Hellwig 
1225a07b4970SChristoph Hellwig 	mutex_lock(&ctrl->lock);
1226a07b4970SChristoph Hellwig 	old = ctrl->cc;
1227a07b4970SChristoph Hellwig 	ctrl->cc = new;
1228a07b4970SChristoph Hellwig 
1229a07b4970SChristoph Hellwig 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1230a07b4970SChristoph Hellwig 		nvmet_start_ctrl(ctrl);
1231a07b4970SChristoph Hellwig 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1232a07b4970SChristoph Hellwig 		nvmet_clear_ctrl(ctrl);
1233a07b4970SChristoph Hellwig 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1234a07b4970SChristoph Hellwig 		nvmet_clear_ctrl(ctrl);
1235a07b4970SChristoph Hellwig 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1236a07b4970SChristoph Hellwig 	}
1237a07b4970SChristoph Hellwig 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1238a07b4970SChristoph Hellwig 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1239a07b4970SChristoph Hellwig 	mutex_unlock(&ctrl->lock);
1240a07b4970SChristoph Hellwig }
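/*
 * CC transitions handled above: EN 0->1 starts the controller (CSTS.RDY is
 * set on success, CSTS.CFS on failure), EN 1->0 resets it, and a shutdown
 * notification (SHN) tears the controller down and latches CSTS.SHST to
 * "shutdown complete"; clearing SHN clears that status again.
 */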
1241a07b4970SChristoph Hellwig 
nvmet_init_cap(struct nvmet_ctrl * ctrl)1242a07b4970SChristoph Hellwig static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1243a07b4970SChristoph Hellwig {
1244a07b4970SChristoph Hellwig 	/* command sets supported: NVMe command set: */
1245a07b4970SChristoph Hellwig 	/* command sets supported: NVMe command set */
1246ab5d0b38SChaitanya Kulkarni 	/* Controller supports one or more I/O Command Sets */
1247ab5d0b38SChaitanya Kulkarni 	ctrl->cap |= (1ULL << 43);
1248a07b4970SChristoph Hellwig 	/* CC.EN timeout in 500msec units: */
1249a07b4970SChristoph Hellwig 	ctrl->cap |= (15ULL << 24);
1250a07b4970SChristoph Hellwig 	/* maximum queue entries supported: */
12516d1555ccSMax Gurtovoy 	if (ctrl->ops->get_max_queue_size)
1252ca2b221dSMax Gurtovoy 		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
1253ca2b221dSMax Gurtovoy 				   ctrl->port->max_queue_size) - 1;
12546d1555ccSMax Gurtovoy 	else
1255ca2b221dSMax Gurtovoy 		ctrl->cap |= ctrl->port->max_queue_size - 1;
125677d651a6SAdam Manzanares 
1257ab7a2737SChristoph Hellwig 	if (nvmet_is_passthru_subsys(ctrl->subsys))
125877d651a6SAdam Manzanares 		nvmet_passthrough_override_cap(ctrl);
1259a07b4970SChristoph Hellwig }
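/*
 * CAP layout assembled above (per the NVMe base specification): bit 37
 * advertises the NVM command set, bit 43 advertises support for one or more
 * I/O command sets, bits 31:24 hold the CC.EN timeout in 500ms units, and
 * bits 15:0 hold MQES as a zero-based queue size.  For example, a
 * max_queue_size of 1024 yields MQES = 1023.
 */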
1260a07b4970SChristoph Hellwig 
nvmet_ctrl_find_get(const char * subsysnqn,const char * hostnqn,u16 cntlid,struct nvmet_req * req)1261de587804SChaitanya Kulkarni struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1262de587804SChaitanya Kulkarni 				       const char *hostnqn, u16 cntlid,
1263de587804SChaitanya Kulkarni 				       struct nvmet_req *req)
1264a07b4970SChristoph Hellwig {
1265de587804SChaitanya Kulkarni 	struct nvmet_ctrl *ctrl = NULL;
1266a07b4970SChristoph Hellwig 	struct nvmet_subsys *subsys;
1267a07b4970SChristoph Hellwig 
1268a07b4970SChristoph Hellwig 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1269a07b4970SChristoph Hellwig 	if (!subsys) {
1270a07b4970SChristoph Hellwig 		pr_warn("connect request for invalid subsystem %s!\n",
1271a07b4970SChristoph Hellwig 			subsysnqn);
1272fc6c9730SMax Gurtovoy 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1273de587804SChaitanya Kulkarni 		goto out;
1274a07b4970SChristoph Hellwig 	}
1275a07b4970SChristoph Hellwig 
1276a07b4970SChristoph Hellwig 	mutex_lock(&subsys->lock);
1277a07b4970SChristoph Hellwig 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1278a07b4970SChristoph Hellwig 		if (ctrl->cntlid == cntlid) {
1279a07b4970SChristoph Hellwig 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1280a07b4970SChristoph Hellwig 				pr_warn("hostnqn mismatch.\n");
1281a07b4970SChristoph Hellwig 				continue;
1282a07b4970SChristoph Hellwig 			}
1283a07b4970SChristoph Hellwig 			if (!kref_get_unless_zero(&ctrl->ref))
1284a07b4970SChristoph Hellwig 				continue;
1285a07b4970SChristoph Hellwig 
1286de587804SChaitanya Kulkarni 			/* ctrl found */
1287de587804SChaitanya Kulkarni 			goto found;
1288a07b4970SChristoph Hellwig 		}
1289a07b4970SChristoph Hellwig 	}
1290a07b4970SChristoph Hellwig 
1291de587804SChaitanya Kulkarni 	ctrl = NULL; /* ctrl not found */
1292a07b4970SChristoph Hellwig 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1293a07b4970SChristoph Hellwig 		cntlid, subsysnqn, hostnqn);
1294fc6c9730SMax Gurtovoy 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1295a07b4970SChristoph Hellwig 
1296de587804SChaitanya Kulkarni found:
1297a07b4970SChristoph Hellwig 	mutex_unlock(&subsys->lock);
1298a07b4970SChristoph Hellwig 	nvmet_subsys_put(subsys);
1299de587804SChaitanya Kulkarni out:
1300de587804SChaitanya Kulkarni 	return ctrl;
1301a07b4970SChristoph Hellwig }
1302a07b4970SChristoph Hellwig 
nvmet_check_ctrl_status(struct nvmet_req * req)13037798df6fSChaitanya Kulkarni u16 nvmet_check_ctrl_status(struct nvmet_req *req)
130464a0ca88SParav Pandit {
130564a0ca88SParav Pandit 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1306b40b83e3SChaitanya Kulkarni 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
13077798df6fSChaitanya Kulkarni 		       req->cmd->common.opcode, req->sq->qid);
1308dd0b0a4aSWeiwen Hu 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
130964a0ca88SParav Pandit 	}
131064a0ca88SParav Pandit 
131164a0ca88SParav Pandit 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1312b40b83e3SChaitanya Kulkarni 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
13137798df6fSChaitanya Kulkarni 		       req->cmd->common.opcode, req->sq->qid);
1314dd0b0a4aSWeiwen Hu 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
131564a0ca88SParav Pandit 	}
1316db1312ddSHannes Reinecke 
1317db1312ddSHannes Reinecke 	if (unlikely(!nvmet_check_auth_status(req))) {
1318db1312ddSHannes Reinecke 		pr_warn("qid %d not authenticated\n", req->sq->qid);
1319dd0b0a4aSWeiwen Hu 		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
1320db1312ddSHannes Reinecke 	}
132164a0ca88SParav Pandit 	return 0;
132264a0ca88SParav Pandit }
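/*
 * Non-connect commands are rejected above until the controller has been
 * enabled (CC.EN), reports ready (CSTS.RDY), and, when in-band
 * authentication is configured, the queue has been authenticated.
 */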
132364a0ca88SParav Pandit 
nvmet_host_allowed(struct nvmet_subsys * subsys,const char * hostnqn)1324253928eeSSagi Grimberg bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1325a07b4970SChristoph Hellwig {
1326a07b4970SChristoph Hellwig 	struct nvmet_host_link *p;
1327a07b4970SChristoph Hellwig 
1328253928eeSSagi Grimberg 	lockdep_assert_held(&nvmet_config_sem);
1329253928eeSSagi Grimberg 
1330a07b4970SChristoph Hellwig 	if (subsys->allow_any_host)
1331a07b4970SChristoph Hellwig 		return true;
1332a07b4970SChristoph Hellwig 
1333a294711eSHannes Reinecke 	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
1334253928eeSSagi Grimberg 		return true;
1335253928eeSSagi Grimberg 
1336a07b4970SChristoph Hellwig 	list_for_each_entry(p, &subsys->hosts, entry) {
1337a07b4970SChristoph Hellwig 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1338a07b4970SChristoph Hellwig 			return true;
1339a07b4970SChristoph Hellwig 	}
1340a07b4970SChristoph Hellwig 
1341a07b4970SChristoph Hellwig 	return false;
1342a07b4970SChristoph Hellwig }
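/*
 * Host access above is granted if the subsystem accepts any host, if it is a
 * discovery subsystem, or if the host NQN is on the subsystem's allowed-hosts
 * list; callers must hold nvmet_config_sem (asserted at the top).
 */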
1343a07b4970SChristoph Hellwig 
1344c6925093SLogan Gunthorpe /*
1345c6925093SLogan Gunthorpe  * Note: ctrl->subsys->lock should be held when calling this function
1346c6925093SLogan Gunthorpe  */
nvmet_setup_p2p_ns_map(struct nvmet_ctrl * ctrl,struct nvmet_req * req)1347c6925093SLogan Gunthorpe static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1348c6925093SLogan Gunthorpe 		struct nvmet_req *req)
1349c6925093SLogan Gunthorpe {
1350c6925093SLogan Gunthorpe 	struct nvmet_ns *ns;
13517774e77eSChaitanya Kulkarni 	unsigned long idx;
1352c6925093SLogan Gunthorpe 
1353c6925093SLogan Gunthorpe 	if (!req->p2p_client)
1354c6925093SLogan Gunthorpe 		return;
1355c6925093SLogan Gunthorpe 
1356c6925093SLogan Gunthorpe 	ctrl->p2p_client = get_device(req->p2p_client);
1357c6925093SLogan Gunthorpe 
13587774e77eSChaitanya Kulkarni 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1359c6925093SLogan Gunthorpe 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1360c6925093SLogan Gunthorpe }
1361c6925093SLogan Gunthorpe 
1362c6925093SLogan Gunthorpe /*
1363c6925093SLogan Gunthorpe  * Note: ctrl->subsys->lock should be held when calling this function
1364c6925093SLogan Gunthorpe  */
nvmet_release_p2p_ns_map(struct nvmet_ctrl * ctrl)1365c6925093SLogan Gunthorpe static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1366c6925093SLogan Gunthorpe {
1367c6925093SLogan Gunthorpe 	struct radix_tree_iter iter;
1368c6925093SLogan Gunthorpe 	void __rcu **slot;
1369c6925093SLogan Gunthorpe 
1370c6925093SLogan Gunthorpe 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1371c6925093SLogan Gunthorpe 		pci_dev_put(radix_tree_deref_slot(slot));
1372c6925093SLogan Gunthorpe 
1373c6925093SLogan Gunthorpe 	put_device(ctrl->p2p_client);
1374c6925093SLogan Gunthorpe }
1375c6925093SLogan Gunthorpe 
nvmet_fatal_error_handler(struct work_struct * work)1376d11de63fSYufen Yu static void nvmet_fatal_error_handler(struct work_struct *work)
1377d11de63fSYufen Yu {
1378d11de63fSYufen Yu 	struct nvmet_ctrl *ctrl =
1379d11de63fSYufen Yu 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1380d11de63fSYufen Yu 
1381d11de63fSYufen Yu 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1382d11de63fSYufen Yu 	ctrl->ops->delete_ctrl(ctrl);
1383d11de63fSYufen Yu }
1384d11de63fSYufen Yu 
nvmet_alloc_ctrl(const char * subsysnqn,const char * hostnqn,struct nvmet_req * req,u32 kato,struct nvmet_ctrl ** ctrlp)1385a07b4970SChristoph Hellwig u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1386a07b4970SChristoph Hellwig 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1387a07b4970SChristoph Hellwig {
1388a07b4970SChristoph Hellwig 	struct nvmet_subsys *subsys;
1389a07b4970SChristoph Hellwig 	struct nvmet_ctrl *ctrl;
1390a07b4970SChristoph Hellwig 	int ret;
1391a07b4970SChristoph Hellwig 	u16 status;
1392a07b4970SChristoph Hellwig 
1393dd0b0a4aSWeiwen Hu 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
1394a07b4970SChristoph Hellwig 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1395a07b4970SChristoph Hellwig 	if (!subsys) {
1396a07b4970SChristoph Hellwig 		pr_warn("connect request for invalid subsystem %s!\n",
1397a07b4970SChristoph Hellwig 			subsysnqn);
1398fc6c9730SMax Gurtovoy 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1399a56f14c2SChaitanya Kulkarni 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1400a07b4970SChristoph Hellwig 		goto out;
1401a07b4970SChristoph Hellwig 	}
1402a07b4970SChristoph Hellwig 
1403a07b4970SChristoph Hellwig 	down_read(&nvmet_config_sem);
1404253928eeSSagi Grimberg 	if (!nvmet_host_allowed(subsys, hostnqn)) {
1405a07b4970SChristoph Hellwig 		pr_info("connect by host %s for subsystem %s not allowed\n",
1406a07b4970SChristoph Hellwig 			hostnqn, subsysnqn);
1407fc6c9730SMax Gurtovoy 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1408a07b4970SChristoph Hellwig 		up_read(&nvmet_config_sem);
1409dd0b0a4aSWeiwen Hu 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
1410a56f14c2SChaitanya Kulkarni 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1411a07b4970SChristoph Hellwig 		goto out_put_subsystem;
1412a07b4970SChristoph Hellwig 	}
1413a07b4970SChristoph Hellwig 	up_read(&nvmet_config_sem);
1414a07b4970SChristoph Hellwig 
1415a07b4970SChristoph Hellwig 	status = NVME_SC_INTERNAL;
1416a07b4970SChristoph Hellwig 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1417a07b4970SChristoph Hellwig 	if (!ctrl)
1418a07b4970SChristoph Hellwig 		goto out_put_subsystem;
1419a07b4970SChristoph Hellwig 	mutex_init(&ctrl->lock);
1420a07b4970SChristoph Hellwig 
14214ee43280SChristoph Hellwig 	ctrl->port = req->port;
14226d1555ccSMax Gurtovoy 	ctrl->ops = req->ops;
14234ee43280SChristoph Hellwig 
142434ad6151SAlan Adamson #ifdef CONFIG_NVME_TARGET_PASSTHRU
142534ad6151SAlan Adamson 	/* By default, set loop targets to clear IDS by default */
142634ad6151SAlan Adamson 	/* By default, loop targets clear IDs */
142734ad6151SAlan Adamson 		subsys->clear_ids = 1;
142834ad6151SAlan Adamson #endif
142934ad6151SAlan Adamson 
1430a07b4970SChristoph Hellwig 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1431a07b4970SChristoph Hellwig 	INIT_LIST_HEAD(&ctrl->async_events);
1432c6925093SLogan Gunthorpe 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1433d11de63fSYufen Yu 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1434f6e8bd59SAmit Engel 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1435a07b4970SChristoph Hellwig 
1436a07b4970SChristoph Hellwig 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1437a07b4970SChristoph Hellwig 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1438a07b4970SChristoph Hellwig 
1439a07b4970SChristoph Hellwig 	kref_init(&ctrl->ref);
1440a07b4970SChristoph Hellwig 	ctrl->subsys = subsys;
1441c82c370dSMax Gurtovoy 	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
144277d651a6SAdam Manzanares 	nvmet_init_cap(ctrl);
1443c86b8f7bSChristoph Hellwig 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1444a07b4970SChristoph Hellwig 
1445c16734eaSChristoph Hellwig 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1446c16734eaSChristoph Hellwig 			sizeof(__le32), GFP_KERNEL);
1447c16734eaSChristoph Hellwig 	if (!ctrl->changed_ns_list)
1448c16734eaSChristoph Hellwig 		goto out_free_ctrl;
1449c16734eaSChristoph Hellwig 
1450a07b4970SChristoph Hellwig 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1451a07b4970SChristoph Hellwig 			sizeof(struct nvmet_sq *),
1452a07b4970SChristoph Hellwig 			GFP_KERNEL);
1453a07b4970SChristoph Hellwig 	if (!ctrl->sqs)
14546d65aeabSAmit 		goto out_free_changed_ns_list;
1455a07b4970SChristoph Hellwig 
145622027a98SSagi Grimberg 	ret = ida_alloc_range(&cntlid_ida,
145794a39d61SChaitanya Kulkarni 			     subsys->cntlid_min, subsys->cntlid_max,
1458a07b4970SChristoph Hellwig 			     GFP_KERNEL);
1459a07b4970SChristoph Hellwig 	if (ret < 0) {
1460dd0b0a4aSWeiwen Hu 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
1461a07b4970SChristoph Hellwig 		goto out_free_sqs;
1462a07b4970SChristoph Hellwig 	}
1463a07b4970SChristoph Hellwig 	ctrl->cntlid = ret;
1464a07b4970SChristoph Hellwig 
1465a07b4970SChristoph Hellwig 	/*
1466f9362ac1SJay Sternberg 	 * Discovery controllers may use an arbitrarily high value
1467f9362ac1SJay Sternberg 	 * in order to clean up stale discovery sessions.
1468a07b4970SChristoph Hellwig 	 */
1469a294711eSHannes Reinecke 	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1470f9362ac1SJay Sternberg 		kato = NVMET_DISC_KATO_MS;
1471f9362ac1SJay Sternberg 
1472a07b4970SChristoph Hellwig 	/* keep-alive timeout in seconds */
1473a07b4970SChristoph Hellwig 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1474f9362ac1SJay Sternberg 
1475e4a97625SChaitanya Kulkarni 	ctrl->err_counter = 0;
1476e4a97625SChaitanya Kulkarni 	spin_lock_init(&ctrl->error_lock);
1477e4a97625SChaitanya Kulkarni 
1478a07b4970SChristoph Hellwig 	nvmet_start_keep_alive_timer(ctrl);
1479a07b4970SChristoph Hellwig 
1480a07b4970SChristoph Hellwig 	mutex_lock(&subsys->lock);
1481a07b4970SChristoph Hellwig 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1482c6925093SLogan Gunthorpe 	nvmet_setup_p2p_ns_map(ctrl, req);
1483649fd414SHannes Reinecke 	nvmet_debugfs_ctrl_setup(ctrl);
1484a07b4970SChristoph Hellwig 	mutex_unlock(&subsys->lock);
1485a07b4970SChristoph Hellwig 
1486a07b4970SChristoph Hellwig 	*ctrlp = ctrl;
1487a07b4970SChristoph Hellwig 	return 0;
1488a07b4970SChristoph Hellwig 
1489a07b4970SChristoph Hellwig out_free_sqs:
1490a07b4970SChristoph Hellwig 	kfree(ctrl->sqs);
1491c16734eaSChristoph Hellwig out_free_changed_ns_list:
1492c16734eaSChristoph Hellwig 	kfree(ctrl->changed_ns_list);
1493a07b4970SChristoph Hellwig out_free_ctrl:
1494a07b4970SChristoph Hellwig 	kfree(ctrl);
1495a07b4970SChristoph Hellwig out_put_subsystem:
1496a07b4970SChristoph Hellwig 	nvmet_subsys_put(subsys);
1497a07b4970SChristoph Hellwig out:
1498a07b4970SChristoph Hellwig 	return status;
1499a07b4970SChristoph Hellwig }
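/*
 * Summary of the allocation path above: validate the subsystem NQN and host
 * access, allocate and initialize the controller, pick a controller ID from
 * cntlid_ida within [cntlid_min, cntlid_max], start the keep-alive timer,
 * and link the controller into the subsystem (also wiring up the P2P
 * namespace map and the debugfs entry).  Errors unwind in reverse order.
 */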
1500a07b4970SChristoph Hellwig 
nvmet_ctrl_free(struct kref * ref)1501a07b4970SChristoph Hellwig static void nvmet_ctrl_free(struct kref *ref)
1502a07b4970SChristoph Hellwig {
1503a07b4970SChristoph Hellwig 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1504a07b4970SChristoph Hellwig 	struct nvmet_subsys *subsys = ctrl->subsys;
1505a07b4970SChristoph Hellwig 
1506a07b4970SChristoph Hellwig 	mutex_lock(&subsys->lock);
1507c6925093SLogan Gunthorpe 	nvmet_release_p2p_ns_map(ctrl);
1508a07b4970SChristoph Hellwig 	list_del(&ctrl->subsys_entry);
1509a07b4970SChristoph Hellwig 	mutex_unlock(&subsys->lock);
1510a07b4970SChristoph Hellwig 
15116b1943afSIsrael Rukshin 	nvmet_stop_keep_alive_timer(ctrl);
15126b1943afSIsrael Rukshin 
151306406d81SSagi Grimberg 	flush_work(&ctrl->async_event_work);
151406406d81SSagi Grimberg 	cancel_work_sync(&ctrl->fatal_err_work);
151506406d81SSagi Grimberg 
1516db1312ddSHannes Reinecke 	nvmet_destroy_auth(ctrl);
1517db1312ddSHannes Reinecke 
1518649fd414SHannes Reinecke 	nvmet_debugfs_ctrl_free(ctrl);
1519649fd414SHannes Reinecke 
152022027a98SSagi Grimberg 	ida_free(&cntlid_ida, ctrl->cntlid);
1521a07b4970SChristoph Hellwig 
152264f5e9cdSSagi Grimberg 	nvmet_async_events_free(ctrl);
1523a07b4970SChristoph Hellwig 	kfree(ctrl->sqs);
1524c16734eaSChristoph Hellwig 	kfree(ctrl->changed_ns_list);
1525a07b4970SChristoph Hellwig 	kfree(ctrl);
15266b1943afSIsrael Rukshin 
15276b1943afSIsrael Rukshin 	nvmet_subsys_put(subsys);
1528a07b4970SChristoph Hellwig }
1529a07b4970SChristoph Hellwig 
nvmet_ctrl_put(struct nvmet_ctrl * ctrl)1530a07b4970SChristoph Hellwig void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1531a07b4970SChristoph Hellwig {
1532a07b4970SChristoph Hellwig 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1533a07b4970SChristoph Hellwig }
1534a07b4970SChristoph Hellwig 
nvmet_ctrl_fatal_error(struct nvmet_ctrl * ctrl)1535a07b4970SChristoph Hellwig void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1536a07b4970SChristoph Hellwig {
15378242ddacSSagi Grimberg 	mutex_lock(&ctrl->lock);
15388242ddacSSagi Grimberg 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1539a07b4970SChristoph Hellwig 		ctrl->csts |= NVME_CSTS_CFS;
15408832cf92SSagi Grimberg 		queue_work(nvmet_wq, &ctrl->fatal_err_work);
1541a07b4970SChristoph Hellwig 	}
15428242ddacSSagi Grimberg 	mutex_unlock(&ctrl->lock);
15438242ddacSSagi Grimberg }
1544a07b4970SChristoph Hellwig EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1545a07b4970SChristoph Hellwig 
nvmet_ctrl_host_traddr(struct nvmet_ctrl * ctrl,char * traddr,size_t traddr_len)1546*7e5c3de3SHannes Reinecke ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
1547*7e5c3de3SHannes Reinecke 		char *traddr, size_t traddr_len)
1548*7e5c3de3SHannes Reinecke {
1549*7e5c3de3SHannes Reinecke 	if (!ctrl->ops->host_traddr)
1550*7e5c3de3SHannes Reinecke 		return -EOPNOTSUPP;
1551*7e5c3de3SHannes Reinecke 	return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
1552*7e5c3de3SHannes Reinecke }
1553*7e5c3de3SHannes Reinecke 
nvmet_find_get_subsys(struct nvmet_port * port,const char * subsysnqn)1554a07b4970SChristoph Hellwig static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1555a07b4970SChristoph Hellwig 		const char *subsysnqn)
1556a07b4970SChristoph Hellwig {
1557a07b4970SChristoph Hellwig 	struct nvmet_subsys_link *p;
1558a07b4970SChristoph Hellwig 
1559a07b4970SChristoph Hellwig 	if (!port)
1560a07b4970SChristoph Hellwig 		return NULL;
1561a07b4970SChristoph Hellwig 
15620c48645aSHannes Reinecke 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1563a07b4970SChristoph Hellwig 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1564a07b4970SChristoph Hellwig 			return NULL;
1565a07b4970SChristoph Hellwig 		return nvmet_disc_subsys;
1566a07b4970SChristoph Hellwig 	}
1567a07b4970SChristoph Hellwig 
1568a07b4970SChristoph Hellwig 	down_read(&nvmet_config_sem);
156995409e27SHannes Reinecke 	if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
157095409e27SHannes Reinecke 				NVMF_NQN_SIZE)) {
157195409e27SHannes Reinecke 		if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
157295409e27SHannes Reinecke 			up_read(&nvmet_config_sem);
157395409e27SHannes Reinecke 			return nvmet_disc_subsys;
157495409e27SHannes Reinecke 		}
157595409e27SHannes Reinecke 	}
1576a07b4970SChristoph Hellwig 	list_for_each_entry(p, &port->subsystems, entry) {
1577a07b4970SChristoph Hellwig 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1578a07b4970SChristoph Hellwig 				NVMF_NQN_SIZE)) {
1579a07b4970SChristoph Hellwig 			if (!kref_get_unless_zero(&p->subsys->ref))
1580a07b4970SChristoph Hellwig 				break;
1581a07b4970SChristoph Hellwig 			up_read(&nvmet_config_sem);
1582a07b4970SChristoph Hellwig 			return p->subsys;
1583a07b4970SChristoph Hellwig 		}
1584a07b4970SChristoph Hellwig 	}
1585a07b4970SChristoph Hellwig 	up_read(&nvmet_config_sem);
1586a07b4970SChristoph Hellwig 	return NULL;
1587a07b4970SChristoph Hellwig }
1588a07b4970SChristoph Hellwig 
nvmet_subsys_alloc(const char * subsysnqn,enum nvme_subsys_type type)1589a07b4970SChristoph Hellwig struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1590a07b4970SChristoph Hellwig 		enum nvme_subsys_type type)
1591a07b4970SChristoph Hellwig {
1592a07b4970SChristoph Hellwig 	struct nvmet_subsys *subsys;
1593e13b0615SNoam Gottlieb 	char serial[NVMET_SN_MAX_SIZE / 2];
15940d148efdSNoam Gottlieb 	int ret;
1595a07b4970SChristoph Hellwig 
1596a07b4970SChristoph Hellwig 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1597a07b4970SChristoph Hellwig 	if (!subsys)
15986b7e631bSMinwoo Im 		return ERR_PTR(-ENOMEM);
1599a07b4970SChristoph Hellwig 
1600ba76af67SLogan Gunthorpe 	subsys->ver = NVMET_DEFAULT_VS;
16012e7f5d2aSJohannes Thumshirn 	/* generate a random serial number as our controllers are ephemeral: */
1602e13b0615SNoam Gottlieb 	get_random_bytes(&serial, sizeof(serial));
1603e13b0615SNoam Gottlieb 	bin2hex(subsys->serial, &serial, sizeof(serial));
1604a07b4970SChristoph Hellwig 
16050d148efdSNoam Gottlieb 	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
16060d148efdSNoam Gottlieb 	if (!subsys->model_number) {
16070d148efdSNoam Gottlieb 		ret = -ENOMEM;
16080d148efdSNoam Gottlieb 		goto free_subsys;
16090d148efdSNoam Gottlieb 	}
1610a07b4970SChristoph Hellwig 
161123855abdSAleksandr Miloserdov 	subsys->ieee_oui = 0;
161223855abdSAleksandr Miloserdov 
161368c5444cSAleksandr Miloserdov 	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
161468c5444cSAleksandr Miloserdov 	if (!subsys->firmware_rev) {
161568c5444cSAleksandr Miloserdov 		ret = -ENOMEM;
161668c5444cSAleksandr Miloserdov 		goto free_mn;
161768c5444cSAleksandr Miloserdov 	}
161868c5444cSAleksandr Miloserdov 
1619a07b4970SChristoph Hellwig 	switch (type) {
1620a07b4970SChristoph Hellwig 	case NVME_NQN_NVME:
1621a07b4970SChristoph Hellwig 		subsys->max_qid = NVMET_NR_QUEUES;
1622a07b4970SChristoph Hellwig 		break;
1623a07b4970SChristoph Hellwig 	case NVME_NQN_DISC:
16242953b30bSHannes Reinecke 	case NVME_NQN_CURR:
1625a07b4970SChristoph Hellwig 		subsys->max_qid = 0;
1626a07b4970SChristoph Hellwig 		break;
1627a07b4970SChristoph Hellwig 	default:
1628a07b4970SChristoph Hellwig 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
16290d148efdSNoam Gottlieb 		ret = -EINVAL;
163068c5444cSAleksandr Miloserdov 		goto free_fr;
1631a07b4970SChristoph Hellwig 	}
1632a07b4970SChristoph Hellwig 	subsys->type = type;
1633a07b4970SChristoph Hellwig 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1634a07b4970SChristoph Hellwig 			GFP_KERNEL);
163569555af2SWei Yongjun 	if (!subsys->subsysnqn) {
16360d148efdSNoam Gottlieb 		ret = -ENOMEM;
163768c5444cSAleksandr Miloserdov 		goto free_fr;
1638a07b4970SChristoph Hellwig 	}
163994a39d61SChaitanya Kulkarni 	subsys->cntlid_min = NVME_CNTLID_MIN;
164094a39d61SChaitanya Kulkarni 	subsys->cntlid_max = NVME_CNTLID_MAX;
1641a07b4970SChristoph Hellwig 	kref_init(&subsys->ref);
1642a07b4970SChristoph Hellwig 
1643a07b4970SChristoph Hellwig 	mutex_init(&subsys->lock);
16447774e77eSChaitanya Kulkarni 	xa_init(&subsys->namespaces);
1645a07b4970SChristoph Hellwig 	INIT_LIST_HEAD(&subsys->ctrls);
1646a07b4970SChristoph Hellwig 	INIT_LIST_HEAD(&subsys->hosts);
1647a07b4970SChristoph Hellwig 
1648649fd414SHannes Reinecke 	ret = nvmet_debugfs_subsys_setup(subsys);
1649649fd414SHannes Reinecke 	if (ret)
1650649fd414SHannes Reinecke 		goto free_subsysnqn;
1651649fd414SHannes Reinecke 
1652a07b4970SChristoph Hellwig 	return subsys;
16530d148efdSNoam Gottlieb 
1654649fd414SHannes Reinecke free_subsysnqn:
1655649fd414SHannes Reinecke 	kfree(subsys->subsysnqn);
165668c5444cSAleksandr Miloserdov free_fr:
165768c5444cSAleksandr Miloserdov 	kfree(subsys->firmware_rev);
16580d148efdSNoam Gottlieb free_mn:
16590d148efdSNoam Gottlieb 	kfree(subsys->model_number);
16600d148efdSNoam Gottlieb free_subsys:
16610d148efdSNoam Gottlieb 	kfree(subsys);
16620d148efdSNoam Gottlieb 	return ERR_PTR(ret);
1663a07b4970SChristoph Hellwig }
1664a07b4970SChristoph Hellwig 
nvmet_subsys_free(struct kref * ref)1665a07b4970SChristoph Hellwig static void nvmet_subsys_free(struct kref *ref)
1666a07b4970SChristoph Hellwig {
1667a07b4970SChristoph Hellwig 	struct nvmet_subsys *subsys =
1668a07b4970SChristoph Hellwig 		container_of(ref, struct nvmet_subsys, ref);
1669a07b4970SChristoph Hellwig 
16707774e77eSChaitanya Kulkarni 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1671a07b4970SChristoph Hellwig 
1672649fd414SHannes Reinecke 	nvmet_debugfs_subsys_free(subsys);
1673649fd414SHannes Reinecke 
16747774e77eSChaitanya Kulkarni 	xa_destroy(&subsys->namespaces);
1675ba76af67SLogan Gunthorpe 	nvmet_passthru_subsys_free(subsys);
1676ba76af67SLogan Gunthorpe 
1677a07b4970SChristoph Hellwig 	kfree(subsys->subsysnqn);
1678d9f273b7SMax Gurtovoy 	kfree(subsys->model_number);
167968c5444cSAleksandr Miloserdov 	kfree(subsys->firmware_rev);
1680a07b4970SChristoph Hellwig 	kfree(subsys);
1681a07b4970SChristoph Hellwig }
1682a07b4970SChristoph Hellwig 
nvmet_subsys_del_ctrls(struct nvmet_subsys * subsys)1683344770b0SSagi Grimberg void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1684344770b0SSagi Grimberg {
1685344770b0SSagi Grimberg 	struct nvmet_ctrl *ctrl;
1686344770b0SSagi Grimberg 
1687344770b0SSagi Grimberg 	mutex_lock(&subsys->lock);
1688344770b0SSagi Grimberg 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1689344770b0SSagi Grimberg 		ctrl->ops->delete_ctrl(ctrl);
1690344770b0SSagi Grimberg 	mutex_unlock(&subsys->lock);
1691344770b0SSagi Grimberg }
1692344770b0SSagi Grimberg 
nvmet_subsys_put(struct nvmet_subsys * subsys)1693a07b4970SChristoph Hellwig void nvmet_subsys_put(struct nvmet_subsys *subsys)
1694a07b4970SChristoph Hellwig {
1695a07b4970SChristoph Hellwig 	kref_put(&subsys->ref, nvmet_subsys_free);
1696a07b4970SChristoph Hellwig }
1697a07b4970SChristoph Hellwig 
nvmet_init(void)1698a07b4970SChristoph Hellwig static int __init nvmet_init(void)
1699a07b4970SChristoph Hellwig {
1700fa8f9ac4SChristoph Hellwig 	int error = -ENOMEM;
1701a07b4970SChristoph Hellwig 
170272efd25dSChristoph Hellwig 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
170372efd25dSChristoph Hellwig 
1704fa8f9ac4SChristoph Hellwig 	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
1705fa8f9ac4SChristoph Hellwig 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
1706fa8f9ac4SChristoph Hellwig 			SLAB_HWCACHE_ALIGN, NULL);
1707fa8f9ac4SChristoph Hellwig 	if (!nvmet_bvec_cache)
1708fa8f9ac4SChristoph Hellwig 		return -ENOMEM;
1709fa8f9ac4SChristoph Hellwig 
1710aaf2e048SChaitanya Kulkarni 	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1711aaf2e048SChaitanya Kulkarni 	if (!zbd_wq)
1712fa8f9ac4SChristoph Hellwig 		goto out_destroy_bvec_cache;
1713aaf2e048SChaitanya Kulkarni 
171455eb942eSChaitanya Kulkarni 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
171555eb942eSChaitanya Kulkarni 			WQ_MEM_RECLAIM, 0);
1716fa8f9ac4SChristoph Hellwig 	if (!buffered_io_wq)
1717aaf2e048SChaitanya Kulkarni 		goto out_free_zbd_work_queue;
171872efd25dSChristoph Hellwig 
171934cfb09cSSagi Grimberg 	nvmet_wq = alloc_workqueue("nvmet-wq",
172034cfb09cSSagi Grimberg 			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1721fa8f9ac4SChristoph Hellwig 	if (!nvmet_wq)
17228832cf92SSagi Grimberg 		goto out_free_buffered_work_queue;
17238832cf92SSagi Grimberg 
1724a07b4970SChristoph Hellwig 	error = nvmet_init_discovery();
1725a07b4970SChristoph Hellwig 	if (error)
17268832cf92SSagi Grimberg 		goto out_free_nvmet_work_queue;
1727a07b4970SChristoph Hellwig 
1728649fd414SHannes Reinecke 	error = nvmet_init_debugfs();
1729a07b4970SChristoph Hellwig 	if (error)
1730a07b4970SChristoph Hellwig 		goto out_exit_discovery;
1731649fd414SHannes Reinecke 
1732649fd414SHannes Reinecke 	error = nvmet_init_configfs();
1733649fd414SHannes Reinecke 	if (error)
1734649fd414SHannes Reinecke 		goto out_exit_debugfs;
1735649fd414SHannes Reinecke 
1736a07b4970SChristoph Hellwig 	return 0;
1737a07b4970SChristoph Hellwig 
1738649fd414SHannes Reinecke out_exit_debugfs:
1739649fd414SHannes Reinecke 	nvmet_exit_debugfs();
1740a07b4970SChristoph Hellwig out_exit_discovery:
1741a07b4970SChristoph Hellwig 	nvmet_exit_discovery();
17428832cf92SSagi Grimberg out_free_nvmet_work_queue:
17438832cf92SSagi Grimberg 	destroy_workqueue(nvmet_wq);
17448832cf92SSagi Grimberg out_free_buffered_work_queue:
174504db0e5eSChaitanya Kulkarni 	destroy_workqueue(buffered_io_wq);
1746aaf2e048SChaitanya Kulkarni out_free_zbd_work_queue:
1747aaf2e048SChaitanya Kulkarni 	destroy_workqueue(zbd_wq);
1748fa8f9ac4SChristoph Hellwig out_destroy_bvec_cache:
1749fa8f9ac4SChristoph Hellwig 	kmem_cache_destroy(nvmet_bvec_cache);
1750a07b4970SChristoph Hellwig 	return error;
1751a07b4970SChristoph Hellwig }
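/*
 * Module init order above: bvec slab cache, the zoned / buffered-I/O / core
 * workqueues, then the discovery subsystem, debugfs, and finally configfs,
 * which makes the target configurable from user space; nvmet_exit() tears
 * these down in the opposite order.
 */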
1752a07b4970SChristoph Hellwig 
nvmet_exit(void)1753a07b4970SChristoph Hellwig static void __exit nvmet_exit(void)
1754a07b4970SChristoph Hellwig {
1755a07b4970SChristoph Hellwig 	nvmet_exit_configfs();
1756649fd414SHannes Reinecke 	nvmet_exit_debugfs();
1757a07b4970SChristoph Hellwig 	nvmet_exit_discovery();
175815fbad96SSagi Grimberg 	ida_destroy(&cntlid_ida);
17598832cf92SSagi Grimberg 	destroy_workqueue(nvmet_wq);
176055eb942eSChaitanya Kulkarni 	destroy_workqueue(buffered_io_wq);
1761aaf2e048SChaitanya Kulkarni 	destroy_workqueue(zbd_wq);
1762fa8f9ac4SChristoph Hellwig 	kmem_cache_destroy(nvmet_bvec_cache);
1763a07b4970SChristoph Hellwig 
1764a07b4970SChristoph Hellwig 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1765a07b4970SChristoph Hellwig 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1766a07b4970SChristoph Hellwig }
1767a07b4970SChristoph Hellwig 
1768a07b4970SChristoph Hellwig module_init(nvmet_init);
1769a07b4970SChristoph Hellwig module_exit(nvmet_exit);
1770a07b4970SChristoph Hellwig 
177141951f83SChaitanya Kulkarni MODULE_DESCRIPTION("NVMe target core framework");
1772a07b4970SChristoph Hellwig MODULE_LICENSE("GPL v2");
1773