xref: /linux/drivers/nvme/target/core.c (revision 6d8854216ebb60959ddb6f4ea4123bd449ba6cf6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12 
13 #include <generated/utsrelease.h>
14 
15 #define CREATE_TRACE_POINTS
16 #include "trace.h"
17 
18 #include "nvmet.h"
19 #include "debugfs.h"
20 
21 struct kmem_cache *nvmet_bvec_cache;
22 struct workqueue_struct *buffered_io_wq;
23 struct workqueue_struct *zbd_wq;
24 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
25 static DEFINE_IDA(cntlid_ida);
26 
27 struct workqueue_struct *nvmet_wq;
28 EXPORT_SYMBOL_GPL(nvmet_wq);
29 
30 /*
31  * This read/write semaphore is used to synchronize access to configuration
32  * information on a target system that will result in discovery log page
33  * information change for at least one host.
34  * The full list of resources protected by this semaphore is:
35  *
36  *  - subsystems list
37  *  - per-subsystem allowed hosts list
38  *  - allow_any_host subsystem attribute
39  *  - nvmet_genctr
40  *  - the nvmet_transports array
41  *
42  * When updating any of these lists/structures, the write lock must be held;
43  * when reading (populating the discovery log page or checking a host-subsystem
44  * link), the read lock is taken to allow concurrent readers.
45  */
46 DECLARE_RWSEM(nvmet_config_sem);
47 
48 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
49 u64 nvmet_ana_chgcnt;
50 DECLARE_RWSEM(nvmet_ana_sem);
51 
52 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
53 {
54 	switch (errno) {
55 	case 0:
56 		return NVME_SC_SUCCESS;
57 	case -ENOSPC:
58 		req->error_loc = offsetof(struct nvme_rw_command, length);
59 		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
60 	case -EREMOTEIO:
61 		req->error_loc = offsetof(struct nvme_rw_command, slba);
62 		return  NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
63 	case -EOPNOTSUPP:
64 		req->error_loc = offsetof(struct nvme_common_command, opcode);
65 		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
66 	case -ENODATA:
67 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
68 		return NVME_SC_ACCESS_DENIED;
69 	case -EIO:
70 		fallthrough;
71 	default:
72 		req->error_loc = offsetof(struct nvme_common_command, opcode);
73 		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
74 	}
75 }
76 
77 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
78 {
79 	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
80 		 req->sq->qid);
81 
82 	req->error_loc = offsetof(struct nvme_common_command, opcode);
83 	return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
84 }
85 
86 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
87 		const char *subsysnqn);
88 
89 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
90 		size_t len)
91 {
92 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
93 		req->error_loc = offsetof(struct nvme_common_command, dptr);
94 		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
95 	}
96 	return 0;
97 }
98 
99 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
100 {
101 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
102 		req->error_loc = offsetof(struct nvme_common_command, dptr);
103 		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
104 	}
105 	return 0;
106 }
107 
108 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
109 {
110 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
111 		req->error_loc = offsetof(struct nvme_common_command, dptr);
112 		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
113 	}
114 	return 0;
115 }
116 
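/*
 * Return the highest enabled NSID in the subsystem.  The xarray is walked in
 * ascending index order, so the last enabled entry seen is the maximum.
 */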
117 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
118 {
119 	struct nvmet_ns *cur;
120 	unsigned long idx;
121 	u32 nsid = 0;
122 
123 	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
124 		nsid = cur->nsid;
125 
126 	return nsid;
127 }
128 
129 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
130 {
131 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
132 }
133 
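/*
 * Fail all outstanding Asynchronous Event Request commands with an internal
 * error status.  ctrl->lock is dropped around each completion so the
 * transport's ->queue_response() is not called with the lock held.
 */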
134 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
135 {
136 	struct nvmet_req *req;
137 
138 	mutex_lock(&ctrl->lock);
139 	while (ctrl->nr_async_event_cmds) {
140 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
141 		mutex_unlock(&ctrl->lock);
142 		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
143 		mutex_lock(&ctrl->lock);
144 	}
145 	mutex_unlock(&ctrl->lock);
146 }
147 
148 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
149 {
150 	struct nvmet_async_event *aen;
151 	struct nvmet_req *req;
152 
153 	mutex_lock(&ctrl->lock);
154 	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
155 		aen = list_first_entry(&ctrl->async_events,
156 				       struct nvmet_async_event, entry);
157 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
158 		nvmet_set_result(req, nvmet_async_event_result(aen));
159 
160 		list_del(&aen->entry);
161 		kfree(aen);
162 
163 		mutex_unlock(&ctrl->lock);
164 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
165 		nvmet_req_complete(req, 0);
166 		mutex_lock(&ctrl->lock);
167 	}
168 	mutex_unlock(&ctrl->lock);
169 }
170 
171 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
172 {
173 	struct nvmet_async_event *aen, *tmp;
174 
175 	mutex_lock(&ctrl->lock);
176 	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
177 		list_del(&aen->entry);
178 		kfree(aen);
179 	}
180 	mutex_unlock(&ctrl->lock);
181 }
182 
183 static void nvmet_async_event_work(struct work_struct *work)
184 {
185 	struct nvmet_ctrl *ctrl =
186 		container_of(work, struct nvmet_ctrl, async_event_work);
187 
188 	nvmet_async_events_process(ctrl);
189 }
190 
191 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
192 		u8 event_info, u8 log_page)
193 {
194 	struct nvmet_async_event *aen;
195 
196 	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
197 	if (!aen)
198 		return;
199 
200 	aen->event_type = event_type;
201 	aen->event_info = event_info;
202 	aen->log_page = log_page;
203 
204 	mutex_lock(&ctrl->lock);
205 	list_add_tail(&aen->entry, &ctrl->async_events);
206 	mutex_unlock(&ctrl->lock);
207 
208 	queue_work(nvmet_wq, &ctrl->async_event_work);
209 }
210 
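/*
 * Record a namespace ID in the controller's Changed Namespace List.  If more
 * namespaces change than the log can hold, the list degenerates to a single
 * 0xffffffff entry and further updates are ignored.
 */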
211 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
212 {
213 	u32 i;
214 
215 	mutex_lock(&ctrl->lock);
216 	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
217 		goto out_unlock;
218 
219 	for (i = 0; i < ctrl->nr_changed_ns; i++) {
220 		if (ctrl->changed_ns_list[i] == nsid)
221 			goto out_unlock;
222 	}
223 
224 	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
225 		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
226 		ctrl->nr_changed_ns = U32_MAX;
227 		goto out_unlock;
228 	}
229 
230 	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
231 out_unlock:
232 	mutex_unlock(&ctrl->lock);
233 }
234 
235 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
236 {
237 	struct nvmet_ctrl *ctrl;
238 
239 	lockdep_assert_held(&subsys->lock);
240 
241 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
242 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
243 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
244 			continue;
245 		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
246 				NVME_AER_NOTICE_NS_CHANGED,
247 				NVME_LOG_CHANGED_NS);
248 	}
249 }
250 
251 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
252 		struct nvmet_port *port)
253 {
254 	struct nvmet_ctrl *ctrl;
255 
256 	mutex_lock(&subsys->lock);
257 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
258 		if (port && ctrl->port != port)
259 			continue;
260 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
261 			continue;
262 		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
263 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
264 	}
265 	mutex_unlock(&subsys->lock);
266 }
267 
268 void nvmet_port_send_ana_event(struct nvmet_port *port)
269 {
270 	struct nvmet_subsys_link *p;
271 
272 	down_read(&nvmet_config_sem);
273 	list_for_each_entry(p, &port->subsystems, entry)
274 		nvmet_send_ana_event(p->subsys, port);
275 	up_read(&nvmet_config_sem);
276 }
277 
278 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
279 {
280 	int ret = 0;
281 
282 	down_write(&nvmet_config_sem);
283 	if (nvmet_transports[ops->type])
284 		ret = -EINVAL;
285 	else
286 		nvmet_transports[ops->type] = ops;
287 	up_write(&nvmet_config_sem);
288 
289 	return ret;
290 }
291 EXPORT_SYMBOL_GPL(nvmet_register_transport);
292 
293 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
294 {
295 	down_write(&nvmet_config_sem);
296 	nvmet_transports[ops->type] = NULL;
297 	up_write(&nvmet_config_sem);
298 }
299 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
300 
301 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
302 {
303 	struct nvmet_ctrl *ctrl;
304 
305 	mutex_lock(&subsys->lock);
306 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
307 		if (ctrl->port == port)
308 			ctrl->ops->delete_ctrl(ctrl);
309 	}
310 	mutex_unlock(&subsys->lock);
311 }
312 
313 int nvmet_enable_port(struct nvmet_port *port)
314 {
315 	const struct nvmet_fabrics_ops *ops;
316 	int ret;
317 
318 	lockdep_assert_held(&nvmet_config_sem);
319 
320 	if (port->disc_addr.trtype == NVMF_TRTYPE_MAX)
321 		return -EINVAL;
322 
323 	ops = nvmet_transports[port->disc_addr.trtype];
324 	if (!ops) {
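		/*
		 * Drop the config semaphore while the transport module is
		 * loaded: the module's init path calls
		 * nvmet_register_transport(), which takes the same semaphore
		 * for writing.
		 */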
325 		up_write(&nvmet_config_sem);
326 		request_module("nvmet-transport-%d", port->disc_addr.trtype);
327 		down_write(&nvmet_config_sem);
328 		ops = nvmet_transports[port->disc_addr.trtype];
329 		if (!ops) {
330 			pr_err("transport type %d not supported\n",
331 				port->disc_addr.trtype);
332 			return -EINVAL;
333 		}
334 	}
335 
336 	if (!try_module_get(ops->owner))
337 		return -EINVAL;
338 
339 	/*
340 	 * If the user requested PI support and the transport isn't PI-capable,
341 	 * don't enable the port.
342 	 */
343 	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
344 		pr_err("T10-PI is not supported by transport type %d\n",
345 		       port->disc_addr.trtype);
346 		ret = -EINVAL;
347 		goto out_put;
348 	}
349 
350 	ret = ops->add_port(port);
351 	if (ret)
352 		goto out_put;
353 
354 	/* If the transport didn't set inline_data_size, then disable it. */
355 	if (port->inline_data_size < 0)
356 		port->inline_data_size = 0;
357 
358 	/*
359 	 * If the transport didn't set the max_queue_size properly, then clamp
360 	 * it to the target limits. Also set default values in case the
361 	 * transport didn't set it at all.
362 	 */
363 	if (port->max_queue_size < 0)
364 		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
365 	else
366 		port->max_queue_size = clamp_t(int, port->max_queue_size,
367 					       NVMET_MIN_QUEUE_SIZE,
368 					       NVMET_MAX_QUEUE_SIZE);
369 
370 	port->enabled = true;
371 	port->tr_ops = ops;
372 	return 0;
373 
374 out_put:
375 	module_put(ops->owner);
376 	return ret;
377 }
378 
379 void nvmet_disable_port(struct nvmet_port *port)
380 {
381 	const struct nvmet_fabrics_ops *ops;
382 
383 	lockdep_assert_held(&nvmet_config_sem);
384 
385 	port->enabled = false;
386 	port->tr_ops = NULL;
387 
388 	ops = nvmet_transports[port->disc_addr.trtype];
389 	ops->remove_port(port);
390 	module_put(ops->owner);
391 }
392 
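/*
 * Keep-alive expiry handler.  If command traffic was seen since the last
 * expiry (Traffic Based Keep Alive), re-arm the timer instead of declaring
 * the controller dead.
 */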
393 static void nvmet_keep_alive_timer(struct work_struct *work)
394 {
395 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
396 			struct nvmet_ctrl, ka_work);
397 	bool reset_tbkas = ctrl->reset_tbkas;
398 
399 	ctrl->reset_tbkas = false;
400 	if (reset_tbkas) {
401 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
402 			ctrl->cntlid);
403 		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
404 		return;
405 	}
406 
407 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
408 		ctrl->cntlid, ctrl->kato);
409 
410 	nvmet_ctrl_fatal_error(ctrl);
411 }
412 
413 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
414 {
415 	if (unlikely(ctrl->kato == 0))
416 		return;
417 
418 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
419 		ctrl->cntlid, ctrl->kato);
420 
421 	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
422 }
423 
424 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
425 {
426 	if (unlikely(ctrl->kato == 0))
427 		return;
428 
429 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
430 
431 	cancel_delayed_work_sync(&ctrl->ka_work);
432 }
433 
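/*
 * Resolve the namespace referenced by the command's NSID and take a percpu
 * reference on it.  A nonexistent NSID and a disabled namespace return
 * different status codes so the host can tell them apart.
 */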
434 u16 nvmet_req_find_ns(struct nvmet_req *req)
435 {
436 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
437 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
438 
439 	req->ns = xa_load(&subsys->namespaces, nsid);
440 	if (unlikely(!req->ns || !req->ns->enabled)) {
441 		req->error_loc = offsetof(struct nvme_common_command, nsid);
442 		if (!req->ns) /* ns doesn't exist! */
443 			return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
444 
445 		/* ns exists but it's disabled */
446 		req->ns = NULL;
447 		return NVME_SC_INTERNAL_PATH_ERROR;
448 	}
449 
450 	percpu_ref_get(&req->ns->ref);
451 	return NVME_SC_SUCCESS;
452 }
453 
454 static void nvmet_destroy_namespace(struct percpu_ref *ref)
455 {
456 	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
457 
458 	complete(&ns->disable_done);
459 }
460 
461 void nvmet_put_namespace(struct nvmet_ns *ns)
462 {
463 	percpu_ref_put(&ns->ref);
464 }
465 
466 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
467 {
468 	nvmet_bdev_ns_disable(ns);
469 	nvmet_file_ns_disable(ns);
470 }
471 
472 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
473 {
474 	int ret;
475 	struct pci_dev *p2p_dev;
476 
477 	if (!ns->use_p2pmem)
478 		return 0;
479 
480 	if (!ns->bdev) {
481 		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
482 		return -EINVAL;
483 	}
484 
485 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
486 		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
487 		       ns->device_path);
488 		return -EINVAL;
489 	}
490 
491 	if (ns->p2p_dev) {
492 		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
493 		if (ret < 0)
494 			return -EINVAL;
495 	} else {
496 		/*
497 		 * Right now we just check that p2pmem is available so we can
498 		 * report an error to the user right away if it is not.  We'll
499 		 * find the actual device to use once we set up the controller
500 		 * when the port's device is available.
501 		 */
502 
503 		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
504 		if (!p2p_dev) {
505 			pr_err("no peer-to-peer memory is available for %s\n",
506 			       ns->device_path);
507 			return -EINVAL;
508 		}
509 
510 		pci_dev_put(p2p_dev);
511 	}
512 
513 	return 0;
514 }
515 
516 /*
517  * Note: ctrl->subsys->lock should be held when calling this function
518  */
519 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
520 				    struct nvmet_ns *ns)
521 {
522 	struct device *clients[2];
523 	struct pci_dev *p2p_dev;
524 	int ret;
525 
526 	if (!ctrl->p2p_client || !ns->use_p2pmem)
527 		return;
528 
529 	if (ns->p2p_dev) {
530 		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
531 		if (ret < 0)
532 			return;
533 
534 		p2p_dev = pci_dev_get(ns->p2p_dev);
535 	} else {
536 		clients[0] = ctrl->p2p_client;
537 		clients[1] = nvmet_ns_dev(ns);
538 
539 		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
540 		if (!p2p_dev) {
541 			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
542 			       dev_name(ctrl->p2p_client), ns->device_path);
543 			return;
544 		}
545 	}
546 
547 	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
548 	if (ret < 0)
549 		pci_dev_put(p2p_dev);
550 
551 	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
552 		ns->nsid);
553 }
554 
555 bool nvmet_ns_revalidate(struct nvmet_ns *ns)
556 {
557 	loff_t oldsize = ns->size;
558 
559 	if (ns->bdev)
560 		nvmet_bdev_ns_revalidate(ns);
561 	else
562 		nvmet_file_ns_revalidate(ns);
563 
564 	return oldsize != ns->size;
565 }
566 
567 int nvmet_ns_enable(struct nvmet_ns *ns)
568 {
569 	struct nvmet_subsys *subsys = ns->subsys;
570 	struct nvmet_ctrl *ctrl;
571 	int ret;
572 
573 	mutex_lock(&subsys->lock);
574 	ret = 0;
575 
576 	if (nvmet_is_passthru_subsys(subsys)) {
577 		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
578 		goto out_unlock;
579 	}
580 
581 	if (ns->enabled)
582 		goto out_unlock;
583 
584 	ret = -EMFILE;
585 
586 	ret = nvmet_bdev_ns_enable(ns);
587 	if (ret == -ENOTBLK)
588 		ret = nvmet_file_ns_enable(ns);
589 	if (ret)
590 		goto out_unlock;
591 
592 	ret = nvmet_p2pmem_ns_enable(ns);
593 	if (ret)
594 		goto out_dev_disable;
595 
596 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
597 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
598 
599 	if (ns->pr.enable) {
600 		ret = nvmet_pr_init_ns(ns);
601 		if (ret)
602 			goto out_dev_put;
603 	}
604 
605 	if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
606 		goto out_pr_exit;
607 
608 	nvmet_ns_changed(subsys, ns->nsid);
609 	ns->enabled = true;
610 	xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
611 	ret = 0;
612 out_unlock:
613 	mutex_unlock(&subsys->lock);
614 	return ret;
615 out_pr_exit:
616 	if (ns->pr.enable)
617 		nvmet_pr_exit_ns(ns);
618 out_dev_put:
619 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
620 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
621 out_dev_disable:
622 	nvmet_ns_dev_disable(ns);
623 	goto out_unlock;
624 }
625 
626 void nvmet_ns_disable(struct nvmet_ns *ns)
627 {
628 	struct nvmet_subsys *subsys = ns->subsys;
629 	struct nvmet_ctrl *ctrl;
630 
631 	mutex_lock(&subsys->lock);
632 	if (!ns->enabled)
633 		goto out_unlock;
634 
635 	ns->enabled = false;
636 	xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
637 
638 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
639 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
640 
641 	mutex_unlock(&subsys->lock);
642 
643 	/*
644 	 * Now that we removed the namespace from the lookup list, we
645 	 * can kill the percpu ref and wait for any remaining references
646 	 * to be dropped, as well as an RCU grace period for anyone only
647 	 * using the namespace under rcu_read_lock().  Note that we can't
648 	 * use call_rcu here as we need to ensure the namespace has
649 	 * been fully destroyed before unloading the module.
650 	 */
651 	percpu_ref_kill(&ns->ref);
652 	synchronize_rcu();
653 	wait_for_completion(&ns->disable_done);
654 	percpu_ref_exit(&ns->ref);
655 
656 	if (ns->pr.enable)
657 		nvmet_pr_exit_ns(ns);
658 
659 	mutex_lock(&subsys->lock);
660 	nvmet_ns_changed(subsys, ns->nsid);
661 	nvmet_ns_dev_disable(ns);
662 out_unlock:
663 	mutex_unlock(&subsys->lock);
664 }
665 
666 void nvmet_ns_free(struct nvmet_ns *ns)
667 {
668 	struct nvmet_subsys *subsys = ns->subsys;
669 
670 	nvmet_ns_disable(ns);
671 
672 	mutex_lock(&subsys->lock);
673 
674 	xa_erase(&subsys->namespaces, ns->nsid);
675 	if (ns->nsid == subsys->max_nsid)
676 		subsys->max_nsid = nvmet_max_nsid(subsys);
677 
678 	subsys->nr_namespaces--;
679 	mutex_unlock(&subsys->lock);
680 
681 	down_write(&nvmet_ana_sem);
682 	nvmet_ana_group_enabled[ns->anagrpid]--;
683 	up_write(&nvmet_ana_sem);
684 
685 	kfree(ns->device_path);
686 	kfree(ns);
687 }
688 
689 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
690 {
691 	struct nvmet_ns *ns;
692 
693 	mutex_lock(&subsys->lock);
694 
695 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
696 		goto out_unlock;
697 
698 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
699 	if (!ns)
700 		goto out_unlock;
701 
702 	init_completion(&ns->disable_done);
703 
704 	ns->nsid = nsid;
705 	ns->subsys = subsys;
706 
707 	if (ns->nsid > subsys->max_nsid)
708 		subsys->max_nsid = nsid;
709 
710 	if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
711 		goto out_exit;
712 
713 	subsys->nr_namespaces++;
714 
715 	mutex_unlock(&subsys->lock);
716 
717 	down_write(&nvmet_ana_sem);
718 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
719 	nvmet_ana_group_enabled[ns->anagrpid]++;
720 	up_write(&nvmet_ana_sem);
721 
722 	uuid_gen(&ns->uuid);
723 	ns->buffered_io = false;
724 	ns->csi = NVME_CSI_NVM;
725 
726 	return ns;
727 out_exit:
728 	subsys->max_nsid = nvmet_max_nsid(subsys);
729 	kfree(ns);
730 out_unlock:
731 	mutex_unlock(&subsys->lock);
732 	return NULL;
733 }
734 
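/*
 * Advance the submission queue head pointer reported in the completion
 * entry.  The cmpxchg loop makes the update safe against concurrent
 * completions without taking a lock.
 */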
735 static void nvmet_update_sq_head(struct nvmet_req *req)
736 {
737 	if (req->sq->size) {
738 		u32 old_sqhd, new_sqhd;
739 
740 		old_sqhd = READ_ONCE(req->sq->sqhd);
741 		do {
742 			new_sqhd = (old_sqhd + 1) % req->sq->size;
743 		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
744 	}
745 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
746 }
747 
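/*
 * Record a failed command in the controller's error log.  The entry goes
 * into a circular set of NVMET_ERROR_LOG_SLOTS slots, and the MORE bit is
 * set in the completion status to tell the host that additional error
 * information is available.
 */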
748 static void nvmet_set_error(struct nvmet_req *req, u16 status)
749 {
750 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
751 	struct nvme_error_slot *new_error_slot;
752 	unsigned long flags;
753 
754 	req->cqe->status = cpu_to_le16(status << 1);
755 
756 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
757 		return;
758 
759 	spin_lock_irqsave(&ctrl->error_lock, flags);
760 	ctrl->err_counter++;
761 	new_error_slot =
762 		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
763 
764 	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
765 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
766 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
767 	new_error_slot->status_field = cpu_to_le16(status << 1);
768 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
769 	new_error_slot->lba = cpu_to_le64(req->error_slba);
770 	new_error_slot->nsid = req->cmd->common.nsid;
771 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
772 
773 	/* set the more bit for this request */
774 	req->cqe->status |= cpu_to_le16(1 << 14);
775 }
776 
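/*
 * Common completion path: update the SQ head, fill in the CQE, log any
 * error, hand the response to the transport, and drop the namespace and
 * reservation references taken while parsing the command.
 */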
777 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
778 {
779 	struct nvmet_ns *ns = req->ns;
780 	struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref;
781 
782 	if (!req->sq->sqhd_disabled)
783 		nvmet_update_sq_head(req);
784 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
785 	req->cqe->command_id = req->cmd->common.command_id;
786 
787 	if (unlikely(status))
788 		nvmet_set_error(req, status);
789 
790 	trace_nvmet_req_complete(req);
791 
792 	req->ops->queue_response(req);
793 
794 	if (pc_ref)
795 		nvmet_pr_put_ns_pc_ref(pc_ref);
796 	if (ns)
797 		nvmet_put_namespace(ns);
798 }
799 
800 void nvmet_req_complete(struct nvmet_req *req, u16 status)
801 {
802 	struct nvmet_sq *sq = req->sq;
803 
804 	__nvmet_req_complete(req, status);
805 	percpu_ref_put(&sq->ref);
806 }
807 EXPORT_SYMBOL_GPL(nvmet_req_complete);
808 
809 void nvmet_cq_init(struct nvmet_cq *cq)
810 {
811 	refcount_set(&cq->ref, 1);
812 }
813 EXPORT_SYMBOL_GPL(nvmet_cq_init);
814 
815 bool nvmet_cq_get(struct nvmet_cq *cq)
816 {
817 	return refcount_inc_not_zero(&cq->ref);
818 }
819 EXPORT_SYMBOL_GPL(nvmet_cq_get);
820 
821 void nvmet_cq_put(struct nvmet_cq *cq)
822 {
823 	if (refcount_dec_and_test(&cq->ref))
824 		nvmet_cq_destroy(cq);
825 }
826 EXPORT_SYMBOL_GPL(nvmet_cq_put);
827 
828 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
829 		u16 qid, u16 size)
830 {
831 	cq->qid = qid;
832 	cq->size = size;
833 
834 	ctrl->cqs[qid] = cq;
835 }
836 
837 void nvmet_cq_destroy(struct nvmet_cq *cq)
838 {
839 	struct nvmet_ctrl *ctrl = cq->ctrl;
840 
841 	if (ctrl) {
842 		ctrl->cqs[cq->qid] = NULL;
843 		nvmet_ctrl_put(cq->ctrl);
844 		cq->ctrl = NULL;
845 	}
846 }
847 
848 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
849 		u16 qid, u16 size)
850 {
851 	sq->sqhd = 0;
852 	sq->qid = qid;
853 	sq->size = size;
854 
855 	ctrl->sqs[qid] = sq;
856 }
857 
858 static void nvmet_confirm_sq(struct percpu_ref *ref)
859 {
860 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
861 
862 	complete(&sq->confirm_done);
863 }
864 
865 u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create)
866 {
867 	if (!ctrl->cqs)
868 		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
869 
870 	if (cqid > ctrl->subsys->max_qid)
871 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
872 
873 	if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid]))
874 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
875 
876 	return NVME_SC_SUCCESS;
877 }
878 
879 u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create)
880 {
881 	if (!cqid)
882 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
883 	return nvmet_check_cqid(ctrl, cqid, create);
884 }
885 
886 bool nvmet_cq_in_use(struct nvmet_cq *cq)
887 {
888 	return refcount_read(&cq->ref) > 1;
889 }
890 EXPORT_SYMBOL_GPL(nvmet_cq_in_use);
891 
892 u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
893 		    u16 qid, u16 size)
894 {
895 	u16 status;
896 
897 	status = nvmet_check_cqid(ctrl, qid, true);
898 	if (status != NVME_SC_SUCCESS)
899 		return status;
900 
901 	if (!kref_get_unless_zero(&ctrl->ref))
902 		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
903 	cq->ctrl = ctrl;
904 
905 	nvmet_cq_init(cq);
906 	nvmet_cq_setup(ctrl, cq, qid, size);
907 
908 	return NVME_SC_SUCCESS;
909 }
910 EXPORT_SYMBOL_GPL(nvmet_cq_create);
911 
912 u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid,
913 		     bool create)
914 {
915 	if (!ctrl->sqs)
916 		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
917 
918 	if (sqid > ctrl->subsys->max_qid)
919 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
920 
921 	if ((create && ctrl->sqs[sqid]) ||
922 	    (!create && !ctrl->sqs[sqid]))
923 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
924 
925 	return NVME_SC_SUCCESS;
926 }
927 
928 u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
929 		    struct nvmet_cq *cq, u16 sqid, u16 size)
930 {
931 	u16 status;
932 	int ret;
933 
934 	if (!kref_get_unless_zero(&ctrl->ref))
935 		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
936 
937 	status = nvmet_check_sqid(ctrl, sqid, true);
938 	if (status != NVME_SC_SUCCESS)
939 		return status;
940 
941 	ret = nvmet_sq_init(sq, cq);
942 	if (ret) {
943 		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
944 		goto ctrl_put;
945 	}
946 
947 	nvmet_sq_setup(ctrl, sq, sqid, size);
948 	sq->ctrl = ctrl;
949 
950 	return NVME_SC_SUCCESS;
951 
952 ctrl_put:
953 	nvmet_ctrl_put(ctrl);
954 	return status;
955 }
956 EXPORT_SYMBOL_GPL(nvmet_sq_create);
957 
958 void nvmet_sq_destroy(struct nvmet_sq *sq)
959 {
960 	struct nvmet_ctrl *ctrl = sq->ctrl;
961 
962 	/*
963 	 * If this is the admin queue, complete all AERs so that our
964 	 * queue doesn't have outstanding requests on it.
965 	 */
966 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
967 		nvmet_async_events_failall(ctrl);
968 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
969 	wait_for_completion(&sq->confirm_done);
970 	wait_for_completion(&sq->free_done);
971 	percpu_ref_exit(&sq->ref);
972 	nvmet_auth_sq_free(sq);
973 	nvmet_cq_put(sq->cq);
974 
975 	/*
976 	 * We must re-read sq->ctrl after waiting for inflight IO to
977 	 * complete, because an admin connect may have sneaked in after we
978 	 * stored sq->ctrl locally but before we killed the percpu_ref.  That
979 	 * admin connect allocates and assigns sq->ctrl, which now needs a
980 	 * final ref put, as this ctrl is going away.
981 	 */
982 	ctrl = sq->ctrl;
983 
984 	if (ctrl) {
985 		/*
986 		 * The teardown flow may take some time, and the host may not
987 		 * send us keep-alive during this period, hence reset the
988 		 * traffic based keep-alive timer so we don't trigger a
989 		 * controller teardown as a result of a keep-alive expiration.
990 		 */
991 		ctrl->reset_tbkas = true;
992 		sq->ctrl->sqs[sq->qid] = NULL;
993 		nvmet_ctrl_put(ctrl);
994 		sq->ctrl = NULL; /* allows reusing the queue later */
995 	}
996 }
997 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
998 
999 static void nvmet_sq_free(struct percpu_ref *ref)
1000 {
1001 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
1002 
1003 	complete(&sq->free_done);
1004 }
1005 
1006 int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq)
1007 {
1008 	int ret;
1009 
1010 	if (!nvmet_cq_get(cq))
1011 		return -EINVAL;
1012 
1013 	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
1014 	if (ret) {
1015 		pr_err("percpu_ref init failed!\n");
1016 		nvmet_cq_put(cq);
1017 		return ret;
1018 	}
1019 	init_completion(&sq->free_done);
1020 	init_completion(&sq->confirm_done);
1021 	nvmet_auth_sq_init(sq);
1022 	sq->cq = cq;
1023 
1024 	return 0;
1025 }
1026 EXPORT_SYMBOL_GPL(nvmet_sq_init);
1027 
1028 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
1029 		struct nvmet_ns *ns)
1030 {
1031 	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
1032 
1033 	if (unlikely(state == NVME_ANA_INACCESSIBLE))
1034 		return NVME_SC_ANA_INACCESSIBLE;
1035 	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
1036 		return NVME_SC_ANA_PERSISTENT_LOSS;
1037 	if (unlikely(state == NVME_ANA_CHANGE))
1038 		return NVME_SC_ANA_TRANSITION;
1039 	return 0;
1040 }
1041 
1042 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
1043 {
1044 	if (unlikely(req->ns->readonly)) {
1045 		switch (req->cmd->common.opcode) {
1046 		case nvme_cmd_read:
1047 		case nvme_cmd_flush:
1048 			break;
1049 		default:
1050 			return NVME_SC_NS_WRITE_PROTECTED;
1051 		}
1052 	}
1053 
1054 	return 0;
1055 }
1056 
1057 static u32 nvmet_io_cmd_transfer_len(struct nvmet_req *req)
1058 {
1059 	struct nvme_command *cmd = req->cmd;
1060 	u32 metadata_len = 0;
1061 
1062 	if (nvme_is_fabrics(cmd))
1063 		return nvmet_fabrics_io_cmd_data_len(req);
1064 
1065 	if (!req->ns)
1066 		return 0;
1067 
1068 	switch (req->cmd->common.opcode) {
1069 	case nvme_cmd_read:
1070 	case nvme_cmd_write:
1071 	case nvme_cmd_zone_append:
1072 		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
1073 			metadata_len = nvmet_rw_metadata_len(req);
1074 		return nvmet_rw_data_len(req) + metadata_len;
1075 	case nvme_cmd_dsm:
1076 		return nvmet_dsm_len(req);
1077 	case nvme_cmd_zone_mgmt_recv:
1078 		return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
1079 	default:
1080 		return 0;
1081 	}
1082 }
1083 
1084 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
1085 {
1086 	struct nvme_command *cmd = req->cmd;
1087 	u16 ret;
1088 
1089 	if (nvme_is_fabrics(cmd))
1090 		return nvmet_parse_fabrics_io_cmd(req);
1091 
1092 	if (unlikely(!nvmet_check_auth_status(req)))
1093 		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
1094 
1095 	ret = nvmet_check_ctrl_status(req);
1096 	if (unlikely(ret))
1097 		return ret;
1098 
1099 	if (nvmet_is_passthru_req(req))
1100 		return nvmet_parse_passthru_io_cmd(req);
1101 
1102 	ret = nvmet_req_find_ns(req);
1103 	if (unlikely(ret))
1104 		return ret;
1105 
1106 	ret = nvmet_check_ana_state(req->port, req->ns);
1107 	if (unlikely(ret)) {
1108 		req->error_loc = offsetof(struct nvme_common_command, nsid);
1109 		return ret;
1110 	}
1111 	ret = nvmet_io_cmd_check_access(req);
1112 	if (unlikely(ret)) {
1113 		req->error_loc = offsetof(struct nvme_common_command, nsid);
1114 		return ret;
1115 	}
1116 
1117 	if (req->ns->pr.enable) {
1118 		ret = nvmet_parse_pr_cmd(req);
1119 		if (!ret)
1120 			return ret;
1121 	}
1122 
1123 	switch (req->ns->csi) {
1124 	case NVME_CSI_NVM:
1125 		if (req->ns->file)
1126 			ret = nvmet_file_parse_io_cmd(req);
1127 		else
1128 			ret = nvmet_bdev_parse_io_cmd(req);
1129 		break;
1130 	case NVME_CSI_ZNS:
1131 		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
1132 			ret = nvmet_bdev_zns_parse_io_cmd(req);
1133 		else
1134 			ret = NVME_SC_INVALID_IO_CMD_SET;
1135 		break;
1136 	default:
1137 		ret = NVME_SC_INVALID_IO_CMD_SET;
1138 	}
1139 	if (ret)
1140 		return ret;
1141 
1142 	if (req->ns->pr.enable) {
1143 		ret = nvmet_pr_check_cmd_access(req);
1144 		if (ret)
1145 			return ret;
1146 
1147 		ret = nvmet_pr_get_ns_pc_ref(req);
1148 	}
1149 	return ret;
1150 }
1151 
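/*
 * Initialize a request and parse its command.  Connect commands are handled
 * before a controller exists, admin commands on qid 0, and I/O commands on
 * all other queues.  On any failure the request is completed here and false
 * is returned.
 */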
1152 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
1153 		const struct nvmet_fabrics_ops *ops)
1154 {
1155 	u8 flags = req->cmd->common.flags;
1156 	u16 status;
1157 
1158 	req->cq = sq->cq;
1159 	req->sq = sq;
1160 	req->ops = ops;
1161 	req->sg = NULL;
1162 	req->metadata_sg = NULL;
1163 	req->sg_cnt = 0;
1164 	req->metadata_sg_cnt = 0;
1165 	req->transfer_len = 0;
1166 	req->metadata_len = 0;
1167 	req->cqe->result.u64 = 0;
1168 	req->cqe->status = 0;
1169 	req->cqe->sq_head = 0;
1170 	req->ns = NULL;
1171 	req->error_loc = NVMET_NO_ERROR_LOC;
1172 	req->error_slba = 0;
1173 	req->pc_ref = NULL;
1174 
1175 	/* no support for fused commands yet */
1176 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
1177 		req->error_loc = offsetof(struct nvme_common_command, flags);
1178 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1179 		goto fail;
1180 	}
1181 
1182 	/*
1183 	 * For fabrics, the PSDT field shall describe a metadata pointer (MPTR)
1184 	 * that contains the address of a single, byte-aligned, contiguous
1185 	 * physical buffer. For PCI controllers this is optional, so it is not enforced.
1186 	 */
1187 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
1188 		if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) {
1189 			req->error_loc =
1190 				offsetof(struct nvme_common_command, flags);
1191 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1192 			goto fail;
1193 		}
1194 	}
1195 
1196 	if (unlikely(!req->sq->ctrl))
1197 		/* will return an error for any non-connect command: */
1198 		status = nvmet_parse_connect_cmd(req);
1199 	else if (likely(req->sq->qid != 0))
1200 		status = nvmet_parse_io_cmd(req);
1201 	else
1202 		status = nvmet_parse_admin_cmd(req);
1203 
1204 	if (status)
1205 		goto fail;
1206 
1207 	trace_nvmet_req_init(req, req->cmd);
1208 
1209 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
1210 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1211 		goto fail;
1212 	}
1213 
1214 	if (sq->ctrl)
1215 		sq->ctrl->reset_tbkas = true;
1216 
1217 	return true;
1218 
1219 fail:
1220 	__nvmet_req_complete(req, status);
1221 	return false;
1222 }
1223 EXPORT_SYMBOL_GPL(nvmet_req_init);
1224 
1225 void nvmet_req_uninit(struct nvmet_req *req)
1226 {
1227 	percpu_ref_put(&req->sq->ref);
1228 	if (req->pc_ref)
1229 		nvmet_pr_put_ns_pc_ref(req->pc_ref);
1230 	if (req->ns)
1231 		nvmet_put_namespace(req->ns);
1232 }
1233 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
1234 
1235 size_t nvmet_req_transfer_len(struct nvmet_req *req)
1236 {
1237 	if (likely(req->sq->qid != 0))
1238 		return nvmet_io_cmd_transfer_len(req);
1239 	if (unlikely(!req->sq->ctrl))
1240 		return nvmet_connect_cmd_data_len(req);
1241 	return nvmet_admin_cmd_data_len(req);
1242 }
1243 EXPORT_SYMBOL_GPL(nvmet_req_transfer_len);
1244 
1245 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
1246 {
1247 	if (unlikely(len != req->transfer_len)) {
1248 		u16 status;
1249 
1250 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1251 		if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
1252 			status = NVME_SC_SGL_INVALID_DATA;
1253 		else
1254 			status = NVME_SC_INVALID_FIELD;
1255 		nvmet_req_complete(req, status | NVME_STATUS_DNR);
1256 		return false;
1257 	}
1258 
1259 	return true;
1260 }
1261 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
1262 
1263 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
1264 {
1265 	if (unlikely(data_len > req->transfer_len)) {
1266 		u16 status;
1267 
1268 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1269 		if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
1270 			status = NVME_SC_SGL_INVALID_DATA;
1271 		else
1272 			status = NVME_SC_INVALID_FIELD;
1273 		nvmet_req_complete(req, status | NVME_STATUS_DNR);
1274 		return false;
1275 	}
1276 
1277 	return true;
1278 }
1279 
1280 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
1281 {
1282 	return req->transfer_len - req->metadata_len;
1283 }
1284 
1285 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1286 		struct nvmet_req *req)
1287 {
1288 	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1289 			nvmet_data_transfer_len(req));
1290 	if (!req->sg)
1291 		goto out_err;
1292 
1293 	if (req->metadata_len) {
1294 		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1295 				&req->metadata_sg_cnt, req->metadata_len);
1296 		if (!req->metadata_sg)
1297 			goto out_free_sg;
1298 	}
1299 
1300 	req->p2p_dev = p2p_dev;
1301 
1302 	return 0;
1303 out_free_sg:
1304 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1305 out_err:
1306 	return -ENOMEM;
1307 }
1308 
1309 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1310 {
1311 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1312 	    !req->sq->ctrl || !req->sq->qid || !req->ns)
1313 		return NULL;
1314 	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1315 }
1316 
1317 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1318 {
1319 	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1320 
1321 	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1322 		return 0;
1323 
1324 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1325 			    &req->sg_cnt);
1326 	if (unlikely(!req->sg))
1327 		goto out;
1328 
1329 	if (req->metadata_len) {
1330 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1331 					     &req->metadata_sg_cnt);
1332 		if (unlikely(!req->metadata_sg))
1333 			goto out_free;
1334 	}
1335 
1336 	return 0;
1337 out_free:
1338 	sgl_free(req->sg);
1339 out:
1340 	return -ENOMEM;
1341 }
1342 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1343 
1344 void nvmet_req_free_sgls(struct nvmet_req *req)
1345 {
1346 	if (req->p2p_dev) {
1347 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1348 		if (req->metadata_sg)
1349 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1350 		req->p2p_dev = NULL;
1351 	} else {
1352 		sgl_free(req->sg);
1353 		if (req->metadata_sg)
1354 			sgl_free(req->metadata_sg);
1355 	}
1356 
1357 	req->sg = NULL;
1358 	req->metadata_sg = NULL;
1359 	req->sg_cnt = 0;
1360 	req->metadata_sg_cnt = 0;
1361 }
1362 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1363 
1364 static inline bool nvmet_css_supported(u8 cc_css)
1365 {
1366 	switch (cc_css << NVME_CC_CSS_SHIFT) {
1367 	case NVME_CC_CSS_NVM:
1368 	case NVME_CC_CSS_CSI:
1369 		return true;
1370 	default:
1371 		return false;
1372 	}
1373 }
1374 
1375 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1376 {
1377 	lockdep_assert_held(&ctrl->lock);
1378 
1379 	/*
1380 	 * Only I/O controllers should verify iosqes,iocqes.
1381 	 * Strictly speaking, the spec says a discovery controller
1382 	 * should verify iosqes,iocqes are zeroed, however that
1383 	 * would break backwards compatibility, so don't enforce it.
1384 	 */
1385 	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
1386 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1387 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1388 		ctrl->csts = NVME_CSTS_CFS;
1389 		return;
1390 	}
1391 
1392 	if (nvmet_cc_mps(ctrl->cc) != 0 ||
1393 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1394 	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1395 		ctrl->csts = NVME_CSTS_CFS;
1396 		return;
1397 	}
1398 
1399 	ctrl->csts = NVME_CSTS_RDY;
1400 
1401 	/*
1402 	 * Controllers that are not yet enabled should not really enforce the
1403 	 * keep alive timeout, but we still want to track a timeout and cleanup
1404 	 * in case a host died before it enabled the controller.  Hence, simply
1405 	 * reset the keep alive timer when the controller is enabled.
1406 	 */
1407 	if (ctrl->kato)
1408 		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
1409 }
1410 
1411 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1412 {
1413 	lockdep_assert_held(&ctrl->lock);
1414 
1415 	/* XXX: tear down queues? */
1416 	ctrl->csts &= ~NVME_CSTS_RDY;
1417 	ctrl->cc = 0;
1418 }
1419 
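/*
 * Apply a host write to the CC register: enable transitions start the
 * controller, disable transitions clear it, and a shutdown request clears
 * the controller and reports shutdown complete in CSTS.
 */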
1420 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1421 {
1422 	u32 old;
1423 
1424 	mutex_lock(&ctrl->lock);
1425 	old = ctrl->cc;
1426 	ctrl->cc = new;
1427 
1428 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1429 		nvmet_start_ctrl(ctrl);
1430 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1431 		nvmet_clear_ctrl(ctrl);
1432 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1433 		nvmet_clear_ctrl(ctrl);
1434 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1435 	}
1436 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1437 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1438 	mutex_unlock(&ctrl->lock);
1439 }
1440 EXPORT_SYMBOL_GPL(nvmet_update_cc);
1441 
1442 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1443 {
1444 	/* command sets supported: NVMe command set: */
1445 	ctrl->cap = (1ULL << 37);
1446 	/* Controller supports one or more I/O Command Sets */
1447 	ctrl->cap |= (1ULL << 43);
1448 	/* CC.EN timeout in 500msec units: */
1449 	ctrl->cap |= (15ULL << 24);
1450 	/* maximum queue entries supported: */
1451 	if (ctrl->ops->get_max_queue_size)
1452 		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
1453 				   ctrl->port->max_queue_size) - 1;
1454 	else
1455 		ctrl->cap |= ctrl->port->max_queue_size - 1;
1456 
1457 	if (nvmet_is_passthru_subsys(ctrl->subsys))
1458 		nvmet_passthrough_override_cap(ctrl);
1459 }
1460 
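/*
 * Look up an existing controller by controller ID within the subsystem the
 * host is connecting to.  The host NQN must match the one that created the
 * controller, and a reference is taken on any controller returned.
 */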
1461 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1462 				       const char *hostnqn, u16 cntlid,
1463 				       struct nvmet_req *req)
1464 {
1465 	struct nvmet_ctrl *ctrl = NULL;
1466 	struct nvmet_subsys *subsys;
1467 
1468 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1469 	if (!subsys) {
1470 		pr_warn("connect request for invalid subsystem %s!\n",
1471 			subsysnqn);
1472 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1473 		goto out;
1474 	}
1475 
1476 	mutex_lock(&subsys->lock);
1477 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1478 		if (ctrl->cntlid == cntlid) {
1479 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1480 				pr_warn("hostnqn mismatch.\n");
1481 				continue;
1482 			}
1483 			if (!kref_get_unless_zero(&ctrl->ref))
1484 				continue;
1485 
1486 			/* ctrl found */
1487 			goto found;
1488 		}
1489 	}
1490 
1491 	ctrl = NULL; /* ctrl not found */
1492 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1493 		cntlid, subsysnqn, hostnqn);
1494 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1495 
1496 found:
1497 	mutex_unlock(&subsys->lock);
1498 	nvmet_subsys_put(subsys);
1499 out:
1500 	return ctrl;
1501 }
1502 
1503 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1504 {
1505 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1506 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1507 		       req->cmd->common.opcode, req->sq->qid);
1508 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1509 	}
1510 
1511 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1512 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1513 		       req->cmd->common.opcode, req->sq->qid);
1514 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1515 	}
1516 
1517 	if (unlikely(!nvmet_check_auth_status(req))) {
1518 		pr_warn("qid %d not authenticated\n", req->sq->qid);
1519 		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
1520 	}
1521 	return 0;
1522 }
1523 
1524 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1525 {
1526 	struct nvmet_host_link *p;
1527 
1528 	lockdep_assert_held(&nvmet_config_sem);
1529 
1530 	if (subsys->allow_any_host)
1531 		return true;
1532 
1533 	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
1534 		return true;
1535 
1536 	list_for_each_entry(p, &subsys->hosts, entry) {
1537 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1538 			return true;
1539 	}
1540 
1541 	return false;
1542 }
1543 
1544 /*
1545  * Note: ctrl->subsys->lock should be held when calling this function
1546  */
1547 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1548 		struct device *p2p_client)
1549 {
1550 	struct nvmet_ns *ns;
1551 	unsigned long idx;
1552 
1553 	if (!p2p_client)
1554 		return;
1555 
1556 	ctrl->p2p_client = get_device(p2p_client);
1557 
1558 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
1559 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1560 }
1561 
1562 /*
1563  * Note: ctrl->subsys->lock should be held when calling this function
1564  */
1565 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1566 {
1567 	struct radix_tree_iter iter;
1568 	void __rcu **slot;
1569 
1570 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1571 		pci_dev_put(radix_tree_deref_slot(slot));
1572 
1573 	put_device(ctrl->p2p_client);
1574 }
1575 
1576 static void nvmet_fatal_error_handler(struct work_struct *work)
1577 {
1578 	struct nvmet_ctrl *ctrl =
1579 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1580 
1581 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1582 	ctrl->ops->delete_ctrl(ctrl);
1583 }
1584 
1585 struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
1586 {
1587 	struct nvmet_subsys *subsys;
1588 	struct nvmet_ctrl *ctrl;
1589 	u32 kato = args->kato;
1590 	u8 dhchap_status;
1591 	int ret;
1592 
1593 	args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
1594 	subsys = nvmet_find_get_subsys(args->port, args->subsysnqn);
1595 	if (!subsys) {
1596 		pr_warn("connect request for invalid subsystem %s!\n",
1597 			args->subsysnqn);
1598 		args->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
1599 		args->error_loc = offsetof(struct nvme_common_command, dptr);
1600 		return NULL;
1601 	}
1602 
1603 	down_read(&nvmet_config_sem);
1604 	if (!nvmet_host_allowed(subsys, args->hostnqn)) {
1605 		pr_info("connect by host %s for subsystem %s not allowed\n",
1606 			args->hostnqn, args->subsysnqn);
1607 		args->result = IPO_IATTR_CONNECT_DATA(hostnqn);
1608 		up_read(&nvmet_config_sem);
1609 		args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
1610 		args->error_loc = offsetof(struct nvme_common_command, dptr);
1611 		goto out_put_subsystem;
1612 	}
1613 	up_read(&nvmet_config_sem);
1614 
1615 	args->status = NVME_SC_INTERNAL;
1616 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1617 	if (!ctrl)
1618 		goto out_put_subsystem;
1619 	mutex_init(&ctrl->lock);
1620 
1621 	ctrl->port = args->port;
1622 	ctrl->ops = args->ops;
1623 
1624 #ifdef CONFIG_NVME_TARGET_PASSTHRU
1625 	/* By default, loop targets are set to clear IDs */
1626 	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
1627 		subsys->clear_ids = 1;
1628 #endif
1629 
1630 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1631 	INIT_LIST_HEAD(&ctrl->async_events);
1632 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1633 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1634 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1635 
1636 	memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
1637 	memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
1638 
1639 	kref_init(&ctrl->ref);
1640 	ctrl->subsys = subsys;
1641 	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
1642 	nvmet_init_cap(ctrl);
1643 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1644 
1645 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1646 			sizeof(__le32), GFP_KERNEL);
1647 	if (!ctrl->changed_ns_list)
1648 		goto out_free_ctrl;
1649 
1650 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1651 			sizeof(struct nvmet_sq *),
1652 			GFP_KERNEL);
1653 	if (!ctrl->sqs)
1654 		goto out_free_changed_ns_list;
1655 
1656 	ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *),
1657 			   GFP_KERNEL);
1658 	if (!ctrl->cqs)
1659 		goto out_free_sqs;
1660 
1661 	ret = ida_alloc_range(&cntlid_ida,
1662 			     subsys->cntlid_min, subsys->cntlid_max,
1663 			     GFP_KERNEL);
1664 	if (ret < 0) {
1665 		args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
1666 		goto out_free_cqs;
1667 	}
1668 	ctrl->cntlid = ret;
1669 
1670 	/*
1671 	 * Discovery controllers may use some arbitrary high value
1672 	 * in order to clean up stale discovery sessions
1673 	 */
1674 	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1675 		kato = NVMET_DISC_KATO_MS;
1676 
1677 	/* keep-alive timeout in seconds */
1678 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1679 
1680 	ctrl->err_counter = 0;
1681 	spin_lock_init(&ctrl->error_lock);
1682 
1683 	nvmet_start_keep_alive_timer(ctrl);
1684 
1685 	mutex_lock(&subsys->lock);
1686 	ret = nvmet_ctrl_init_pr(ctrl);
1687 	if (ret)
1688 		goto init_pr_fail;
1689 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1690 	nvmet_setup_p2p_ns_map(ctrl, args->p2p_client);
1691 	nvmet_debugfs_ctrl_setup(ctrl);
1692 	mutex_unlock(&subsys->lock);
1693 
1694 	if (args->hostid)
1695 		uuid_copy(&ctrl->hostid, args->hostid);
1696 
1697 	dhchap_status = nvmet_setup_auth(ctrl, args->sq);
1698 	if (dhchap_status) {
1699 		pr_err("Failed to setup authentication, dhchap status %u\n",
1700 		       dhchap_status);
1701 		nvmet_ctrl_put(ctrl);
1702 		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
1703 			args->status =
1704 				NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
1705 		else
1706 			args->status = NVME_SC_INTERNAL;
1707 		return NULL;
1708 	}
1709 
1710 	args->status = NVME_SC_SUCCESS;
1711 
1712 	pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s%s.\n",
1713 		nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
1714 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
1715 		ctrl->pi_support ? " T10-PI is enabled" : "",
1716 		nvmet_has_auth(ctrl, args->sq) ? " with DH-HMAC-CHAP" : "",
1717 		nvmet_queue_tls_keyid(args->sq) ? ", TLS" : "");
1718 
1719 	return ctrl;
1720 
1721 init_pr_fail:
1722 	mutex_unlock(&subsys->lock);
1723 	nvmet_stop_keep_alive_timer(ctrl);
1724 	ida_free(&cntlid_ida, ctrl->cntlid);
1725 out_free_cqs:
1726 	kfree(ctrl->cqs);
1727 out_free_sqs:
1728 	kfree(ctrl->sqs);
1729 out_free_changed_ns_list:
1730 	kfree(ctrl->changed_ns_list);
1731 out_free_ctrl:
1732 	kfree(ctrl);
1733 out_put_subsystem:
1734 	nvmet_subsys_put(subsys);
1735 	return NULL;
1736 }
1737 EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl);
1738 
1739 static void nvmet_ctrl_free(struct kref *ref)
1740 {
1741 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1742 	struct nvmet_subsys *subsys = ctrl->subsys;
1743 
1744 	mutex_lock(&subsys->lock);
1745 	nvmet_ctrl_destroy_pr(ctrl);
1746 	nvmet_release_p2p_ns_map(ctrl);
1747 	list_del(&ctrl->subsys_entry);
1748 	mutex_unlock(&subsys->lock);
1749 
1750 	nvmet_stop_keep_alive_timer(ctrl);
1751 
1752 	flush_work(&ctrl->async_event_work);
1753 	cancel_work_sync(&ctrl->fatal_err_work);
1754 
1755 	nvmet_destroy_auth(ctrl);
1756 
1757 	nvmet_debugfs_ctrl_free(ctrl);
1758 
1759 	ida_free(&cntlid_ida, ctrl->cntlid);
1760 
1761 	nvmet_async_events_free(ctrl);
1762 	kfree(ctrl->sqs);
1763 	kfree(ctrl->cqs);
1764 	kfree(ctrl->changed_ns_list);
1765 	kfree(ctrl);
1766 
1767 	nvmet_subsys_put(subsys);
1768 }
1769 
1770 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1771 {
1772 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1773 }
1774 EXPORT_SYMBOL_GPL(nvmet_ctrl_put);
1775 
1776 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1777 {
1778 	mutex_lock(&ctrl->lock);
1779 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1780 		ctrl->csts |= NVME_CSTS_CFS;
1781 		queue_work(nvmet_wq, &ctrl->fatal_err_work);
1782 	}
1783 	mutex_unlock(&ctrl->lock);
1784 }
1785 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1786 
1787 ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
1788 		char *traddr, size_t traddr_len)
1789 {
1790 	if (!ctrl->ops->host_traddr)
1791 		return -EOPNOTSUPP;
1792 	return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
1793 }
1794 
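/*
 * Find the subsystem a connect request is addressed to and take a reference
 * on it.  The well-known discovery NQN and a uniquely configured discovery
 * NQN both resolve to the discovery subsystem; any other subsystem must be
 * linked to the port the request arrived on.
 */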
1795 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1796 		const char *subsysnqn)
1797 {
1798 	struct nvmet_subsys_link *p;
1799 
1800 	if (!port)
1801 		return NULL;
1802 
1803 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1804 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1805 			return NULL;
1806 		return nvmet_disc_subsys;
1807 	}
1808 
1809 	down_read(&nvmet_config_sem);
1810 	if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
1811 				NVMF_NQN_SIZE)) {
1812 		if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
1813 			up_read(&nvmet_config_sem);
1814 			return nvmet_disc_subsys;
1815 		}
1816 	}
1817 	list_for_each_entry(p, &port->subsystems, entry) {
1818 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1819 				NVMF_NQN_SIZE)) {
1820 			if (!kref_get_unless_zero(&p->subsys->ref))
1821 				break;
1822 			up_read(&nvmet_config_sem);
1823 			return p->subsys;
1824 		}
1825 	}
1826 	up_read(&nvmet_config_sem);
1827 	return NULL;
1828 }
1829 
1830 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1831 		enum nvme_subsys_type type)
1832 {
1833 	struct nvmet_subsys *subsys;
1834 	char serial[NVMET_SN_MAX_SIZE / 2];
1835 	int ret;
1836 
1837 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1838 	if (!subsys)
1839 		return ERR_PTR(-ENOMEM);
1840 
1841 	subsys->ver = NVMET_DEFAULT_VS;
1842 	/* generate a random serial number as our controllers are ephemeral: */
1843 	get_random_bytes(&serial, sizeof(serial));
1844 	bin2hex(subsys->serial, &serial, sizeof(serial));
1845 
1846 	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1847 	if (!subsys->model_number) {
1848 		ret = -ENOMEM;
1849 		goto free_subsys;
1850 	}
1851 
1852 	subsys->ieee_oui = 0;
1853 
1854 	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
1855 	if (!subsys->firmware_rev) {
1856 		ret = -ENOMEM;
1857 		goto free_mn;
1858 	}
1859 
1860 	switch (type) {
1861 	case NVME_NQN_NVME:
1862 		subsys->max_qid = NVMET_NR_QUEUES;
1863 		break;
1864 	case NVME_NQN_DISC:
1865 	case NVME_NQN_CURR:
1866 		subsys->max_qid = 0;
1867 		break;
1868 	default:
1869 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1870 		ret = -EINVAL;
1871 		goto free_fr;
1872 	}
1873 	subsys->type = type;
1874 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1875 			GFP_KERNEL);
1876 	if (!subsys->subsysnqn) {
1877 		ret = -ENOMEM;
1878 		goto free_fr;
1879 	}
1880 	subsys->cntlid_min = NVME_CNTLID_MIN;
1881 	subsys->cntlid_max = NVME_CNTLID_MAX;
1882 	kref_init(&subsys->ref);
1883 
1884 	mutex_init(&subsys->lock);
1885 	xa_init(&subsys->namespaces);
1886 	INIT_LIST_HEAD(&subsys->ctrls);
1887 	INIT_LIST_HEAD(&subsys->hosts);
1888 
1889 	ret = nvmet_debugfs_subsys_setup(subsys);
1890 	if (ret)
1891 		goto free_subsysnqn;
1892 
1893 	return subsys;
1894 
1895 free_subsysnqn:
1896 	kfree(subsys->subsysnqn);
1897 free_fr:
1898 	kfree(subsys->firmware_rev);
1899 free_mn:
1900 	kfree(subsys->model_number);
1901 free_subsys:
1902 	kfree(subsys);
1903 	return ERR_PTR(ret);
1904 }
1905 
1906 static void nvmet_subsys_free(struct kref *ref)
1907 {
1908 	struct nvmet_subsys *subsys =
1909 		container_of(ref, struct nvmet_subsys, ref);
1910 
1911 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1912 
1913 	nvmet_debugfs_subsys_free(subsys);
1914 
1915 	xa_destroy(&subsys->namespaces);
1916 	nvmet_passthru_subsys_free(subsys);
1917 
1918 	kfree(subsys->subsysnqn);
1919 	kfree(subsys->model_number);
1920 	kfree(subsys->firmware_rev);
1921 	kfree(subsys);
1922 }
1923 
1924 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1925 {
1926 	struct nvmet_ctrl *ctrl;
1927 
1928 	mutex_lock(&subsys->lock);
1929 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1930 		ctrl->ops->delete_ctrl(ctrl);
1931 	mutex_unlock(&subsys->lock);
1932 }
1933 
1934 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1935 {
1936 	kref_put(&subsys->ref, nvmet_subsys_free);
1937 }
1938 
1939 static int __init nvmet_init(void)
1940 {
1941 	int error = -ENOMEM;
1942 
1943 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1944 
1945 	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
1946 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
1947 			SLAB_HWCACHE_ALIGN, NULL);
1948 	if (!nvmet_bvec_cache)
1949 		return -ENOMEM;
1950 
1951 	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1952 	if (!zbd_wq)
1953 		goto out_destroy_bvec_cache;
1954 
1955 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1956 			WQ_MEM_RECLAIM, 0);
1957 	if (!buffered_io_wq)
1958 		goto out_free_zbd_work_queue;
1959 
1960 	nvmet_wq = alloc_workqueue("nvmet-wq",
1961 			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0);
1962 	if (!nvmet_wq)
1963 		goto out_free_buffered_work_queue;
1964 
1965 	error = nvmet_init_discovery();
1966 	if (error)
1967 		goto out_free_nvmet_work_queue;
1968 
1969 	error = nvmet_init_debugfs();
1970 	if (error)
1971 		goto out_exit_discovery;
1972 
1973 	error = nvmet_init_configfs();
1974 	if (error)
1975 		goto out_exit_debugfs;
1976 
1977 	return 0;
1978 
1979 out_exit_debugfs:
1980 	nvmet_exit_debugfs();
1981 out_exit_discovery:
1982 	nvmet_exit_discovery();
1983 out_free_nvmet_work_queue:
1984 	destroy_workqueue(nvmet_wq);
1985 out_free_buffered_work_queue:
1986 	destroy_workqueue(buffered_io_wq);
1987 out_free_zbd_work_queue:
1988 	destroy_workqueue(zbd_wq);
1989 out_destroy_bvec_cache:
1990 	kmem_cache_destroy(nvmet_bvec_cache);
1991 	return error;
1992 }
1993 
1994 static void __exit nvmet_exit(void)
1995 {
1996 	nvmet_exit_configfs();
1997 	nvmet_exit_debugfs();
1998 	nvmet_exit_discovery();
1999 	ida_destroy(&cntlid_ida);
2000 	destroy_workqueue(nvmet_wq);
2001 	destroy_workqueue(buffered_io_wq);
2002 	destroy_workqueue(zbd_wq);
2003 	kmem_cache_destroy(nvmet_bvec_cache);
2004 
2005 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
2006 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
2007 }
2008 
2009 module_init(nvmet_init);
2010 module_exit(nvmet_exit);
2011 
2012 MODULE_DESCRIPTION("NVMe target core framework");
2013 MODULE_LICENSE("GPL v2");
2014