xref: /linux/drivers/nvme/target/core.c (revision fcb3ad4366b9c810cbb9da34c076a9a52d8aa1e0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12 
13 #include <generated/utsrelease.h>
14 
15 #define CREATE_TRACE_POINTS
16 #include "trace.h"
17 
18 #include "nvmet.h"
19 #include "debugfs.h"
20 
21 struct kmem_cache *nvmet_bvec_cache;
22 struct workqueue_struct *buffered_io_wq;
23 struct workqueue_struct *zbd_wq;
24 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
25 static DEFINE_IDA(cntlid_ida);
26 
27 struct workqueue_struct *nvmet_wq;
28 EXPORT_SYMBOL_GPL(nvmet_wq);
29 
30 /*
31  * This read/write semaphore is used to synchronize access to configuration
32  * information on a target system that will result in discovery log page
33  * information change for at least one host.
34  * The full list of resources protected by this semaphore is:
35  *
36  *  - subsystems list
37  *  - per-subsystem allowed hosts list
38  *  - allow_any_host subsystem attribute
39  *  - nvmet_genctr
40  *  - the nvmet_transports array
41  *
42  * When updating any of those lists/structures the write lock should be
43  * obtained, while readers (populating the discovery log page or checking a
44  * host-subsystem link) take the read lock to allow concurrent reads.
45  */
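/*
 * Typical usage, mirroring the callers elsewhere in this file: configuration
 * updates such as nvmet_register_transport() take the semaphore with
 * down_write()/up_write(), while readers such as nvmet_port_send_ana_event()
 * and nvmet_find_get_subsys() use down_read()/up_read().
 */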
46 DECLARE_RWSEM(nvmet_config_sem);
47 
48 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
49 u64 nvmet_ana_chgcnt;
50 DECLARE_RWSEM(nvmet_ana_sem);
51 
52 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
53 {
54 	switch (errno) {
55 	case 0:
56 		return NVME_SC_SUCCESS;
57 	case -ENOSPC:
58 		req->error_loc = offsetof(struct nvme_rw_command, length);
59 		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
60 	case -EREMOTEIO:
61 		req->error_loc = offsetof(struct nvme_rw_command, slba);
62 		return  NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
63 	case -EOPNOTSUPP:
64 		req->error_loc = offsetof(struct nvme_common_command, opcode);
65 		switch (req->cmd->common.opcode) {
66 		case nvme_cmd_dsm:
67 		case nvme_cmd_write_zeroes:
68 			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
69 		default:
70 			return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
71 		}
72 		break;
73 	case -ENODATA:
74 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
75 		return NVME_SC_ACCESS_DENIED;
76 	case -EIO:
77 		fallthrough;
78 	default:
79 		req->error_loc = offsetof(struct nvme_common_command, opcode);
80 		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
81 	}
82 }
83 
84 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
85 {
86 	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
87 		 req->sq->qid);
88 
89 	req->error_loc = offsetof(struct nvme_common_command, opcode);
90 	return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
91 }
92 
93 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
94 		const char *subsysnqn);
95 
96 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
97 		size_t len)
98 {
99 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
100 		req->error_loc = offsetof(struct nvme_common_command, dptr);
101 		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
102 	}
103 	return 0;
104 }
105 
106 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
107 {
108 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
109 		req->error_loc = offsetof(struct nvme_common_command, dptr);
110 		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
111 	}
112 	return 0;
113 }
114 
115 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
116 {
117 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
118 		req->error_loc = offsetof(struct nvme_common_command, dptr);
119 		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
120 	}
121 	return 0;
122 }
123 
124 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
125 {
126 	struct nvmet_ns *cur;
127 	unsigned long idx;
128 	u32 nsid = 0;
129 
130 	xa_for_each(&subsys->namespaces, idx, cur)
131 		nsid = cur->nsid;
132 
133 	return nsid;
134 }
135 
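/*
 * Pack the queued event into Asynchronous Event Request completion dword 0:
 * event type in bits 07:00, event information in bits 15:08 and the log page
 * identifier in bits 23:16.
 */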
136 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
137 {
138 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
139 }
140 
141 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
142 {
143 	struct nvmet_req *req;
144 
145 	mutex_lock(&ctrl->lock);
146 	while (ctrl->nr_async_event_cmds) {
147 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
148 		mutex_unlock(&ctrl->lock);
149 		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
150 		mutex_lock(&ctrl->lock);
151 	}
152 	mutex_unlock(&ctrl->lock);
153 }
154 
155 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
156 {
157 	struct nvmet_async_event *aen;
158 	struct nvmet_req *req;
159 
160 	mutex_lock(&ctrl->lock);
161 	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
162 		aen = list_first_entry(&ctrl->async_events,
163 				       struct nvmet_async_event, entry);
164 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
165 		nvmet_set_result(req, nvmet_async_event_result(aen));
166 
167 		list_del(&aen->entry);
168 		kfree(aen);
169 
170 		mutex_unlock(&ctrl->lock);
171 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
172 		nvmet_req_complete(req, 0);
173 		mutex_lock(&ctrl->lock);
174 	}
175 	mutex_unlock(&ctrl->lock);
176 }
177 
178 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
179 {
180 	struct nvmet_async_event *aen, *tmp;
181 
182 	mutex_lock(&ctrl->lock);
183 	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
184 		list_del(&aen->entry);
185 		kfree(aen);
186 	}
187 	mutex_unlock(&ctrl->lock);
188 }
189 
190 static void nvmet_async_event_work(struct work_struct *work)
191 {
192 	struct nvmet_ctrl *ctrl =
193 		container_of(work, struct nvmet_ctrl, async_event_work);
194 
195 	nvmet_async_events_process(ctrl);
196 }
197 
198 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
199 		u8 event_info, u8 log_page)
200 {
201 	struct nvmet_async_event *aen;
202 
203 	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
204 	if (!aen)
205 		return;
206 
207 	aen->event_type = event_type;
208 	aen->event_info = event_info;
209 	aen->log_page = log_page;
210 
211 	mutex_lock(&ctrl->lock);
212 	list_add_tail(&aen->entry, &ctrl->async_events);
213 	mutex_unlock(&ctrl->lock);
214 
215 	queue_work(nvmet_wq, &ctrl->async_event_work);
216 }
217 
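/*
 * Record a namespace in the per-controller Changed Namespace List log.  Once
 * more namespaces than NVME_MAX_CHANGED_NAMESPACES have changed, the list is
 * collapsed to the single entry 0xffffffff, indicating that more namespaces
 * changed than fit in the log page.
 */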
218 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
219 {
220 	u32 i;
221 
222 	mutex_lock(&ctrl->lock);
223 	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
224 		goto out_unlock;
225 
226 	for (i = 0; i < ctrl->nr_changed_ns; i++) {
227 		if (ctrl->changed_ns_list[i] == nsid)
228 			goto out_unlock;
229 	}
230 
231 	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
232 		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
233 		ctrl->nr_changed_ns = U32_MAX;
234 		goto out_unlock;
235 	}
236 
237 	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
238 out_unlock:
239 	mutex_unlock(&ctrl->lock);
240 }
241 
242 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
243 {
244 	struct nvmet_ctrl *ctrl;
245 
246 	lockdep_assert_held(&subsys->lock);
247 
248 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
249 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
250 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
251 			continue;
252 		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
253 				NVME_AER_NOTICE_NS_CHANGED,
254 				NVME_LOG_CHANGED_NS);
255 	}
256 }
257 
258 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
259 		struct nvmet_port *port)
260 {
261 	struct nvmet_ctrl *ctrl;
262 
263 	mutex_lock(&subsys->lock);
264 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
265 		if (port && ctrl->port != port)
266 			continue;
267 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
268 			continue;
269 		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
270 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
271 	}
272 	mutex_unlock(&subsys->lock);
273 }
274 
275 void nvmet_port_send_ana_event(struct nvmet_port *port)
276 {
277 	struct nvmet_subsys_link *p;
278 
279 	down_read(&nvmet_config_sem);
280 	list_for_each_entry(p, &port->subsystems, entry)
281 		nvmet_send_ana_event(p->subsys, port);
282 	up_read(&nvmet_config_sem);
283 }
284 
285 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
286 {
287 	int ret = 0;
288 
289 	down_write(&nvmet_config_sem);
290 	if (nvmet_transports[ops->type])
291 		ret = -EINVAL;
292 	else
293 		nvmet_transports[ops->type] = ops;
294 	up_write(&nvmet_config_sem);
295 
296 	return ret;
297 }
298 EXPORT_SYMBOL_GPL(nvmet_register_transport);
299 
300 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
301 {
302 	down_write(&nvmet_config_sem);
303 	nvmet_transports[ops->type] = NULL;
304 	up_write(&nvmet_config_sem);
305 }
306 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
307 
308 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
309 {
310 	struct nvmet_ctrl *ctrl;
311 
312 	mutex_lock(&subsys->lock);
313 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
314 		if (ctrl->port == port)
315 			ctrl->ops->delete_ctrl(ctrl);
316 	}
317 	mutex_unlock(&subsys->lock);
318 }
319 
320 int nvmet_enable_port(struct nvmet_port *port)
321 {
322 	const struct nvmet_fabrics_ops *ops;
323 	int ret;
324 
325 	lockdep_assert_held(&nvmet_config_sem);
326 
327 	ops = nvmet_transports[port->disc_addr.trtype];
328 	if (!ops) {
329 		up_write(&nvmet_config_sem);
330 		request_module("nvmet-transport-%d", port->disc_addr.trtype);
331 		down_write(&nvmet_config_sem);
332 		ops = nvmet_transports[port->disc_addr.trtype];
333 		if (!ops) {
334 			pr_err("transport type %d not supported\n",
335 				port->disc_addr.trtype);
336 			return -EINVAL;
337 		}
338 	}
339 
340 	if (!try_module_get(ops->owner))
341 		return -EINVAL;
342 
343 	/*
344 	 * If the user requested PI support and the transport isn't PI capable,
345 	 * don't enable the port.
346 	 */
347 	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
348 		pr_err("T10-PI is not supported by transport type %d\n",
349 		       port->disc_addr.trtype);
350 		ret = -EINVAL;
351 		goto out_put;
352 	}
353 
354 	ret = ops->add_port(port);
355 	if (ret)
356 		goto out_put;
357 
358 	/* If the transport didn't set inline_data_size, then disable it. */
359 	if (port->inline_data_size < 0)
360 		port->inline_data_size = 0;
361 
362 	/*
363 	 * If the transport didn't set the max_queue_size properly, then clamp
364 	 * it to the target limits. Also set default values in case the
365 	 * transport didn't set it at all.
366 	 */
367 	if (port->max_queue_size < 0)
368 		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
369 	else
370 		port->max_queue_size = clamp_t(int, port->max_queue_size,
371 					       NVMET_MIN_QUEUE_SIZE,
372 					       NVMET_MAX_QUEUE_SIZE);
373 
374 	port->enabled = true;
375 	port->tr_ops = ops;
376 	return 0;
377 
378 out_put:
379 	module_put(ops->owner);
380 	return ret;
381 }
382 
383 void nvmet_disable_port(struct nvmet_port *port)
384 {
385 	const struct nvmet_fabrics_ops *ops;
386 
387 	lockdep_assert_held(&nvmet_config_sem);
388 
389 	port->enabled = false;
390 	port->tr_ops = NULL;
391 
392 	ops = nvmet_transports[port->disc_addr.trtype];
393 	ops->remove_port(port);
394 	module_put(ops->owner);
395 }
396 
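/*
 * Traffic-based keep-alive: nvmet_req_init() sets ctrl->reset_tbkas whenever
 * a command is received, so an expired timer is simply re-armed if the host
 * has been sending I/O instead of explicit Keep Alive commands; otherwise the
 * controller is torn down via a fatal error.
 */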
397 static void nvmet_keep_alive_timer(struct work_struct *work)
398 {
399 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
400 			struct nvmet_ctrl, ka_work);
401 	bool reset_tbkas = ctrl->reset_tbkas;
402 
403 	ctrl->reset_tbkas = false;
404 	if (reset_tbkas) {
405 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
406 			ctrl->cntlid);
407 		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
408 		return;
409 	}
410 
411 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
412 		ctrl->cntlid, ctrl->kato);
413 
414 	nvmet_ctrl_fatal_error(ctrl);
415 }
416 
417 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
418 {
419 	if (unlikely(ctrl->kato == 0))
420 		return;
421 
422 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
423 		ctrl->cntlid, ctrl->kato);
424 
425 	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
426 }
427 
428 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
429 {
430 	if (unlikely(ctrl->kato == 0))
431 		return;
432 
433 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
434 
435 	cancel_delayed_work_sync(&ctrl->ka_work);
436 }
437 
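/*
 * Resolve the command's NSID to an enabled namespace and take a reference on
 * it.  If the NSID is configured on the subsystem but not currently enabled,
 * return a path error without the DNR bit so the host may retry, rather than
 * a hard "invalid namespace" error.
 */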
438 u16 nvmet_req_find_ns(struct nvmet_req *req)
439 {
440 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
441 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
442 
443 	req->ns = xa_load(&subsys->namespaces, nsid);
444 	if (unlikely(!req->ns)) {
445 		req->error_loc = offsetof(struct nvme_common_command, nsid);
446 		if (nvmet_subsys_nsid_exists(subsys, nsid))
447 			return NVME_SC_INTERNAL_PATH_ERROR;
448 		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
449 	}
450 
451 	percpu_ref_get(&req->ns->ref);
452 	return NVME_SC_SUCCESS;
453 }
454 
455 static void nvmet_destroy_namespace(struct percpu_ref *ref)
456 {
457 	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
458 
459 	complete(&ns->disable_done);
460 }
461 
462 void nvmet_put_namespace(struct nvmet_ns *ns)
463 {
464 	percpu_ref_put(&ns->ref);
465 }
466 
467 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
468 {
469 	nvmet_bdev_ns_disable(ns);
470 	nvmet_file_ns_disable(ns);
471 }
472 
473 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
474 {
475 	int ret;
476 	struct pci_dev *p2p_dev;
477 
478 	if (!ns->use_p2pmem)
479 		return 0;
480 
481 	if (!ns->bdev) {
482 		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
483 		return -EINVAL;
484 	}
485 
486 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
487 		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
488 		       ns->device_path);
489 		return -EINVAL;
490 	}
491 
492 	if (ns->p2p_dev) {
493 		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
494 		if (ret < 0)
495 			return -EINVAL;
496 	} else {
497 		/*
498 		 * Right now we just check that there is p2pmem available so
499 		 * we can report an error to the user right away if there
500 		 * is not. We'll find the actual device to use once we set up
501 		 * the controller, when the port's device is available.
502 		 */
503 
504 		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
505 		if (!p2p_dev) {
506 			pr_err("no peer-to-peer memory is available for %s\n",
507 			       ns->device_path);
508 			return -EINVAL;
509 		}
510 
511 		pci_dev_put(p2p_dev);
512 	}
513 
514 	return 0;
515 }
516 
517 /*
518  * Note: ctrl->subsys->lock should be held when calling this function
519  */
520 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
521 				    struct nvmet_ns *ns)
522 {
523 	struct device *clients[2];
524 	struct pci_dev *p2p_dev;
525 	int ret;
526 
527 	if (!ctrl->p2p_client || !ns->use_p2pmem)
528 		return;
529 
530 	if (ns->p2p_dev) {
531 		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
532 		if (ret < 0)
533 			return;
534 
535 		p2p_dev = pci_dev_get(ns->p2p_dev);
536 	} else {
537 		clients[0] = ctrl->p2p_client;
538 		clients[1] = nvmet_ns_dev(ns);
539 
540 		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
541 		if (!p2p_dev) {
542 			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
543 			       dev_name(ctrl->p2p_client), ns->device_path);
544 			return;
545 		}
546 	}
547 
548 	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
549 	if (ret < 0)
550 		pci_dev_put(p2p_dev);
551 
552 	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
553 		ns->nsid);
554 }
555 
556 bool nvmet_ns_revalidate(struct nvmet_ns *ns)
557 {
558 	loff_t oldsize = ns->size;
559 
560 	if (ns->bdev)
561 		nvmet_bdev_ns_revalidate(ns);
562 	else
563 		nvmet_file_ns_revalidate(ns);
564 
565 	return oldsize != ns->size;
566 }
567 
568 int nvmet_ns_enable(struct nvmet_ns *ns)
569 {
570 	struct nvmet_subsys *subsys = ns->subsys;
571 	struct nvmet_ctrl *ctrl;
572 	int ret;
573 
574 	mutex_lock(&subsys->lock);
575 	ret = 0;
576 
577 	if (nvmet_is_passthru_subsys(subsys)) {
578 		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
579 		goto out_unlock;
580 	}
581 
582 	if (ns->enabled)
583 		goto out_unlock;
584 
585 	ret = -EMFILE;
586 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
587 		goto out_unlock;
588 
589 	ret = nvmet_bdev_ns_enable(ns);
590 	if (ret == -ENOTBLK)
591 		ret = nvmet_file_ns_enable(ns);
592 	if (ret)
593 		goto out_unlock;
594 
595 	ret = nvmet_p2pmem_ns_enable(ns);
596 	if (ret)
597 		goto out_dev_disable;
598 
599 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
600 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
601 
602 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
603 				0, GFP_KERNEL);
604 	if (ret)
605 		goto out_dev_put;
606 
607 	if (ns->nsid > subsys->max_nsid)
608 		subsys->max_nsid = ns->nsid;
609 
610 	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
611 	if (ret)
612 		goto out_restore_subsys_maxnsid;
613 
614 	if (ns->pr.enable) {
615 		ret = nvmet_pr_init_ns(ns);
616 		if (ret)
617 			goto out_remove_from_subsys;
618 	}
619 
620 	subsys->nr_namespaces++;
621 
622 	nvmet_ns_changed(subsys, ns->nsid);
623 	ns->enabled = true;
624 	ret = 0;
625 out_unlock:
626 	mutex_unlock(&subsys->lock);
627 	return ret;
628 
629 out_remove_from_subsys:
630 	xa_erase(&subsys->namespaces, ns->nsid);
631 out_restore_subsys_maxnsid:
632 	subsys->max_nsid = nvmet_max_nsid(subsys);
633 	percpu_ref_exit(&ns->ref);
634 out_dev_put:
635 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
636 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
637 out_dev_disable:
638 	nvmet_ns_dev_disable(ns);
639 	goto out_unlock;
640 }
641 
642 void nvmet_ns_disable(struct nvmet_ns *ns)
643 {
644 	struct nvmet_subsys *subsys = ns->subsys;
645 	struct nvmet_ctrl *ctrl;
646 
647 	mutex_lock(&subsys->lock);
648 	if (!ns->enabled)
649 		goto out_unlock;
650 
651 	ns->enabled = false;
652 	xa_erase(&ns->subsys->namespaces, ns->nsid);
653 	if (ns->nsid == subsys->max_nsid)
654 		subsys->max_nsid = nvmet_max_nsid(subsys);
655 
656 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
657 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
658 
659 	mutex_unlock(&subsys->lock);
660 
661 	/*
662 	 * Now that we removed the namespaces from the lookup list, we
663 	 * can kill the percpu ref and wait for any remaining references
664 	 * to be dropped, as well as an RCU grace period for anyone only
665 	 * using the namespace under rcu_read_lock().  Note that we can't
666 	 * use call_rcu here as we need to ensure the namespaces have
667 	 * been fully destroyed before unloading the module.
668 	 */
669 	percpu_ref_kill(&ns->ref);
670 	synchronize_rcu();
671 	wait_for_completion(&ns->disable_done);
672 	percpu_ref_exit(&ns->ref);
673 
674 	if (ns->pr.enable)
675 		nvmet_pr_exit_ns(ns);
676 
677 	mutex_lock(&subsys->lock);
678 
679 	subsys->nr_namespaces--;
680 	nvmet_ns_changed(subsys, ns->nsid);
681 	nvmet_ns_dev_disable(ns);
682 out_unlock:
683 	mutex_unlock(&subsys->lock);
684 }
685 
686 void nvmet_ns_free(struct nvmet_ns *ns)
687 {
688 	nvmet_ns_disable(ns);
689 
690 	down_write(&nvmet_ana_sem);
691 	nvmet_ana_group_enabled[ns->anagrpid]--;
692 	up_write(&nvmet_ana_sem);
693 
694 	kfree(ns->device_path);
695 	kfree(ns);
696 }
697 
698 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
699 {
700 	struct nvmet_ns *ns;
701 
702 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
703 	if (!ns)
704 		return NULL;
705 
706 	init_completion(&ns->disable_done);
707 
708 	ns->nsid = nsid;
709 	ns->subsys = subsys;
710 
711 	down_write(&nvmet_ana_sem);
712 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
713 	nvmet_ana_group_enabled[ns->anagrpid]++;
714 	up_write(&nvmet_ana_sem);
715 
716 	uuid_gen(&ns->uuid);
717 	ns->buffered_io = false;
718 	ns->csi = NVME_CSI_NVM;
719 
720 	return ns;
721 }
722 
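/*
 * Advance the submission queue head with a try_cmpxchg() loop so concurrent
 * completions on the same queue each consume exactly one slot, then report
 * the new head back to the host in the CQE.
 */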
723 static void nvmet_update_sq_head(struct nvmet_req *req)
724 {
725 	if (req->sq->size) {
726 		u32 old_sqhd, new_sqhd;
727 
728 		old_sqhd = READ_ONCE(req->sq->sqhd);
729 		do {
730 			new_sqhd = (old_sqhd + 1) % req->sq->size;
731 		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
732 	}
733 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
734 }
735 
736 static void nvmet_set_error(struct nvmet_req *req, u16 status)
737 {
738 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
739 	struct nvme_error_slot *new_error_slot;
740 	unsigned long flags;
741 
742 	req->cqe->status = cpu_to_le16(status << 1);
743 
744 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
745 		return;
746 
747 	spin_lock_irqsave(&ctrl->error_lock, flags);
748 	ctrl->err_counter++;
749 	new_error_slot =
750 		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
751 
752 	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
753 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
754 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
755 	new_error_slot->status_field = cpu_to_le16(status << 1);
756 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
757 	new_error_slot->lba = cpu_to_le64(req->error_slba);
758 	new_error_slot->nsid = req->cmd->common.nsid;
759 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
760 
761 	/* set the More (M) bit for this request */
762 	req->cqe->status |= cpu_to_le16(1 << 14);
763 }
764 
765 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
766 {
767 	struct nvmet_ns *ns = req->ns;
768 	struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref;
769 
770 	if (!req->sq->sqhd_disabled)
771 		nvmet_update_sq_head(req);
772 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
773 	req->cqe->command_id = req->cmd->common.command_id;
774 
775 	if (unlikely(status))
776 		nvmet_set_error(req, status);
777 
778 	trace_nvmet_req_complete(req);
779 
780 	req->ops->queue_response(req);
781 
782 	if (pc_ref)
783 		nvmet_pr_put_ns_pc_ref(pc_ref);
784 	if (ns)
785 		nvmet_put_namespace(ns);
786 }
787 
788 void nvmet_req_complete(struct nvmet_req *req, u16 status)
789 {
790 	struct nvmet_sq *sq = req->sq;
791 
792 	__nvmet_req_complete(req, status);
793 	percpu_ref_put(&sq->ref);
794 }
795 EXPORT_SYMBOL_GPL(nvmet_req_complete);
796 
797 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
798 		u16 qid, u16 size)
799 {
800 	cq->qid = qid;
801 	cq->size = size;
802 }
803 
804 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
805 		u16 qid, u16 size)
806 {
807 	sq->sqhd = 0;
808 	sq->qid = qid;
809 	sq->size = size;
810 
811 	ctrl->sqs[qid] = sq;
812 }
813 
814 static void nvmet_confirm_sq(struct percpu_ref *ref)
815 {
816 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
817 
818 	complete(&sq->confirm_done);
819 }
820 
821 void nvmet_sq_destroy(struct nvmet_sq *sq)
822 {
823 	struct nvmet_ctrl *ctrl = sq->ctrl;
824 
825 	/*
826 	 * If this is the admin queue, complete all AERs so that our
827 	 * queue doesn't have outstanding requests on it.
828 	 */
829 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
830 		nvmet_async_events_failall(ctrl);
831 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
832 	wait_for_completion(&sq->confirm_done);
833 	wait_for_completion(&sq->free_done);
834 	percpu_ref_exit(&sq->ref);
835 	nvmet_auth_sq_free(sq);
836 
837 	/*
838 	 * We must reference the ctrl again after waiting for inflight IO
839 	 * to complete, because an admin connect may have sneaked in after we
840 	 * stored sq->ctrl locally but before we killed the percpu_ref.  The
841 	 * admin connect allocates and assigns sq->ctrl, which now needs a
842 	 * final ref put, as this ctrl is going away.
843 	 */
844 	ctrl = sq->ctrl;
845 
846 	if (ctrl) {
847 		/*
848 		 * The teardown flow may take some time, and the host may not
849 		 * send us keep-alive during this period, hence reset the
850 		 * traffic-based keep-alive timer so we don't trigger a
851 		 * controller teardown as a result of a keep-alive expiration.
852 		 */
853 		ctrl->reset_tbkas = true;
854 		sq->ctrl->sqs[sq->qid] = NULL;
855 		nvmet_ctrl_put(ctrl);
856 		sq->ctrl = NULL; /* allows reusing the queue later */
857 	}
858 }
859 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
860 
861 static void nvmet_sq_free(struct percpu_ref *ref)
862 {
863 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
864 
865 	complete(&sq->free_done);
866 }
867 
868 int nvmet_sq_init(struct nvmet_sq *sq)
869 {
870 	int ret;
871 
872 	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
873 	if (ret) {
874 		pr_err("percpu_ref init failed!\n");
875 		return ret;
876 	}
877 	init_completion(&sq->free_done);
878 	init_completion(&sq->confirm_done);
879 	nvmet_auth_sq_init(sq);
880 
881 	return 0;
882 }
883 EXPORT_SYMBOL_GPL(nvmet_sq_init);
884 
885 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
886 		struct nvmet_ns *ns)
887 {
888 	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
889 
890 	if (unlikely(state == NVME_ANA_INACCESSIBLE))
891 		return NVME_SC_ANA_INACCESSIBLE;
892 	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
893 		return NVME_SC_ANA_PERSISTENT_LOSS;
894 	if (unlikely(state == NVME_ANA_CHANGE))
895 		return NVME_SC_ANA_TRANSITION;
896 	return 0;
897 }
898 
899 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
900 {
901 	if (unlikely(req->ns->readonly)) {
902 		switch (req->cmd->common.opcode) {
903 		case nvme_cmd_read:
904 		case nvme_cmd_flush:
905 			break;
906 		default:
907 			return NVME_SC_NS_WRITE_PROTECTED;
908 		}
909 	}
910 
911 	return 0;
912 }
913 
914 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
915 {
916 	struct nvme_command *cmd = req->cmd;
917 	u16 ret;
918 
919 	if (nvme_is_fabrics(cmd))
920 		return nvmet_parse_fabrics_io_cmd(req);
921 
922 	if (unlikely(!nvmet_check_auth_status(req)))
923 		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
924 
925 	ret = nvmet_check_ctrl_status(req);
926 	if (unlikely(ret))
927 		return ret;
928 
929 	if (nvmet_is_passthru_req(req))
930 		return nvmet_parse_passthru_io_cmd(req);
931 
932 	ret = nvmet_req_find_ns(req);
933 	if (unlikely(ret))
934 		return ret;
935 
936 	ret = nvmet_check_ana_state(req->port, req->ns);
937 	if (unlikely(ret)) {
938 		req->error_loc = offsetof(struct nvme_common_command, nsid);
939 		return ret;
940 	}
941 	ret = nvmet_io_cmd_check_access(req);
942 	if (unlikely(ret)) {
943 		req->error_loc = offsetof(struct nvme_common_command, nsid);
944 		return ret;
945 	}
946 
947 	if (req->ns->pr.enable) {
948 		ret = nvmet_parse_pr_cmd(req);
949 		if (!ret)
950 			return ret;
951 	}
952 
953 	switch (req->ns->csi) {
954 	case NVME_CSI_NVM:
955 		if (req->ns->file)
956 			ret = nvmet_file_parse_io_cmd(req);
957 		else
958 			ret = nvmet_bdev_parse_io_cmd(req);
959 		break;
960 	case NVME_CSI_ZNS:
961 		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
962 			ret = nvmet_bdev_zns_parse_io_cmd(req);
963 		else
964 			ret = NVME_SC_INVALID_IO_CMD_SET;
965 		break;
966 	default:
967 		ret = NVME_SC_INVALID_IO_CMD_SET;
968 	}
969 	if (ret)
970 		return ret;
971 
972 	if (req->ns->pr.enable) {
973 		ret = nvmet_pr_check_cmd_access(req);
974 		if (ret)
975 			return ret;
976 
977 		ret = nvmet_pr_get_ns_pc_ref(req);
978 	}
979 	return ret;
980 }
981 
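/*
 * Common per-command entry point called by the transport drivers: reset the
 * per-request state, reject fused commands and SGL descriptors other than
 * METABUF, dispatch to connect/admin/I/O command parsing, and take a
 * reference on the submission queue for the lifetime of the request.
 */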
982 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
983 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
984 {
985 	u8 flags = req->cmd->common.flags;
986 	u16 status;
987 
988 	req->cq = cq;
989 	req->sq = sq;
990 	req->ops = ops;
991 	req->sg = NULL;
992 	req->metadata_sg = NULL;
993 	req->sg_cnt = 0;
994 	req->metadata_sg_cnt = 0;
995 	req->transfer_len = 0;
996 	req->metadata_len = 0;
997 	req->cqe->result.u64 = 0;
998 	req->cqe->status = 0;
999 	req->cqe->sq_head = 0;
1000 	req->ns = NULL;
1001 	req->error_loc = NVMET_NO_ERROR_LOC;
1002 	req->error_slba = 0;
1003 	req->pc_ref = NULL;
1004 
1005 	/* no support for fused commands yet */
1006 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
1007 		req->error_loc = offsetof(struct nvme_common_command, flags);
1008 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1009 		goto fail;
1010 	}
1011 
1012 	/*
1013 	 * For fabrics, the PSDT field shall describe a metadata pointer (MPTR)
1014 	 * that contains the address of a single contiguous physical buffer that
1015 	 * is byte aligned.
1016 	 */
1017 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
1018 		req->error_loc = offsetof(struct nvme_common_command, flags);
1019 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1020 		goto fail;
1021 	}
1022 
1023 	if (unlikely(!req->sq->ctrl))
1024 		/* will return an error for any non-connect command: */
1025 		status = nvmet_parse_connect_cmd(req);
1026 	else if (likely(req->sq->qid != 0))
1027 		status = nvmet_parse_io_cmd(req);
1028 	else
1029 		status = nvmet_parse_admin_cmd(req);
1030 
1031 	if (status)
1032 		goto fail;
1033 
1034 	trace_nvmet_req_init(req, req->cmd);
1035 
1036 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
1037 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1038 		goto fail;
1039 	}
1040 
1041 	if (sq->ctrl)
1042 		sq->ctrl->reset_tbkas = true;
1043 
1044 	return true;
1045 
1046 fail:
1047 	__nvmet_req_complete(req, status);
1048 	return false;
1049 }
1050 EXPORT_SYMBOL_GPL(nvmet_req_init);
1051 
1052 void nvmet_req_uninit(struct nvmet_req *req)
1053 {
1054 	percpu_ref_put(&req->sq->ref);
1055 	if (req->pc_ref)
1056 		nvmet_pr_put_ns_pc_ref(req->pc_ref);
1057 	if (req->ns)
1058 		nvmet_put_namespace(req->ns);
1059 }
1060 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
1061 
1062 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
1063 {
1064 	if (unlikely(len != req->transfer_len)) {
1065 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1066 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
1067 		return false;
1068 	}
1069 
1070 	return true;
1071 }
1072 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
1073 
1074 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
1075 {
1076 	if (unlikely(data_len > req->transfer_len)) {
1077 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1078 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
1079 		return false;
1080 	}
1081 
1082 	return true;
1083 }
1084 
1085 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
1086 {
1087 	return req->transfer_len - req->metadata_len;
1088 }
1089 
1090 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1091 		struct nvmet_req *req)
1092 {
1093 	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1094 			nvmet_data_transfer_len(req));
1095 	if (!req->sg)
1096 		goto out_err;
1097 
1098 	if (req->metadata_len) {
1099 		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1100 				&req->metadata_sg_cnt, req->metadata_len);
1101 		if (!req->metadata_sg)
1102 			goto out_free_sg;
1103 	}
1104 
1105 	req->p2p_dev = p2p_dev;
1106 
1107 	return 0;
1108 out_free_sg:
1109 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1110 out_err:
1111 	return -ENOMEM;
1112 }
1113 
1114 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1115 {
1116 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1117 	    !req->sq->ctrl || !req->sq->qid || !req->ns)
1118 		return NULL;
1119 	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1120 }
1121 
1122 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1123 {
1124 	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1125 
1126 	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1127 		return 0;
1128 
1129 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1130 			    &req->sg_cnt);
1131 	if (unlikely(!req->sg))
1132 		goto out;
1133 
1134 	if (req->metadata_len) {
1135 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1136 					     &req->metadata_sg_cnt);
1137 		if (unlikely(!req->metadata_sg))
1138 			goto out_free;
1139 	}
1140 
1141 	return 0;
1142 out_free:
1143 	sgl_free(req->sg);
1144 out:
1145 	return -ENOMEM;
1146 }
1147 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1148 
1149 void nvmet_req_free_sgls(struct nvmet_req *req)
1150 {
1151 	if (req->p2p_dev) {
1152 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1153 		if (req->metadata_sg)
1154 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1155 		req->p2p_dev = NULL;
1156 	} else {
1157 		sgl_free(req->sg);
1158 		if (req->metadata_sg)
1159 			sgl_free(req->metadata_sg);
1160 	}
1161 
1162 	req->sg = NULL;
1163 	req->metadata_sg = NULL;
1164 	req->sg_cnt = 0;
1165 	req->metadata_sg_cnt = 0;
1166 }
1167 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1168 
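/*
 * Helpers to extract the individual fields of the Controller Configuration
 * (CC) register value written by the host (EN, CSS, MPS, AMS, SHN, IOSQES,
 * IOCQES), using the shift definitions from <linux/nvme.h>.
 */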
1169 static inline bool nvmet_cc_en(u32 cc)
1170 {
1171 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1172 }
1173 
1174 static inline u8 nvmet_cc_css(u32 cc)
1175 {
1176 	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1177 }
1178 
1179 static inline u8 nvmet_cc_mps(u32 cc)
1180 {
1181 	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1182 }
1183 
1184 static inline u8 nvmet_cc_ams(u32 cc)
1185 {
1186 	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1187 }
1188 
1189 static inline u8 nvmet_cc_shn(u32 cc)
1190 {
1191 	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1192 }
1193 
1194 static inline u8 nvmet_cc_iosqes(u32 cc)
1195 {
1196 	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1197 }
1198 
1199 static inline u8 nvmet_cc_iocqes(u32 cc)
1200 {
1201 	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1202 }
1203 
1204 static inline bool nvmet_css_supported(u8 cc_css)
1205 {
1206 	switch (cc_css << NVME_CC_CSS_SHIFT) {
1207 	case NVME_CC_CSS_NVM:
1208 	case NVME_CC_CSS_CSI:
1209 		return true;
1210 	default:
1211 		return false;
1212 	}
1213 }
1214 
1215 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1216 {
1217 	lockdep_assert_held(&ctrl->lock);
1218 
1219 	/*
1220 	 * Only I/O controllers should verify iosqes and iocqes.
1221 	 * Strictly speaking, the spec says a discovery controller
1222 	 * should verify that iosqes and iocqes are zeroed; however, that
1223 	 * would break backwards compatibility, so don't enforce it.
1224 	 */
1225 	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
1226 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1227 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1228 		ctrl->csts = NVME_CSTS_CFS;
1229 		return;
1230 	}
1231 
1232 	if (nvmet_cc_mps(ctrl->cc) != 0 ||
1233 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1234 	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1235 		ctrl->csts = NVME_CSTS_CFS;
1236 		return;
1237 	}
1238 
1239 	ctrl->csts = NVME_CSTS_RDY;
1240 
1241 	/*
1242 	 * Controllers that are not yet enabled should not really enforce the
1243 	 * keep alive timeout, but we still want to track a timeout and cleanup
1244 	 * in case a host died before it enabled the controller.  Hence, simply
1245 	 * reset the keep alive timer when the controller is enabled.
1246 	 */
1247 	if (ctrl->kato)
1248 		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
1249 }
1250 
1251 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1252 {
1253 	lockdep_assert_held(&ctrl->lock);
1254 
1255 	/* XXX: tear down queues? */
1256 	ctrl->csts &= ~NVME_CSTS_RDY;
1257 	ctrl->cc = 0;
1258 }
1259 
1260 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1261 {
1262 	u32 old;
1263 
1264 	mutex_lock(&ctrl->lock);
1265 	old = ctrl->cc;
1266 	ctrl->cc = new;
1267 
1268 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1269 		nvmet_start_ctrl(ctrl);
1270 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1271 		nvmet_clear_ctrl(ctrl);
1272 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1273 		nvmet_clear_ctrl(ctrl);
1274 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1275 	}
1276 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1277 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1278 	mutex_unlock(&ctrl->lock);
1279 }
1280 
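/*
 * Build the Controller Capabilities (CAP) register advertised to the host:
 * CSS bit 37 (NVM command set) and bit 43 (one or more I/O command sets),
 * CC.EN timeout in bits 31:24 in 500 msec units, and MQES in bits 15:0 as a
 * zero's based maximum queue size.
 */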
1281 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1282 {
1283 	/* command sets supported: NVMe command set: */
1284 	ctrl->cap = (1ULL << 37);
1285 	/* Controller supports one or more I/O Command Sets */
1286 	ctrl->cap |= (1ULL << 43);
1287 	/* CC.EN timeout in 500msec units: */
1288 	ctrl->cap |= (15ULL << 24);
1289 	/* maximum queue entries supported: */
1290 	if (ctrl->ops->get_max_queue_size)
1291 		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
1292 				   ctrl->port->max_queue_size) - 1;
1293 	else
1294 		ctrl->cap |= ctrl->port->max_queue_size - 1;
1295 
1296 	if (nvmet_is_passthru_subsys(ctrl->subsys))
1297 		nvmet_passthrough_override_cap(ctrl);
1298 }
1299 
1300 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1301 				       const char *hostnqn, u16 cntlid,
1302 				       struct nvmet_req *req)
1303 {
1304 	struct nvmet_ctrl *ctrl = NULL;
1305 	struct nvmet_subsys *subsys;
1306 
1307 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1308 	if (!subsys) {
1309 		pr_warn("connect request for invalid subsystem %s!\n",
1310 			subsysnqn);
1311 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1312 		goto out;
1313 	}
1314 
1315 	mutex_lock(&subsys->lock);
1316 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1317 		if (ctrl->cntlid == cntlid) {
1318 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1319 				pr_warn("hostnqn mismatch.\n");
1320 				continue;
1321 			}
1322 			if (!kref_get_unless_zero(&ctrl->ref))
1323 				continue;
1324 
1325 			/* ctrl found */
1326 			goto found;
1327 		}
1328 	}
1329 
1330 	ctrl = NULL; /* ctrl not found */
1331 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1332 		cntlid, subsysnqn, hostnqn);
1333 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1334 
1335 found:
1336 	mutex_unlock(&subsys->lock);
1337 	nvmet_subsys_put(subsys);
1338 out:
1339 	return ctrl;
1340 }
1341 
1342 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1343 {
1344 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1345 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1346 		       req->cmd->common.opcode, req->sq->qid);
1347 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1348 	}
1349 
1350 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1351 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1352 		       req->cmd->common.opcode, req->sq->qid);
1353 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1354 	}
1355 
1356 	if (unlikely(!nvmet_check_auth_status(req))) {
1357 		pr_warn("qid %d not authenticated\n", req->sq->qid);
1358 		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
1359 	}
1360 	return 0;
1361 }
1362 
1363 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1364 {
1365 	struct nvmet_host_link *p;
1366 
1367 	lockdep_assert_held(&nvmet_config_sem);
1368 
1369 	if (subsys->allow_any_host)
1370 		return true;
1371 
1372 	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
1373 		return true;
1374 
1375 	list_for_each_entry(p, &subsys->hosts, entry) {
1376 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1377 			return true;
1378 	}
1379 
1380 	return false;
1381 }
1382 
1383 /*
1384  * Note: ctrl->subsys->lock should be held when calling this function
1385  */
1386 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1387 		struct nvmet_req *req)
1388 {
1389 	struct nvmet_ns *ns;
1390 	unsigned long idx;
1391 
1392 	if (!req->p2p_client)
1393 		return;
1394 
1395 	ctrl->p2p_client = get_device(req->p2p_client);
1396 
1397 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1398 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1399 }
1400 
1401 /*
1402  * Note: ctrl->subsys->lock should be held when calling this function
1403  */
1404 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1405 {
1406 	struct radix_tree_iter iter;
1407 	void __rcu **slot;
1408 
1409 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1410 		pci_dev_put(radix_tree_deref_slot(slot));
1411 
1412 	put_device(ctrl->p2p_client);
1413 }
1414 
1415 static void nvmet_fatal_error_handler(struct work_struct *work)
1416 {
1417 	struct nvmet_ctrl *ctrl =
1418 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1419 
1420 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1421 	ctrl->ops->delete_ctrl(ctrl);
1422 }
1423 
1424 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1425 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
1426 		uuid_t *hostid)
1427 {
1428 	struct nvmet_subsys *subsys;
1429 	struct nvmet_ctrl *ctrl;
1430 	int ret;
1431 	u16 status;
1432 
1433 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
1434 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1435 	if (!subsys) {
1436 		pr_warn("connect request for invalid subsystem %s!\n",
1437 			subsysnqn);
1438 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1439 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1440 		goto out;
1441 	}
1442 
1443 	down_read(&nvmet_config_sem);
1444 	if (!nvmet_host_allowed(subsys, hostnqn)) {
1445 		pr_info("connect by host %s for subsystem %s not allowed\n",
1446 			hostnqn, subsysnqn);
1447 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1448 		up_read(&nvmet_config_sem);
1449 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
1450 		req->error_loc = offsetof(struct nvme_common_command, dptr);
1451 		goto out_put_subsystem;
1452 	}
1453 	up_read(&nvmet_config_sem);
1454 
1455 	status = NVME_SC_INTERNAL;
1456 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1457 	if (!ctrl)
1458 		goto out_put_subsystem;
1459 	mutex_init(&ctrl->lock);
1460 
1461 	ctrl->port = req->port;
1462 	ctrl->ops = req->ops;
1463 
1464 #ifdef CONFIG_NVME_TARGET_PASSTHRU
1465 	/* By default, set loop targets to clear IDs */
1466 	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
1467 		subsys->clear_ids = 1;
1468 #endif
1469 
1470 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1471 	INIT_LIST_HEAD(&ctrl->async_events);
1472 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1473 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1474 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1475 
1476 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1477 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1478 
1479 	kref_init(&ctrl->ref);
1480 	ctrl->subsys = subsys;
1481 	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
1482 	nvmet_init_cap(ctrl);
1483 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1484 
1485 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1486 			sizeof(__le32), GFP_KERNEL);
1487 	if (!ctrl->changed_ns_list)
1488 		goto out_free_ctrl;
1489 
1490 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1491 			sizeof(struct nvmet_sq *),
1492 			GFP_KERNEL);
1493 	if (!ctrl->sqs)
1494 		goto out_free_changed_ns_list;
1495 
1496 	ret = ida_alloc_range(&cntlid_ida,
1497 			     subsys->cntlid_min, subsys->cntlid_max,
1498 			     GFP_KERNEL);
1499 	if (ret < 0) {
1500 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
1501 		goto out_free_sqs;
1502 	}
1503 	ctrl->cntlid = ret;
1504 
1505 	uuid_copy(&ctrl->hostid, hostid);
1506 
1507 	/*
1508 	 * Discovery controllers may use some arbitrary high value
1509 	 * in order to clean up stale discovery sessions
1510 	 */
1511 	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1512 		kato = NVMET_DISC_KATO_MS;
1513 
1514 	/* keep-alive timeout in seconds */
1515 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1516 
1517 	ctrl->err_counter = 0;
1518 	spin_lock_init(&ctrl->error_lock);
1519 
1520 	nvmet_start_keep_alive_timer(ctrl);
1521 
1522 	mutex_lock(&subsys->lock);
1523 	ret = nvmet_ctrl_init_pr(ctrl);
1524 	if (ret)
1525 		goto init_pr_fail;
1526 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1527 	nvmet_setup_p2p_ns_map(ctrl, req);
1528 	nvmet_debugfs_ctrl_setup(ctrl);
1529 	mutex_unlock(&subsys->lock);
1530 
1531 	*ctrlp = ctrl;
1532 	return 0;
1533 
1534 init_pr_fail:
1535 	mutex_unlock(&subsys->lock);
1536 	nvmet_stop_keep_alive_timer(ctrl);
1537 	ida_free(&cntlid_ida, ctrl->cntlid);
1538 out_free_sqs:
1539 	kfree(ctrl->sqs);
1540 out_free_changed_ns_list:
1541 	kfree(ctrl->changed_ns_list);
1542 out_free_ctrl:
1543 	kfree(ctrl);
1544 out_put_subsystem:
1545 	nvmet_subsys_put(subsys);
1546 out:
1547 	return status;
1548 }
1549 
1550 static void nvmet_ctrl_free(struct kref *ref)
1551 {
1552 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1553 	struct nvmet_subsys *subsys = ctrl->subsys;
1554 
1555 	mutex_lock(&subsys->lock);
1556 	nvmet_ctrl_destroy_pr(ctrl);
1557 	nvmet_release_p2p_ns_map(ctrl);
1558 	list_del(&ctrl->subsys_entry);
1559 	mutex_unlock(&subsys->lock);
1560 
1561 	nvmet_stop_keep_alive_timer(ctrl);
1562 
1563 	flush_work(&ctrl->async_event_work);
1564 	cancel_work_sync(&ctrl->fatal_err_work);
1565 
1566 	nvmet_destroy_auth(ctrl);
1567 
1568 	nvmet_debugfs_ctrl_free(ctrl);
1569 
1570 	ida_free(&cntlid_ida, ctrl->cntlid);
1571 
1572 	nvmet_async_events_free(ctrl);
1573 	kfree(ctrl->sqs);
1574 	kfree(ctrl->changed_ns_list);
1575 	kfree(ctrl);
1576 
1577 	nvmet_subsys_put(subsys);
1578 }
1579 
1580 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1581 {
1582 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1583 }
1584 
1585 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1586 {
1587 	mutex_lock(&ctrl->lock);
1588 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1589 		ctrl->csts |= NVME_CSTS_CFS;
1590 		queue_work(nvmet_wq, &ctrl->fatal_err_work);
1591 	}
1592 	mutex_unlock(&ctrl->lock);
1593 }
1594 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1595 
1596 ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
1597 		char *traddr, size_t traddr_len)
1598 {
1599 	if (!ctrl->ops->host_traddr)
1600 		return -EOPNOTSUPP;
1601 	return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
1602 }
1603 
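/*
 * Resolve a subsystem NQN for a connect request on the given port.  The
 * well-known discovery NQN and the unique discovery subsystem NQN both map to
 * nvmet_disc_subsys; any other NQN must be linked to the port.  A reference
 * is taken on the subsystem that is returned.
 */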
1604 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1605 		const char *subsysnqn)
1606 {
1607 	struct nvmet_subsys_link *p;
1608 
1609 	if (!port)
1610 		return NULL;
1611 
1612 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1613 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1614 			return NULL;
1615 		return nvmet_disc_subsys;
1616 	}
1617 
1618 	down_read(&nvmet_config_sem);
1619 	if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
1620 				NVMF_NQN_SIZE)) {
1621 		if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
1622 			up_read(&nvmet_config_sem);
1623 			return nvmet_disc_subsys;
1624 		}
1625 	}
1626 	list_for_each_entry(p, &port->subsystems, entry) {
1627 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1628 				NVMF_NQN_SIZE)) {
1629 			if (!kref_get_unless_zero(&p->subsys->ref))
1630 				break;
1631 			up_read(&nvmet_config_sem);
1632 			return p->subsys;
1633 		}
1634 	}
1635 	up_read(&nvmet_config_sem);
1636 	return NULL;
1637 }
1638 
1639 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1640 		enum nvme_subsys_type type)
1641 {
1642 	struct nvmet_subsys *subsys;
1643 	char serial[NVMET_SN_MAX_SIZE / 2];
1644 	int ret;
1645 
1646 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1647 	if (!subsys)
1648 		return ERR_PTR(-ENOMEM);
1649 
1650 	subsys->ver = NVMET_DEFAULT_VS;
1651 	/* generate a random serial number as our controllers are ephemeral: */
1652 	get_random_bytes(&serial, sizeof(serial));
1653 	bin2hex(subsys->serial, &serial, sizeof(serial));
1654 
1655 	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1656 	if (!subsys->model_number) {
1657 		ret = -ENOMEM;
1658 		goto free_subsys;
1659 	}
1660 
1661 	subsys->ieee_oui = 0;
1662 
1663 	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
1664 	if (!subsys->firmware_rev) {
1665 		ret = -ENOMEM;
1666 		goto free_mn;
1667 	}
1668 
1669 	switch (type) {
1670 	case NVME_NQN_NVME:
1671 		subsys->max_qid = NVMET_NR_QUEUES;
1672 		break;
1673 	case NVME_NQN_DISC:
1674 	case NVME_NQN_CURR:
1675 		subsys->max_qid = 0;
1676 		break;
1677 	default:
1678 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1679 		ret = -EINVAL;
1680 		goto free_fr;
1681 	}
1682 	subsys->type = type;
1683 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1684 			GFP_KERNEL);
1685 	if (!subsys->subsysnqn) {
1686 		ret = -ENOMEM;
1687 		goto free_fr;
1688 	}
1689 	subsys->cntlid_min = NVME_CNTLID_MIN;
1690 	subsys->cntlid_max = NVME_CNTLID_MAX;
1691 	kref_init(&subsys->ref);
1692 
1693 	mutex_init(&subsys->lock);
1694 	xa_init(&subsys->namespaces);
1695 	INIT_LIST_HEAD(&subsys->ctrls);
1696 	INIT_LIST_HEAD(&subsys->hosts);
1697 
1698 	ret = nvmet_debugfs_subsys_setup(subsys);
1699 	if (ret)
1700 		goto free_subsysnqn;
1701 
1702 	return subsys;
1703 
1704 free_subsysnqn:
1705 	kfree(subsys->subsysnqn);
1706 free_fr:
1707 	kfree(subsys->firmware_rev);
1708 free_mn:
1709 	kfree(subsys->model_number);
1710 free_subsys:
1711 	kfree(subsys);
1712 	return ERR_PTR(ret);
1713 }
1714 
1715 static void nvmet_subsys_free(struct kref *ref)
1716 {
1717 	struct nvmet_subsys *subsys =
1718 		container_of(ref, struct nvmet_subsys, ref);
1719 
1720 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1721 
1722 	nvmet_debugfs_subsys_free(subsys);
1723 
1724 	xa_destroy(&subsys->namespaces);
1725 	nvmet_passthru_subsys_free(subsys);
1726 
1727 	kfree(subsys->subsysnqn);
1728 	kfree(subsys->model_number);
1729 	kfree(subsys->firmware_rev);
1730 	kfree(subsys);
1731 }
1732 
1733 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1734 {
1735 	struct nvmet_ctrl *ctrl;
1736 
1737 	mutex_lock(&subsys->lock);
1738 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1739 		ctrl->ops->delete_ctrl(ctrl);
1740 	mutex_unlock(&subsys->lock);
1741 }
1742 
1743 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1744 {
1745 	kref_put(&subsys->ref, nvmet_subsys_free);
1746 }
1747 
1748 static int __init nvmet_init(void)
1749 {
1750 	int error = -ENOMEM;
1751 
1752 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1753 
1754 	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
1755 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
1756 			SLAB_HWCACHE_ALIGN, NULL);
1757 	if (!nvmet_bvec_cache)
1758 		return -ENOMEM;
1759 
1760 	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1761 	if (!zbd_wq)
1762 		goto out_destroy_bvec_cache;
1763 
1764 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1765 			WQ_MEM_RECLAIM, 0);
1766 	if (!buffered_io_wq)
1767 		goto out_free_zbd_work_queue;
1768 
1769 	nvmet_wq = alloc_workqueue("nvmet-wq",
1770 			WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0);
1771 	if (!nvmet_wq)
1772 		goto out_free_buffered_work_queue;
1773 
1774 	error = nvmet_init_discovery();
1775 	if (error)
1776 		goto out_free_nvmet_work_queue;
1777 
1778 	error = nvmet_init_debugfs();
1779 	if (error)
1780 		goto out_exit_discovery;
1781 
1782 	error = nvmet_init_configfs();
1783 	if (error)
1784 		goto out_exit_debugfs;
1785 
1786 	return 0;
1787 
1788 out_exit_debugfs:
1789 	nvmet_exit_debugfs();
1790 out_exit_discovery:
1791 	nvmet_exit_discovery();
1792 out_free_nvmet_work_queue:
1793 	destroy_workqueue(nvmet_wq);
1794 out_free_buffered_work_queue:
1795 	destroy_workqueue(buffered_io_wq);
1796 out_free_zbd_work_queue:
1797 	destroy_workqueue(zbd_wq);
1798 out_destroy_bvec_cache:
1799 	kmem_cache_destroy(nvmet_bvec_cache);
1800 	return error;
1801 }
1802 
1803 static void __exit nvmet_exit(void)
1804 {
1805 	nvmet_exit_configfs();
1806 	nvmet_exit_debugfs();
1807 	nvmet_exit_discovery();
1808 	ida_destroy(&cntlid_ida);
1809 	destroy_workqueue(nvmet_wq);
1810 	destroy_workqueue(buffered_io_wq);
1811 	destroy_workqueue(zbd_wq);
1812 	kmem_cache_destroy(nvmet_bvec_cache);
1813 
1814 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1815 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1816 }
1817 
1818 module_init(nvmet_init);
1819 module_exit(nvmet_exit);
1820 
1821 MODULE_DESCRIPTION("NVMe target core framework");
1822 MODULE_LICENSE("GPL v2");
1823