xref: /linux/drivers/nvme/host/multipath.c (revision e540341508ce2f6e27810106253d5de194b66750)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2017-2018 Christoph Hellwig.
4  */
5 
6 #include <linux/backing-dev.h>
7 #include <linux/moduleparam.h>
8 #include <linux/vmalloc.h>
9 #include <trace/events/block.h>
10 #include "nvme.h"
11 
12 bool multipath = true;
13 static bool multipath_always_on;
14 
multipath_param_set(const char * val,const struct kernel_param * kp)15 static int multipath_param_set(const char *val, const struct kernel_param *kp)
16 {
17 	int ret;
18 	bool *arg = kp->arg;
19 
20 	ret = param_set_bool(val, kp);
21 	if (ret)
22 		return ret;
23 
24 	if (multipath_always_on && !*arg) {
25 		pr_err("Can't disable multipath when multipath_always_on is configured.\n");
26 		*arg = true;
27 		return -EINVAL;
28 	}
29 
30 	return 0;
31 }
32 
33 static const struct kernel_param_ops multipath_param_ops = {
34 	.set = multipath_param_set,
35 	.get = param_get_bool,
36 };
37 
38 module_param_cb(multipath, &multipath_param_ops, &multipath, 0444);
39 MODULE_PARM_DESC(multipath,
40 	"turn on native support for multiple controllers per subsystem");
41 
multipath_always_on_set(const char * val,const struct kernel_param * kp)42 static int multipath_always_on_set(const char *val,
43 		const struct kernel_param *kp)
44 {
45 	int ret;
46 	bool *arg = kp->arg;
47 
48 	ret = param_set_bool(val, kp);
49 	if (ret < 0)
50 		return ret;
51 
52 	if (*arg)
53 		multipath = true;
54 
55 	return 0;
56 }
57 
58 static const struct kernel_param_ops multipath_always_on_ops = {
59 	.set = multipath_always_on_set,
60 	.get = param_get_bool,
61 };
62 
63 module_param_cb(multipath_always_on, &multipath_always_on_ops,
64 		&multipath_always_on, 0444);
65 MODULE_PARM_DESC(multipath_always_on,
66 	"create multipath node always except for private namespace with non-unique nsid; note that this also implicitly enables native multipath support");
67 
68 static const char *nvme_iopolicy_names[] = {
69 	[NVME_IOPOLICY_NUMA]	= "numa",
70 	[NVME_IOPOLICY_RR]	= "round-robin",
71 	[NVME_IOPOLICY_QD]      = "queue-depth",
72 };
73 
74 static int iopolicy = NVME_IOPOLICY_NUMA;
75 
nvme_set_iopolicy(const char * val,const struct kernel_param * kp)76 static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
77 {
78 	if (!val)
79 		return -EINVAL;
80 	if (!strncmp(val, "numa", 4))
81 		iopolicy = NVME_IOPOLICY_NUMA;
82 	else if (!strncmp(val, "round-robin", 11))
83 		iopolicy = NVME_IOPOLICY_RR;
84 	else if (!strncmp(val, "queue-depth", 11))
85 		iopolicy = NVME_IOPOLICY_QD;
86 	else
87 		return -EINVAL;
88 
89 	return 0;
90 }
91 
nvme_get_iopolicy(char * buf,const struct kernel_param * kp)92 static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
93 {
94 	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
95 }
96 
97 module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
98 	&iopolicy, 0644);
99 MODULE_PARM_DESC(iopolicy,
100 	"Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
101 
nvme_mpath_default_iopolicy(struct nvme_subsystem * subsys)102 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
103 {
104 	subsys->iopolicy = iopolicy;
105 }
106 
nvme_mpath_unfreeze(struct nvme_subsystem * subsys)107 void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
108 {
109 	struct nvme_ns_head *h;
110 
111 	lockdep_assert_held(&subsys->lock);
112 	list_for_each_entry(h, &subsys->nsheads, entry)
113 		if (h->disk)
114 			blk_mq_unfreeze_queue_nomemrestore(h->disk->queue);
115 }
116 
nvme_mpath_wait_freeze(struct nvme_subsystem * subsys)117 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
118 {
119 	struct nvme_ns_head *h;
120 
121 	lockdep_assert_held(&subsys->lock);
122 	list_for_each_entry(h, &subsys->nsheads, entry)
123 		if (h->disk)
124 			blk_mq_freeze_queue_wait(h->disk->queue);
125 }
126 
nvme_mpath_start_freeze(struct nvme_subsystem * subsys)127 void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
128 {
129 	struct nvme_ns_head *h;
130 
131 	lockdep_assert_held(&subsys->lock);
132 	list_for_each_entry(h, &subsys->nsheads, entry)
133 		if (h->disk)
134 			blk_freeze_queue_start(h->disk->queue);
135 }
136 
nvme_failover_req(struct request * req)137 void nvme_failover_req(struct request *req)
138 {
139 	struct nvme_ns *ns = req->q->queuedata;
140 	u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK;
141 	unsigned long flags;
142 	struct bio *bio;
143 
144 	nvme_mpath_clear_current_path(ns);
145 
146 	/*
147 	 * If we got back an ANA error, we know the controller is alive but not
148 	 * ready to serve this namespace.  Kick of a re-read of the ANA
149 	 * information page, and just try any other available path for now.
150 	 */
151 	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
152 		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
153 		queue_work(nvme_wq, &ns->ctrl->ana_work);
154 	}
155 
156 	spin_lock_irqsave(&ns->head->requeue_lock, flags);
157 	for (bio = req->bio; bio; bio = bio->bi_next) {
158 		bio_set_dev(bio, ns->head->disk->part0);
159 		if (bio->bi_opf & REQ_POLLED) {
160 			bio->bi_opf &= ~REQ_POLLED;
161 			bio->bi_cookie = BLK_QC_T_NONE;
162 		}
163 		/*
164 		 * The alternate request queue that we may end up submitting
165 		 * the bio to may be frozen temporarily, in this case REQ_NOWAIT
166 		 * will fail the I/O immediately with EAGAIN to the issuer.
167 		 * We are not in the issuer context which cannot block. Clear
168 		 * the flag to avoid spurious EAGAIN I/O failures.
169 		 */
170 		bio->bi_opf &= ~REQ_NOWAIT;
171 	}
172 	blk_steal_bios(&ns->head->requeue_list, req);
173 	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
174 
175 	nvme_req(req)->status = 0;
176 	nvme_end_req(req);
177 	kblockd_schedule_work(&ns->head->requeue_work);
178 }
179 
nvme_mpath_start_request(struct request * rq)180 void nvme_mpath_start_request(struct request *rq)
181 {
182 	struct nvme_ns *ns = rq->q->queuedata;
183 	struct gendisk *disk = ns->head->disk;
184 
185 	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
186 		atomic_inc(&ns->ctrl->nr_active);
187 		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
188 	}
189 
190 	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
191 		return;
192 
193 	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
194 	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
195 						      jiffies);
196 }
197 EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
198 
nvme_mpath_end_request(struct request * rq)199 void nvme_mpath_end_request(struct request *rq)
200 {
201 	struct nvme_ns *ns = rq->q->queuedata;
202 
203 	if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
204 		atomic_dec_if_positive(&ns->ctrl->nr_active);
205 
206 	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
207 		return;
208 	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
209 			 blk_rq_bytes(rq) >> SECTOR_SHIFT,
210 			 nvme_req(rq)->start_time);
211 }
212 
nvme_kick_requeue_lists(struct nvme_ctrl * ctrl)213 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
214 {
215 	struct nvme_ns *ns;
216 	int srcu_idx;
217 
218 	srcu_idx = srcu_read_lock(&ctrl->srcu);
219 	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
220 				 srcu_read_lock_held(&ctrl->srcu)) {
221 		if (!ns->head->disk)
222 			continue;
223 		kblockd_schedule_work(&ns->head->requeue_work);
224 		if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
225 			disk_uevent(ns->head->disk, KOBJ_CHANGE);
226 	}
227 	srcu_read_unlock(&ctrl->srcu, srcu_idx);
228 }
229 
230 static const char *nvme_ana_state_names[] = {
231 	[0]				= "invalid state",
232 	[NVME_ANA_OPTIMIZED]		= "optimized",
233 	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
234 	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
235 	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
236 	[NVME_ANA_CHANGE]		= "change",
237 };
238 
nvme_mpath_clear_current_path(struct nvme_ns * ns)239 bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
240 {
241 	struct nvme_ns_head *head = ns->head;
242 	bool changed = false;
243 	int node;
244 
245 	if (!head)
246 		goto out;
247 
248 	for_each_node(node) {
249 		if (ns == rcu_access_pointer(head->current_path[node])) {
250 			rcu_assign_pointer(head->current_path[node], NULL);
251 			changed = true;
252 		}
253 	}
254 out:
255 	return changed;
256 }
257 
nvme_mpath_clear_ctrl_paths(struct nvme_ctrl * ctrl)258 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
259 {
260 	struct nvme_ns *ns;
261 	int srcu_idx;
262 
263 	srcu_idx = srcu_read_lock(&ctrl->srcu);
264 	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
265 				 srcu_read_lock_held(&ctrl->srcu)) {
266 		nvme_mpath_clear_current_path(ns);
267 		kblockd_schedule_work(&ns->head->requeue_work);
268 	}
269 	srcu_read_unlock(&ctrl->srcu, srcu_idx);
270 }
271 
nvme_mpath_revalidate_paths(struct nvme_ns * ns)272 void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
273 {
274 	struct nvme_ns_head *head = ns->head;
275 	sector_t capacity = get_capacity(head->disk);
276 	int node;
277 	int srcu_idx;
278 
279 	srcu_idx = srcu_read_lock(&head->srcu);
280 	list_for_each_entry_srcu(ns, &head->list, siblings,
281 				 srcu_read_lock_held(&head->srcu)) {
282 		if (capacity != get_capacity(ns->disk))
283 			clear_bit(NVME_NS_READY, &ns->flags);
284 	}
285 	srcu_read_unlock(&head->srcu, srcu_idx);
286 
287 	for_each_node(node)
288 		rcu_assign_pointer(head->current_path[node], NULL);
289 	kblockd_schedule_work(&head->requeue_work);
290 }
291 
nvme_path_is_disabled(struct nvme_ns * ns)292 static bool nvme_path_is_disabled(struct nvme_ns *ns)
293 {
294 	enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
295 
296 	/*
297 	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
298 	 * still be able to complete assuming that the controller is connected.
299 	 * Otherwise it will fail immediately and return to the requeue list.
300 	 */
301 	if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
302 		return true;
303 	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
304 	    !test_bit(NVME_NS_READY, &ns->flags))
305 		return true;
306 	return false;
307 }
308 
__nvme_find_path(struct nvme_ns_head * head,int node)309 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
310 {
311 	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
312 	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
313 
314 	list_for_each_entry_srcu(ns, &head->list, siblings,
315 				 srcu_read_lock_held(&head->srcu)) {
316 		if (nvme_path_is_disabled(ns))
317 			continue;
318 
319 		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
320 		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
321 			distance = node_distance(node, ns->ctrl->numa_node);
322 		else
323 			distance = LOCAL_DISTANCE;
324 
325 		switch (ns->ana_state) {
326 		case NVME_ANA_OPTIMIZED:
327 			if (distance < found_distance) {
328 				found_distance = distance;
329 				found = ns;
330 			}
331 			break;
332 		case NVME_ANA_NONOPTIMIZED:
333 			if (distance < fallback_distance) {
334 				fallback_distance = distance;
335 				fallback = ns;
336 			}
337 			break;
338 		default:
339 			break;
340 		}
341 	}
342 
343 	if (!found)
344 		found = fallback;
345 	if (found)
346 		rcu_assign_pointer(head->current_path[node], found);
347 	return found;
348 }
349 
nvme_next_ns(struct nvme_ns_head * head,struct nvme_ns * ns)350 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
351 		struct nvme_ns *ns)
352 {
353 	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
354 			siblings);
355 	if (ns)
356 		return ns;
357 	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
358 }
359 
nvme_round_robin_path(struct nvme_ns_head * head)360 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head)
361 {
362 	struct nvme_ns *ns, *found = NULL;
363 	int node = numa_node_id();
364 	struct nvme_ns *old = srcu_dereference(head->current_path[node],
365 					       &head->srcu);
366 
367 	if (unlikely(!old))
368 		return __nvme_find_path(head, node);
369 
370 	if (list_is_singular(&head->list)) {
371 		if (nvme_path_is_disabled(old))
372 			return NULL;
373 		return old;
374 	}
375 
376 	for (ns = nvme_next_ns(head, old);
377 	     ns && ns != old;
378 	     ns = nvme_next_ns(head, ns)) {
379 		if (nvme_path_is_disabled(ns))
380 			continue;
381 
382 		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
383 			found = ns;
384 			goto out;
385 		}
386 		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
387 			found = ns;
388 	}
389 
390 	/*
391 	 * The loop above skips the current path for round-robin semantics.
392 	 * Fall back to the current path if either:
393 	 *  - no other optimized path found and current is optimized,
394 	 *  - no other usable path found and current is usable.
395 	 */
396 	if (!nvme_path_is_disabled(old) &&
397 	    (old->ana_state == NVME_ANA_OPTIMIZED ||
398 	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
399 		return old;
400 
401 	if (!found)
402 		return NULL;
403 out:
404 	rcu_assign_pointer(head->current_path[node], found);
405 	return found;
406 }
407 
nvme_queue_depth_path(struct nvme_ns_head * head)408 static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
409 {
410 	struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
411 	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
412 	unsigned int depth;
413 
414 	list_for_each_entry_srcu(ns, &head->list, siblings,
415 				 srcu_read_lock_held(&head->srcu)) {
416 		if (nvme_path_is_disabled(ns))
417 			continue;
418 
419 		depth = atomic_read(&ns->ctrl->nr_active);
420 
421 		switch (ns->ana_state) {
422 		case NVME_ANA_OPTIMIZED:
423 			if (depth < min_depth_opt) {
424 				min_depth_opt = depth;
425 				best_opt = ns;
426 			}
427 			break;
428 		case NVME_ANA_NONOPTIMIZED:
429 			if (depth < min_depth_nonopt) {
430 				min_depth_nonopt = depth;
431 				best_nonopt = ns;
432 			}
433 			break;
434 		default:
435 			break;
436 		}
437 
438 		if (min_depth_opt == 0)
439 			return best_opt;
440 	}
441 
442 	return best_opt ? best_opt : best_nonopt;
443 }
444 
nvme_path_is_optimized(struct nvme_ns * ns)445 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
446 {
447 	return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
448 		ns->ana_state == NVME_ANA_OPTIMIZED;
449 }
450 
nvme_numa_path(struct nvme_ns_head * head)451 static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
452 {
453 	int node = numa_node_id();
454 	struct nvme_ns *ns;
455 
456 	ns = srcu_dereference(head->current_path[node], &head->srcu);
457 	if (unlikely(!ns))
458 		return __nvme_find_path(head, node);
459 	if (unlikely(!nvme_path_is_optimized(ns)))
460 		return __nvme_find_path(head, node);
461 	return ns;
462 }
463 
nvme_find_path(struct nvme_ns_head * head)464 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
465 {
466 	switch (READ_ONCE(head->subsys->iopolicy)) {
467 	case NVME_IOPOLICY_QD:
468 		return nvme_queue_depth_path(head);
469 	case NVME_IOPOLICY_RR:
470 		return nvme_round_robin_path(head);
471 	default:
472 		return nvme_numa_path(head);
473 	}
474 }
475 
nvme_available_path(struct nvme_ns_head * head)476 static bool nvme_available_path(struct nvme_ns_head *head)
477 {
478 	struct nvme_ns *ns;
479 
480 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
481 		return false;
482 
483 	list_for_each_entry_srcu(ns, &head->list, siblings,
484 				 srcu_read_lock_held(&head->srcu)) {
485 		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
486 			continue;
487 		switch (nvme_ctrl_state(ns->ctrl)) {
488 		case NVME_CTRL_LIVE:
489 		case NVME_CTRL_RESETTING:
490 		case NVME_CTRL_CONNECTING:
491 			return true;
492 		default:
493 			break;
494 		}
495 	}
496 
497 	/*
498 	 * If "head->delayed_removal_secs" is configured (i.e., non-zero), do
499 	 * not immediately fail I/O. Instead, requeue the I/O for the configured
500 	 * duration, anticipating that if there's a transient link failure then
501 	 * it may recover within this time window. This parameter is exported to
502 	 * userspace via sysfs, and its default value is zero. It is internally
503 	 * mapped to NVME_NSHEAD_QUEUE_IF_NO_PATH. When delayed_removal_secs is
504 	 * non-zero, this flag is set to true. When zero, the flag is cleared.
505 	 */
506 	return nvme_mpath_queue_if_no_path(head);
507 }
508 
nvme_ns_head_submit_bio(struct bio * bio)509 static void nvme_ns_head_submit_bio(struct bio *bio)
510 {
511 	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
512 	struct device *dev = disk_to_dev(head->disk);
513 	struct nvme_ns *ns;
514 	int srcu_idx;
515 
516 	/*
517 	 * The namespace might be going away and the bio might be moved to a
518 	 * different queue via blk_steal_bios(), so we need to use the bio_split
519 	 * pool from the original queue to allocate the bvecs from.
520 	 */
521 	bio = bio_split_to_limits(bio);
522 	if (!bio)
523 		return;
524 
525 	srcu_idx = srcu_read_lock(&head->srcu);
526 	ns = nvme_find_path(head);
527 	if (likely(ns)) {
528 		bio_set_dev(bio, ns->disk->part0);
529 		bio->bi_opf |= REQ_NVME_MPATH;
530 		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
531 				      bio->bi_iter.bi_sector);
532 		submit_bio_noacct(bio);
533 	} else if (nvme_available_path(head)) {
534 		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
535 
536 		spin_lock_irq(&head->requeue_lock);
537 		bio_list_add(&head->requeue_list, bio);
538 		spin_unlock_irq(&head->requeue_lock);
539 	} else {
540 		dev_warn_ratelimited(dev, "no available path - failing I/O\n");
541 
542 		bio_io_error(bio);
543 	}
544 
545 	srcu_read_unlock(&head->srcu, srcu_idx);
546 }
547 
nvme_ns_head_open(struct gendisk * disk,blk_mode_t mode)548 static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode)
549 {
550 	if (!nvme_tryget_ns_head(disk->private_data))
551 		return -ENXIO;
552 	return 0;
553 }
554 
nvme_ns_head_release(struct gendisk * disk)555 static void nvme_ns_head_release(struct gendisk *disk)
556 {
557 	nvme_put_ns_head(disk->private_data);
558 }
559 
nvme_ns_head_get_unique_id(struct gendisk * disk,u8 id[16],enum blk_unique_id type)560 static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
561 		enum blk_unique_id type)
562 {
563 	struct nvme_ns_head *head = disk->private_data;
564 	struct nvme_ns *ns;
565 	int srcu_idx, ret = -EWOULDBLOCK;
566 
567 	srcu_idx = srcu_read_lock(&head->srcu);
568 	ns = nvme_find_path(head);
569 	if (ns)
570 		ret = nvme_ns_get_unique_id(ns, id, type);
571 	srcu_read_unlock(&head->srcu, srcu_idx);
572 	return ret;
573 }
574 
575 #ifdef CONFIG_BLK_DEV_ZONED
nvme_ns_head_report_zones(struct gendisk * disk,sector_t sector,unsigned int nr_zones,report_zones_cb cb,void * data)576 static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
577 		unsigned int nr_zones, report_zones_cb cb, void *data)
578 {
579 	struct nvme_ns_head *head = disk->private_data;
580 	struct nvme_ns *ns;
581 	int srcu_idx, ret = -EWOULDBLOCK;
582 
583 	srcu_idx = srcu_read_lock(&head->srcu);
584 	ns = nvme_find_path(head);
585 	if (ns)
586 		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
587 	srcu_read_unlock(&head->srcu, srcu_idx);
588 	return ret;
589 }
590 #else
591 #define nvme_ns_head_report_zones	NULL
592 #endif /* CONFIG_BLK_DEV_ZONED */
593 
594 const struct block_device_operations nvme_ns_head_ops = {
595 	.owner		= THIS_MODULE,
596 	.submit_bio	= nvme_ns_head_submit_bio,
597 	.open		= nvme_ns_head_open,
598 	.release	= nvme_ns_head_release,
599 	.ioctl		= nvme_ns_head_ioctl,
600 	.compat_ioctl	= blkdev_compat_ptr_ioctl,
601 	.getgeo		= nvme_getgeo,
602 	.get_unique_id	= nvme_ns_head_get_unique_id,
603 	.report_zones	= nvme_ns_head_report_zones,
604 	.pr_ops		= &nvme_pr_ops,
605 };
606 
cdev_to_ns_head(struct cdev * cdev)607 static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
608 {
609 	return container_of(cdev, struct nvme_ns_head, cdev);
610 }
611 
nvme_ns_head_chr_open(struct inode * inode,struct file * file)612 static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
613 {
614 	if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
615 		return -ENXIO;
616 	return 0;
617 }
618 
nvme_ns_head_chr_release(struct inode * inode,struct file * file)619 static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
620 {
621 	nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
622 	return 0;
623 }
624 
625 static const struct file_operations nvme_ns_head_chr_fops = {
626 	.owner		= THIS_MODULE,
627 	.open		= nvme_ns_head_chr_open,
628 	.release	= nvme_ns_head_chr_release,
629 	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
630 	.compat_ioctl	= compat_ptr_ioctl,
631 	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
632 	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
633 };
634 
nvme_add_ns_head_cdev(struct nvme_ns_head * head)635 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
636 {
637 	int ret;
638 
639 	head->cdev_device.parent = &head->subsys->dev;
640 	ret = dev_set_name(&head->cdev_device, "ng%dn%d",
641 			   head->subsys->instance, head->instance);
642 	if (ret)
643 		return ret;
644 	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
645 			    &nvme_ns_head_chr_fops, THIS_MODULE);
646 	return ret;
647 }
648 
nvme_partition_scan_work(struct work_struct * work)649 static void nvme_partition_scan_work(struct work_struct *work)
650 {
651 	struct nvme_ns_head *head =
652 		container_of(work, struct nvme_ns_head, partition_scan_work);
653 
654 	if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
655 					     &head->disk->state)))
656 		return;
657 
658 	mutex_lock(&head->disk->open_mutex);
659 	bdev_disk_changed(head->disk, false);
660 	mutex_unlock(&head->disk->open_mutex);
661 }
662 
nvme_requeue_work(struct work_struct * work)663 static void nvme_requeue_work(struct work_struct *work)
664 {
665 	struct nvme_ns_head *head =
666 		container_of(work, struct nvme_ns_head, requeue_work);
667 	struct bio *bio, *next;
668 
669 	spin_lock_irq(&head->requeue_lock);
670 	next = bio_list_get(&head->requeue_list);
671 	spin_unlock_irq(&head->requeue_lock);
672 
673 	while ((bio = next) != NULL) {
674 		next = bio->bi_next;
675 		bio->bi_next = NULL;
676 
677 		submit_bio_noacct(bio);
678 	}
679 }
680 
nvme_remove_head(struct nvme_ns_head * head)681 static void nvme_remove_head(struct nvme_ns_head *head)
682 {
683 	if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
684 		/*
685 		 * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
686 		 * to allow multipath to fail all I/O.
687 		 */
688 		kblockd_schedule_work(&head->requeue_work);
689 
690 		nvme_cdev_del(&head->cdev, &head->cdev_device);
691 		synchronize_srcu(&head->srcu);
692 		del_gendisk(head->disk);
693 		nvme_put_ns_head(head);
694 	}
695 }
696 
nvme_remove_head_work(struct work_struct * work)697 static void nvme_remove_head_work(struct work_struct *work)
698 {
699 	struct nvme_ns_head *head = container_of(to_delayed_work(work),
700 			struct nvme_ns_head, remove_work);
701 	bool remove = false;
702 
703 	mutex_lock(&head->subsys->lock);
704 	if (list_empty(&head->list)) {
705 		list_del_init(&head->entry);
706 		remove = true;
707 	}
708 	mutex_unlock(&head->subsys->lock);
709 	if (remove)
710 		nvme_remove_head(head);
711 
712 	module_put(THIS_MODULE);
713 }
714 
nvme_mpath_alloc_disk(struct nvme_ctrl * ctrl,struct nvme_ns_head * head)715 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
716 {
717 	struct queue_limits lim;
718 
719 	mutex_init(&head->lock);
720 	bio_list_init(&head->requeue_list);
721 	spin_lock_init(&head->requeue_lock);
722 	INIT_WORK(&head->requeue_work, nvme_requeue_work);
723 	INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
724 	INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work);
725 	head->delayed_removal_secs = 0;
726 
727 	/*
728 	 * If "multipath_always_on" is enabled, a multipath node is added
729 	 * regardless of whether the disk is single/multi ported, and whether
730 	 * the namespace is shared or private. If "multipath_always_on" is not
731 	 * enabled, a multipath node is added only if the subsystem supports
732 	 * multiple controllers and the "multipath" option is configured. In
733 	 * either case, for private namespaces, we ensure that the NSID is
734 	 * unique.
735 	 */
736 	if (!multipath_always_on) {
737 		if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
738 				!multipath)
739 			return 0;
740 	}
741 
742 	if (!nvme_is_unique_nsid(ctrl, head))
743 		return 0;
744 
745 	blk_set_stacking_limits(&lim);
746 	lim.dma_alignment = 3;
747 	lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT |
748 		BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES;
749 	if (head->ids.csi == NVME_CSI_ZNS)
750 		lim.features |= BLK_FEAT_ZONED;
751 
752 	head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
753 	if (IS_ERR(head->disk))
754 		return PTR_ERR(head->disk);
755 	head->disk->fops = &nvme_ns_head_ops;
756 	head->disk->private_data = head;
757 
758 	/*
759 	 * We need to suppress the partition scan from occuring within the
760 	 * controller's scan_work context. If a path error occurs here, the IO
761 	 * will wait until a path becomes available or all paths are torn down,
762 	 * but that action also occurs within scan_work, so it would deadlock.
763 	 * Defer the partition scan to a different context that does not block
764 	 * scan_work.
765 	 */
766 	set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
767 	sprintf(head->disk->disk_name, "nvme%dn%d",
768 			ctrl->subsys->instance, head->instance);
769 	nvme_tryget_ns_head(head);
770 	return 0;
771 }
772 
nvme_mpath_set_live(struct nvme_ns * ns)773 static void nvme_mpath_set_live(struct nvme_ns *ns)
774 {
775 	struct nvme_ns_head *head = ns->head;
776 	int rc;
777 
778 	if (!head->disk)
779 		return;
780 
781 	/*
782 	 * test_and_set_bit() is used because it is protecting against two nvme
783 	 * paths simultaneously calling device_add_disk() on the same namespace
784 	 * head.
785 	 */
786 	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
787 		rc = device_add_disk(&head->subsys->dev, head->disk,
788 				     nvme_ns_attr_groups);
789 		if (rc) {
790 			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
791 			return;
792 		}
793 		nvme_add_ns_head_cdev(head);
794 		kblockd_schedule_work(&head->partition_scan_work);
795 	}
796 
797 	nvme_mpath_add_sysfs_link(ns->head);
798 
799 	mutex_lock(&head->lock);
800 	if (nvme_path_is_optimized(ns)) {
801 		int node, srcu_idx;
802 
803 		srcu_idx = srcu_read_lock(&head->srcu);
804 		for_each_online_node(node)
805 			__nvme_find_path(head, node);
806 		srcu_read_unlock(&head->srcu, srcu_idx);
807 	}
808 	mutex_unlock(&head->lock);
809 
810 	synchronize_srcu(&head->srcu);
811 	kblockd_schedule_work(&head->requeue_work);
812 }
813 
nvme_parse_ana_log(struct nvme_ctrl * ctrl,void * data,int (* cb)(struct nvme_ctrl * ctrl,struct nvme_ana_group_desc *,void *))814 static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
815 		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
816 			void *))
817 {
818 	void *base = ctrl->ana_log_buf;
819 	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
820 	int error, i;
821 
822 	lockdep_assert_held(&ctrl->ana_lock);
823 
824 	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
825 		struct nvme_ana_group_desc *desc = base + offset;
826 		u32 nr_nsids;
827 		size_t nsid_buf_size;
828 
829 		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
830 			return -EINVAL;
831 
832 		nr_nsids = le32_to_cpu(desc->nnsids);
833 		nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
834 
835 		if (WARN_ON_ONCE(desc->grpid == 0))
836 			return -EINVAL;
837 		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
838 			return -EINVAL;
839 		if (WARN_ON_ONCE(desc->state == 0))
840 			return -EINVAL;
841 		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
842 			return -EINVAL;
843 
844 		offset += sizeof(*desc);
845 		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
846 			return -EINVAL;
847 
848 		error = cb(ctrl, desc, data);
849 		if (error)
850 			return error;
851 
852 		offset += nsid_buf_size;
853 	}
854 
855 	return 0;
856 }
857 
nvme_state_is_live(enum nvme_ana_state state)858 static inline bool nvme_state_is_live(enum nvme_ana_state state)
859 {
860 	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
861 }
862 
nvme_update_ns_ana_state(struct nvme_ana_group_desc * desc,struct nvme_ns * ns)863 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
864 		struct nvme_ns *ns)
865 {
866 	ns->ana_grpid = le32_to_cpu(desc->grpid);
867 	ns->ana_state = desc->state;
868 	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
869 	/*
870 	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
871 	 * and in turn to this path device.  However we cannot accept this I/O
872 	 * if the controller is not live.  This may deadlock if called from
873 	 * nvme_mpath_init_identify() and the ctrl will never complete
874 	 * initialization, preventing I/O from completing.  For this case we
875 	 * will reprocess the ANA log page in nvme_mpath_update() once the
876 	 * controller is ready.
877 	 */
878 	if (nvme_state_is_live(ns->ana_state) &&
879 	    nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
880 		nvme_mpath_set_live(ns);
881 	else {
882 		/*
883 		 * Add sysfs link from multipath head gendisk node to path
884 		 * device gendisk node.
885 		 * If path's ana state is live (i.e. state is either optimized
886 		 * or non-optimized) while we alloc the ns then sysfs link would
887 		 * be created from nvme_mpath_set_live(). In that case we would
888 		 * not fallthrough this code path. However for the path's ana
889 		 * state other than live, we call nvme_mpath_set_live() only
890 		 * after ana state transitioned to the live state. But we still
891 		 * want to create the sysfs link from head node to a path device
892 		 * irrespctive of the path's ana state.
893 		 * If we reach through here then it means that path's ana state
894 		 * is not live but still create the sysfs link to this path from
895 		 * head node if head node of the path has already come alive.
896 		 */
897 		if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags))
898 			nvme_mpath_add_sysfs_link(ns->head);
899 	}
900 }
901 
nvme_update_ana_state(struct nvme_ctrl * ctrl,struct nvme_ana_group_desc * desc,void * data)902 static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
903 		struct nvme_ana_group_desc *desc, void *data)
904 {
905 	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
906 	unsigned *nr_change_groups = data;
907 	struct nvme_ns *ns;
908 	int srcu_idx;
909 
910 	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
911 			le32_to_cpu(desc->grpid),
912 			nvme_ana_state_names[desc->state]);
913 
914 	if (desc->state == NVME_ANA_CHANGE)
915 		(*nr_change_groups)++;
916 
917 	if (!nr_nsids)
918 		return 0;
919 
920 	srcu_idx = srcu_read_lock(&ctrl->srcu);
921 	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
922 				 srcu_read_lock_held(&ctrl->srcu)) {
923 		unsigned nsid;
924 again:
925 		nsid = le32_to_cpu(desc->nsids[n]);
926 		if (ns->head->ns_id < nsid)
927 			continue;
928 		if (ns->head->ns_id == nsid)
929 			nvme_update_ns_ana_state(desc, ns);
930 		if (++n == nr_nsids)
931 			break;
932 		if (ns->head->ns_id > nsid)
933 			goto again;
934 	}
935 	srcu_read_unlock(&ctrl->srcu, srcu_idx);
936 	return 0;
937 }
938 
nvme_read_ana_log(struct nvme_ctrl * ctrl)939 static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
940 {
941 	u32 nr_change_groups = 0;
942 	int error;
943 
944 	mutex_lock(&ctrl->ana_lock);
945 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
946 			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
947 	if (error) {
948 		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
949 		goto out_unlock;
950 	}
951 
952 	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
953 			nvme_update_ana_state);
954 	if (error)
955 		goto out_unlock;
956 
957 	/*
958 	 * In theory we should have an ANATT timer per group as they might enter
959 	 * the change state at different times.  But that is a lot of overhead
960 	 * just to protect against a target that keeps entering new changes
961 	 * states while never finishing previous ones.  But we'll still
962 	 * eventually time out once all groups are in change state, so this
963 	 * isn't a big deal.
964 	 *
965 	 * We also double the ANATT value to provide some slack for transports
966 	 * or AEN processing overhead.
967 	 */
968 	if (nr_change_groups)
969 		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
970 	else
971 		timer_delete_sync(&ctrl->anatt_timer);
972 out_unlock:
973 	mutex_unlock(&ctrl->ana_lock);
974 	return error;
975 }
976 
nvme_ana_work(struct work_struct * work)977 static void nvme_ana_work(struct work_struct *work)
978 {
979 	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
980 
981 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
982 		return;
983 
984 	nvme_read_ana_log(ctrl);
985 }
986 
nvme_mpath_update(struct nvme_ctrl * ctrl)987 void nvme_mpath_update(struct nvme_ctrl *ctrl)
988 {
989 	u32 nr_change_groups = 0;
990 
991 	if (!ctrl->ana_log_buf)
992 		return;
993 
994 	mutex_lock(&ctrl->ana_lock);
995 	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
996 	mutex_unlock(&ctrl->ana_lock);
997 }
998 
nvme_anatt_timeout(struct timer_list * t)999 static void nvme_anatt_timeout(struct timer_list *t)
1000 {
1001 	struct nvme_ctrl *ctrl = timer_container_of(ctrl, t, anatt_timer);
1002 
1003 	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
1004 	nvme_reset_ctrl(ctrl);
1005 }
1006 
nvme_mpath_stop(struct nvme_ctrl * ctrl)1007 void nvme_mpath_stop(struct nvme_ctrl *ctrl)
1008 {
1009 	if (!nvme_ctrl_use_ana(ctrl))
1010 		return;
1011 	timer_delete_sync(&ctrl->anatt_timer);
1012 	cancel_work_sync(&ctrl->ana_work);
1013 }
1014 
1015 #define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
1016 	struct device_attribute subsys_attr_##_name =	\
1017 		__ATTR(_name, _mode, _show, _store)
1018 
nvme_subsys_iopolicy_show(struct device * dev,struct device_attribute * attr,char * buf)1019 static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
1020 		struct device_attribute *attr, char *buf)
1021 {
1022 	struct nvme_subsystem *subsys =
1023 		container_of(dev, struct nvme_subsystem, dev);
1024 
1025 	return sysfs_emit(buf, "%s\n",
1026 			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
1027 }
1028 
nvme_subsys_iopolicy_update(struct nvme_subsystem * subsys,int iopolicy)1029 static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys,
1030 		int iopolicy)
1031 {
1032 	struct nvme_ctrl *ctrl;
1033 	int old_iopolicy = READ_ONCE(subsys->iopolicy);
1034 
1035 	if (old_iopolicy == iopolicy)
1036 		return;
1037 
1038 	WRITE_ONCE(subsys->iopolicy, iopolicy);
1039 
1040 	/* iopolicy changes clear the mpath by design */
1041 	mutex_lock(&nvme_subsystems_lock);
1042 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1043 		nvme_mpath_clear_ctrl_paths(ctrl);
1044 	mutex_unlock(&nvme_subsystems_lock);
1045 
1046 	pr_notice("subsysnqn %s iopolicy changed from %s to %s\n",
1047 			subsys->subnqn,
1048 			nvme_iopolicy_names[old_iopolicy],
1049 			nvme_iopolicy_names[iopolicy]);
1050 }
1051 
nvme_subsys_iopolicy_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1052 static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
1053 		struct device_attribute *attr, const char *buf, size_t count)
1054 {
1055 	struct nvme_subsystem *subsys =
1056 		container_of(dev, struct nvme_subsystem, dev);
1057 	int i;
1058 
1059 	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
1060 		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
1061 			nvme_subsys_iopolicy_update(subsys, i);
1062 			return count;
1063 		}
1064 	}
1065 
1066 	return -EINVAL;
1067 }
1068 SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
1069 		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
1070 
ana_grpid_show(struct device * dev,struct device_attribute * attr,char * buf)1071 static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
1072 		char *buf)
1073 {
1074 	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
1075 }
1076 DEVICE_ATTR_RO(ana_grpid);
1077 
ana_state_show(struct device * dev,struct device_attribute * attr,char * buf)1078 static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
1079 		char *buf)
1080 {
1081 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1082 
1083 	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
1084 }
1085 DEVICE_ATTR_RO(ana_state);
1086 
queue_depth_show(struct device * dev,struct device_attribute * attr,char * buf)1087 static ssize_t queue_depth_show(struct device *dev,
1088 		struct device_attribute *attr, char *buf)
1089 {
1090 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1091 
1092 	if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD)
1093 		return 0;
1094 
1095 	return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active));
1096 }
1097 DEVICE_ATTR_RO(queue_depth);
1098 
numa_nodes_show(struct device * dev,struct device_attribute * attr,char * buf)1099 static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr,
1100 		char *buf)
1101 {
1102 	int node, srcu_idx;
1103 	nodemask_t numa_nodes;
1104 	struct nvme_ns *current_ns;
1105 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1106 	struct nvme_ns_head *head = ns->head;
1107 
1108 	if (head->subsys->iopolicy != NVME_IOPOLICY_NUMA)
1109 		return 0;
1110 
1111 	nodes_clear(numa_nodes);
1112 
1113 	srcu_idx = srcu_read_lock(&head->srcu);
1114 	for_each_node(node) {
1115 		current_ns = srcu_dereference(head->current_path[node],
1116 				&head->srcu);
1117 		if (ns == current_ns)
1118 			node_set(node, numa_nodes);
1119 	}
1120 	srcu_read_unlock(&head->srcu, srcu_idx);
1121 
1122 	return sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&numa_nodes));
1123 }
1124 DEVICE_ATTR_RO(numa_nodes);
1125 
delayed_removal_secs_show(struct device * dev,struct device_attribute * attr,char * buf)1126 static ssize_t delayed_removal_secs_show(struct device *dev,
1127 		struct device_attribute *attr, char *buf)
1128 {
1129 	struct gendisk *disk = dev_to_disk(dev);
1130 	struct nvme_ns_head *head = disk->private_data;
1131 	int ret;
1132 
1133 	mutex_lock(&head->subsys->lock);
1134 	ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs);
1135 	mutex_unlock(&head->subsys->lock);
1136 	return ret;
1137 }
1138 
delayed_removal_secs_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1139 static ssize_t delayed_removal_secs_store(struct device *dev,
1140 		struct device_attribute *attr, const char *buf, size_t count)
1141 {
1142 	struct gendisk *disk = dev_to_disk(dev);
1143 	struct nvme_ns_head *head = disk->private_data;
1144 	unsigned int sec;
1145 	int ret;
1146 
1147 	ret = kstrtouint(buf, 0, &sec);
1148 	if (ret < 0)
1149 		return ret;
1150 
1151 	mutex_lock(&head->subsys->lock);
1152 	head->delayed_removal_secs = sec;
1153 	if (sec)
1154 		set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
1155 	else
1156 		clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
1157 	mutex_unlock(&head->subsys->lock);
1158 	/*
1159 	 * Ensure that update to NVME_NSHEAD_QUEUE_IF_NO_PATH is seen
1160 	 * by its reader.
1161 	 */
1162 	synchronize_srcu(&head->srcu);
1163 
1164 	return count;
1165 }
1166 
1167 DEVICE_ATTR_RW(delayed_removal_secs);
1168 
nvme_lookup_ana_group_desc(struct nvme_ctrl * ctrl,struct nvme_ana_group_desc * desc,void * data)1169 static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
1170 		struct nvme_ana_group_desc *desc, void *data)
1171 {
1172 	struct nvme_ana_group_desc *dst = data;
1173 
1174 	if (desc->grpid != dst->grpid)
1175 		return 0;
1176 
1177 	*dst = *desc;
1178 	return -ENXIO; /* just break out of the loop */
1179 }
1180 
nvme_mpath_add_sysfs_link(struct nvme_ns_head * head)1181 void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
1182 {
1183 	struct device *target;
1184 	int rc, srcu_idx;
1185 	struct nvme_ns *ns;
1186 	struct kobject *kobj;
1187 
1188 	/*
1189 	 * Ensure head disk node is already added otherwise we may get invalid
1190 	 * kobj for head disk node
1191 	 */
1192 	if (!test_bit(GD_ADDED, &head->disk->state))
1193 		return;
1194 
1195 	kobj = &disk_to_dev(head->disk)->kobj;
1196 
1197 	/*
1198 	 * loop through each ns chained through the head->list and create the
1199 	 * sysfs link from head node to the ns path node
1200 	 */
1201 	srcu_idx = srcu_read_lock(&head->srcu);
1202 
1203 	list_for_each_entry_rcu(ns, &head->list, siblings) {
1204 		/*
1205 		 * Ensure that ns path disk node is already added otherwise we
1206 		 * may get invalid kobj name for target
1207 		 */
1208 		if (!test_bit(GD_ADDED, &ns->disk->state))
1209 			continue;
1210 
1211 		/*
1212 		 * Avoid creating link if it already exists for the given path.
1213 		 * When path ana state transitions from optimized to non-
1214 		 * optimized or vice-versa, the nvme_mpath_set_live() is
1215 		 * invoked which in truns call this function. Now if the sysfs
1216 		 * link already exists for the given path and we attempt to re-
1217 		 * create the link then sysfs code would warn about it loudly.
1218 		 * So we evaluate NVME_NS_SYSFS_ATTR_LINK flag here to ensure
1219 		 * that we're not creating duplicate link.
1220 		 * The test_and_set_bit() is used because it is protecting
1221 		 * against multiple nvme paths being simultaneously added.
1222 		 */
1223 		if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
1224 			continue;
1225 
1226 		target = disk_to_dev(ns->disk);
1227 		/*
1228 		 * Create sysfs link from head gendisk kobject @kobj to the
1229 		 * ns path gendisk kobject @target->kobj.
1230 		 */
1231 		rc = sysfs_add_link_to_group(kobj, nvme_ns_mpath_attr_group.name,
1232 				&target->kobj, dev_name(target));
1233 		if (unlikely(rc)) {
1234 			dev_err(disk_to_dev(ns->head->disk),
1235 					"failed to create link to %s\n",
1236 					dev_name(target));
1237 			clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags);
1238 		}
1239 	}
1240 
1241 	srcu_read_unlock(&head->srcu, srcu_idx);
1242 }
1243 
nvme_mpath_remove_sysfs_link(struct nvme_ns * ns)1244 void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
1245 {
1246 	struct device *target;
1247 	struct kobject *kobj;
1248 
1249 	if (!test_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
1250 		return;
1251 
1252 	target = disk_to_dev(ns->disk);
1253 	kobj = &disk_to_dev(ns->head->disk)->kobj;
1254 	sysfs_remove_link_from_group(kobj, nvme_ns_mpath_attr_group.name,
1255 			dev_name(target));
1256 	clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags);
1257 }
1258 
nvme_mpath_add_disk(struct nvme_ns * ns,__le32 anagrpid)1259 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
1260 {
1261 	if (nvme_ctrl_use_ana(ns->ctrl)) {
1262 		struct nvme_ana_group_desc desc = {
1263 			.grpid = anagrpid,
1264 			.state = 0,
1265 		};
1266 
1267 		mutex_lock(&ns->ctrl->ana_lock);
1268 		ns->ana_grpid = le32_to_cpu(anagrpid);
1269 		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
1270 		mutex_unlock(&ns->ctrl->ana_lock);
1271 		if (desc.state) {
1272 			/* found the group desc: update */
1273 			nvme_update_ns_ana_state(&desc, ns);
1274 		} else {
1275 			/* group desc not found: trigger a re-read */
1276 			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
1277 			queue_work(nvme_wq, &ns->ctrl->ana_work);
1278 		}
1279 	} else {
1280 		ns->ana_state = NVME_ANA_OPTIMIZED;
1281 		nvme_mpath_set_live(ns);
1282 	}
1283 
1284 #ifdef CONFIG_BLK_DEV_ZONED
1285 	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
1286 		ns->head->disk->nr_zones = ns->disk->nr_zones;
1287 #endif
1288 }
1289 
nvme_mpath_remove_disk(struct nvme_ns_head * head)1290 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
1291 {
1292 	bool remove = false;
1293 
1294 	mutex_lock(&head->subsys->lock);
1295 	/*
1296 	 * We are called when all paths have been removed, and at that point
1297 	 * head->list is expected to be empty. However, nvme_remove_ns() and
1298 	 * nvme_init_ns_head() can run concurrently and so if head->delayed_
1299 	 * removal_secs is configured, it is possible that by the time we reach
1300 	 * this point, head->list may no longer be empty. Therefore, we recheck
1301 	 * head->list here. If it is no longer empty then we skip enqueuing the
1302 	 * delayed head removal work.
1303 	 */
1304 	if (!list_empty(&head->list))
1305 		goto out;
1306 
1307 	if (head->delayed_removal_secs) {
1308 		/*
1309 		 * Ensure that no one could remove this module while the head
1310 		 * remove work is pending.
1311 		 */
1312 		if (!try_module_get(THIS_MODULE))
1313 			goto out;
1314 		mod_delayed_work(nvme_wq, &head->remove_work,
1315 				head->delayed_removal_secs * HZ);
1316 	} else {
1317 		list_del_init(&head->entry);
1318 		remove = true;
1319 	}
1320 out:
1321 	mutex_unlock(&head->subsys->lock);
1322 	if (remove)
1323 		nvme_remove_head(head);
1324 }
1325 
nvme_mpath_put_disk(struct nvme_ns_head * head)1326 void nvme_mpath_put_disk(struct nvme_ns_head *head)
1327 {
1328 	if (!head->disk)
1329 		return;
1330 	/* make sure all pending bios are cleaned up */
1331 	kblockd_schedule_work(&head->requeue_work);
1332 	flush_work(&head->requeue_work);
1333 	flush_work(&head->partition_scan_work);
1334 	put_disk(head->disk);
1335 }
1336 
nvme_mpath_init_ctrl(struct nvme_ctrl * ctrl)1337 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
1338 {
1339 	mutex_init(&ctrl->ana_lock);
1340 	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
1341 	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
1342 }
1343 
nvme_mpath_init_identify(struct nvme_ctrl * ctrl,struct nvme_id_ctrl * id)1344 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
1345 {
1346 	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
1347 	size_t ana_log_size;
1348 	int error = 0;
1349 
1350 	/* check if multipath is enabled and we have the capability */
1351 	if (!multipath || !ctrl->subsys ||
1352 	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
1353 		return 0;
1354 
1355 	/* initialize this in the identify path to cover controller resets */
1356 	atomic_set(&ctrl->nr_active, 0);
1357 
1358 	if (!ctrl->max_namespaces ||
1359 	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
1360 		dev_err(ctrl->device,
1361 			"Invalid MNAN value %u\n", ctrl->max_namespaces);
1362 		return -EINVAL;
1363 	}
1364 
1365 	ctrl->anacap = id->anacap;
1366 	ctrl->anatt = id->anatt;
1367 	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
1368 	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
1369 
1370 	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
1371 		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
1372 		ctrl->max_namespaces * sizeof(__le32);
1373 	if (ana_log_size > max_transfer_size) {
1374 		dev_err(ctrl->device,
1375 			"ANA log page size (%zd) larger than MDTS (%zd).\n",
1376 			ana_log_size, max_transfer_size);
1377 		dev_err(ctrl->device, "disabling ANA support.\n");
1378 		goto out_uninit;
1379 	}
1380 	if (ana_log_size > ctrl->ana_log_size) {
1381 		nvme_mpath_stop(ctrl);
1382 		nvme_mpath_uninit(ctrl);
1383 		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
1384 		if (!ctrl->ana_log_buf)
1385 			return -ENOMEM;
1386 	}
1387 	ctrl->ana_log_size = ana_log_size;
1388 	error = nvme_read_ana_log(ctrl);
1389 	if (error)
1390 		goto out_uninit;
1391 	return 0;
1392 
1393 out_uninit:
1394 	nvme_mpath_uninit(ctrl);
1395 	return error;
1396 }
1397 
nvme_mpath_uninit(struct nvme_ctrl * ctrl)1398 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
1399 {
1400 	kvfree(ctrl->ana_log_buf);
1401 	ctrl->ana_log_buf = NULL;
1402 	ctrl->ana_log_size = 0;
1403 }
1404