xref: /linux/drivers/md/dm.c (revision e72e8bf1c9847a12de74f2fd3ea1f5511866526b)
1 /*
2  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-core.h"
9 #include "dm-rq.h"
10 #include "dm-uevent.h"
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/sched/signal.h>
16 #include <linux/blkpg.h>
17 #include <linux/bio.h>
18 #include <linux/mempool.h>
19 #include <linux/dax.h>
20 #include <linux/slab.h>
21 #include <linux/idr.h>
22 #include <linux/uio.h>
23 #include <linux/hdreg.h>
24 #include <linux/delay.h>
25 #include <linux/wait.h>
26 #include <linux/pr.h>
27 #include <linux/refcount.h>
28 #include <linux/part_stat.h>
29 
30 #define DM_MSG_PREFIX "core"
31 
32 /*
33  * Cookies are numeric values sent with CHANGE and REMOVE
34  * uevents while resuming, removing or renaming the device.
35  */
36 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
37 #define DM_COOKIE_LENGTH 24
38 
39 static const char *_name = DM_NAME;
40 
41 static unsigned int major = 0;
42 static unsigned int _major = 0;
43 
44 static DEFINE_IDR(_minor_idr);
45 
46 static DEFINE_SPINLOCK(_minor_lock);
47 
48 static void do_deferred_remove(struct work_struct *w);
49 
50 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
51 
52 static struct workqueue_struct *deferred_remove_workqueue;
53 
54 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
55 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
56 
57 void dm_issue_global_event(void)
58 {
59 	atomic_inc(&dm_global_event_nr);
60 	wake_up(&dm_global_eventq);
61 }
62 
63 /*
64  * One of these is allocated (on-stack) per original bio.
65  */
66 struct clone_info {
67 	struct dm_table *map;
68 	struct bio *bio;
69 	struct dm_io *io;
70 	sector_t sector;
71 	unsigned sector_count;
72 };
73 
74 /*
75  * One of these is allocated per clone bio.
76  */
77 #define DM_TIO_MAGIC 7282014
78 struct dm_target_io {
79 	unsigned magic;
80 	struct dm_io *io;
81 	struct dm_target *ti;
82 	unsigned target_bio_nr;
83 	unsigned *len_ptr;
84 	bool inside_dm_io;
85 	struct bio clone;
86 };
87 
88 /*
89  * One of these is allocated per original bio.
90  * It contains the first clone used for that original.
91  */
92 #define DM_IO_MAGIC 5191977
93 struct dm_io {
94 	unsigned magic;
95 	struct mapped_device *md;
96 	blk_status_t status;
97 	atomic_t io_count;
98 	struct bio *orig_bio;
99 	unsigned long start_time;
100 	spinlock_t endio_lock;
101 	struct dm_stats_aux stats_aux;
102 	/* last member of dm_target_io is 'struct bio' */
103 	struct dm_target_io tio;
104 };
105 
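/*
 * Return the start of the per-bio data area that sits immediately in front
 * of the clone bio.  The offset differs depending on whether the
 * dm_target_io is embedded in a dm_io (the first clone of an original bio)
 * or was allocated on its own from the md's bio_set.
 */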
106 void *dm_per_bio_data(struct bio *bio, size_t data_size)
107 {
108 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
109 	if (!tio->inside_dm_io)
110 		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
111 	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
112 }
113 EXPORT_SYMBOL_GPL(dm_per_bio_data);
114 
115 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
116 {
117 	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
118 	if (io->magic == DM_IO_MAGIC)
119 		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
120 	BUG_ON(io->magic != DM_TIO_MAGIC);
121 	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
122 }
123 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
124 
125 unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
126 {
127 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
128 }
129 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
130 
131 #define MINOR_ALLOCED ((void *)-1)
132 
133 /*
134  * Bits for the md->flags field.
135  */
136 #define DMF_BLOCK_IO_FOR_SUSPEND 0
137 #define DMF_SUSPENDED 1
138 #define DMF_FROZEN 2
139 #define DMF_FREEING 3
140 #define DMF_DELETING 4
141 #define DMF_NOFLUSH_SUSPENDING 5
142 #define DMF_DEFERRED_REMOVE 6
143 #define DMF_SUSPENDED_INTERNALLY 7
144 
145 #define DM_NUMA_NODE NUMA_NO_NODE
146 static int dm_numa_node = DM_NUMA_NODE;
147 
148 /*
149  * For mempool pre-allocation at table load time.
150  */
151 struct dm_md_mempools {
152 	struct bio_set bs;
153 	struct bio_set io_bs;
154 };
155 
156 struct table_device {
157 	struct list_head list;
158 	refcount_t count;
159 	struct dm_dev dm_dev;
160 };
161 
162 /*
163  * Bio-based DM's mempools' reserved IOs set by the user.
164  */
165 #define RESERVED_BIO_BASED_IOS		16
166 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
167 
168 static int __dm_get_module_param_int(int *module_param, int min, int max)
169 {
170 	int param = READ_ONCE(*module_param);
171 	int modified_param = 0;
172 	bool modified = true;
173 
174 	if (param < min)
175 		modified_param = min;
176 	else if (param > max)
177 		modified_param = max;
178 	else
179 		modified = false;
180 
181 	if (modified) {
182 		(void)cmpxchg(module_param, param, modified_param);
183 		param = modified_param;
184 	}
185 
186 	return param;
187 }
188 
189 unsigned __dm_get_module_param(unsigned *module_param,
190 			       unsigned def, unsigned max)
191 {
192 	unsigned param = READ_ONCE(*module_param);
193 	unsigned modified_param = 0;
194 
195 	if (!param)
196 		modified_param = def;
197 	else if (param > max)
198 		modified_param = max;
199 
200 	if (modified_param) {
201 		(void)cmpxchg(module_param, param, modified_param);
202 		param = modified_param;
203 	}
204 
205 	return param;
206 }
207 
208 unsigned dm_get_reserved_bio_based_ios(void)
209 {
210 	return __dm_get_module_param(&reserved_bio_based_ios,
211 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
212 }
213 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
214 
215 static unsigned dm_get_numa_node(void)
216 {
217 	return __dm_get_module_param_int(&dm_numa_node,
218 					 DM_NUMA_NODE, num_online_nodes() - 1);
219 }
220 
221 static int __init local_init(void)
222 {
223 	int r;
224 
225 	r = dm_uevent_init();
226 	if (r)
227 		return r;
228 
229 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
230 	if (!deferred_remove_workqueue) {
231 		r = -ENOMEM;
232 		goto out_uevent_exit;
233 	}
234 
235 	_major = major;
236 	r = register_blkdev(_major, _name);
237 	if (r < 0)
238 		goto out_free_workqueue;
239 
240 	if (!_major)
241 		_major = r;
242 
243 	return 0;
244 
245 out_free_workqueue:
246 	destroy_workqueue(deferred_remove_workqueue);
247 out_uevent_exit:
248 	dm_uevent_exit();
249 
250 	return r;
251 }
252 
253 static void local_exit(void)
254 {
255 	flush_scheduled_work();
256 	destroy_workqueue(deferred_remove_workqueue);
257 
258 	unregister_blkdev(_major, _name);
259 	dm_uevent_exit();
260 
261 	_major = 0;
262 
263 	DMINFO("cleaned up");
264 }
265 
266 static int (*_inits[])(void) __initdata = {
267 	local_init,
268 	dm_target_init,
269 	dm_linear_init,
270 	dm_stripe_init,
271 	dm_io_init,
272 	dm_kcopyd_init,
273 	dm_interface_init,
274 	dm_statistics_init,
275 };
276 
277 static void (*_exits[])(void) = {
278 	local_exit,
279 	dm_target_exit,
280 	dm_linear_exit,
281 	dm_stripe_exit,
282 	dm_io_exit,
283 	dm_kcopyd_exit,
284 	dm_interface_exit,
285 	dm_statistics_exit,
286 };
287 
288 static int __init dm_init(void)
289 {
290 	const int count = ARRAY_SIZE(_inits);
291 
292 	int r, i;
293 
294 	for (i = 0; i < count; i++) {
295 		r = _inits[i]();
296 		if (r)
297 			goto bad;
298 	}
299 
300 	return 0;
301 
302 bad:
303 	while (i--)
304 		_exits[i]();
305 
306 	return r;
307 }
308 
309 static void __exit dm_exit(void)
310 {
311 	int i = ARRAY_SIZE(_exits);
312 
313 	while (i--)
314 		_exits[i]();
315 
316 	/*
317 	 * Should be empty by this point.
318 	 */
319 	idr_destroy(&_minor_idr);
320 }
321 
322 /*
323  * Block device functions
324  */
325 int dm_deleting_md(struct mapped_device *md)
326 {
327 	return test_bit(DMF_DELETING, &md->flags);
328 }
329 
330 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
331 {
332 	struct mapped_device *md;
333 
334 	spin_lock(&_minor_lock);
335 
336 	md = bdev->bd_disk->private_data;
337 	if (!md)
338 		goto out;
339 
340 	if (test_bit(DMF_FREEING, &md->flags) ||
341 	    dm_deleting_md(md)) {
342 		md = NULL;
343 		goto out;
344 	}
345 
346 	dm_get(md);
347 	atomic_inc(&md->open_count);
348 out:
349 	spin_unlock(&_minor_lock);
350 
351 	return md ? 0 : -ENXIO;
352 }
353 
354 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
355 {
356 	struct mapped_device *md;
357 
358 	spin_lock(&_minor_lock);
359 
360 	md = disk->private_data;
361 	if (WARN_ON(!md))
362 		goto out;
363 
364 	if (atomic_dec_and_test(&md->open_count) &&
365 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
366 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
367 
368 	dm_put(md);
369 out:
370 	spin_unlock(&_minor_lock);
371 }
372 
373 int dm_open_count(struct mapped_device *md)
374 {
375 	return atomic_read(&md->open_count);
376 }
377 
378 /*
379  * Guarantees nothing is using the device before it's deleted.
380  */
381 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
382 {
383 	int r = 0;
384 
385 	spin_lock(&_minor_lock);
386 
387 	if (dm_open_count(md)) {
388 		r = -EBUSY;
389 		if (mark_deferred)
390 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
391 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
392 		r = -EEXIST;
393 	else
394 		set_bit(DMF_DELETING, &md->flags);
395 
396 	spin_unlock(&_minor_lock);
397 
398 	return r;
399 }
400 
401 int dm_cancel_deferred_remove(struct mapped_device *md)
402 {
403 	int r = 0;
404 
405 	spin_lock(&_minor_lock);
406 
407 	if (test_bit(DMF_DELETING, &md->flags))
408 		r = -EBUSY;
409 	else
410 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
411 
412 	spin_unlock(&_minor_lock);
413 
414 	return r;
415 }
416 
417 static void do_deferred_remove(struct work_struct *w)
418 {
419 	dm_deferred_remove();
420 }
421 
422 sector_t dm_get_size(struct mapped_device *md)
423 {
424 	return get_capacity(md->disk);
425 }
426 
427 struct request_queue *dm_get_md_queue(struct mapped_device *md)
428 {
429 	return md->queue;
430 }
431 
432 struct dm_stats *dm_get_stats(struct mapped_device *md)
433 {
434 	return &md->stats;
435 }
436 
437 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
438 {
439 	struct mapped_device *md = bdev->bd_disk->private_data;
440 
441 	return dm_get_geometry(md, geo);
442 }
443 
444 #ifdef CONFIG_BLK_DEV_ZONED
445 int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
446 {
447 	struct dm_report_zones_args *args = data;
448 	sector_t sector_diff = args->tgt->begin - args->start;
449 
450 	/*
451 	 * Ignore zones beyond the target range.
452 	 */
453 	if (zone->start >= args->start + args->tgt->len)
454 		return 0;
455 
456 	/*
457 	 * Remap the start sector and write pointer position of the zone
458 	 * to match its position in the target range.
459 	 */
460 	zone->start += sector_diff;
461 	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
462 		if (zone->cond == BLK_ZONE_COND_FULL)
463 			zone->wp = zone->start + zone->len;
464 		else if (zone->cond == BLK_ZONE_COND_EMPTY)
465 			zone->wp = zone->start;
466 		else
467 			zone->wp += sector_diff;
468 	}
469 
470 	args->next_sector = zone->start + zone->len;
471 	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
472 }
473 EXPORT_SYMBOL_GPL(dm_report_zones_cb);
474 
475 static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
476 		unsigned int nr_zones, report_zones_cb cb, void *data)
477 {
478 	struct mapped_device *md = disk->private_data;
479 	struct dm_table *map;
480 	int srcu_idx, ret;
481 	struct dm_report_zones_args args = {
482 		.next_sector = sector,
483 		.orig_data = data,
484 		.orig_cb = cb,
485 	};
486 
487 	if (dm_suspended_md(md))
488 		return -EAGAIN;
489 
490 	map = dm_get_live_table(md, &srcu_idx);
491 	if (!map)
492 		return -EIO;
493 
494 	do {
495 		struct dm_target *tgt;
496 
497 		tgt = dm_table_find_target(map, args.next_sector);
498 		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
499 			ret = -EIO;
500 			goto out;
501 		}
502 
503 		args.tgt = tgt;
504 		ret = tgt->type->report_zones(tgt, &args, nr_zones);
505 		if (ret < 0)
506 			goto out;
507 	} while (args.zone_idx < nr_zones &&
508 		 args.next_sector < get_capacity(disk));
509 
510 	ret = args.zone_idx;
511 out:
512 	dm_put_live_table(md, srcu_idx);
513 	return ret;
514 }
515 #else
516 #define dm_blk_report_zones		NULL
517 #endif /* CONFIG_BLK_DEV_ZONED */
518 
519 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
520 			    struct block_device **bdev)
521 	__acquires(md->io_barrier)
522 {
523 	struct dm_target *tgt;
524 	struct dm_table *map;
525 	int r;
526 
527 retry:
528 	r = -ENOTTY;
529 	map = dm_get_live_table(md, srcu_idx);
530 	if (!map || !dm_table_get_size(map))
531 		return r;
532 
533 	/* We only support devices that have a single target */
534 	if (dm_table_get_num_targets(map) != 1)
535 		return r;
536 
537 	tgt = dm_table_get_target(map, 0);
538 	if (!tgt->type->prepare_ioctl)
539 		return r;
540 
541 	if (dm_suspended_md(md))
542 		return -EAGAIN;
543 
544 	r = tgt->type->prepare_ioctl(tgt, bdev);
545 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
546 		dm_put_live_table(md, *srcu_idx);
547 		msleep(10);
548 		goto retry;
549 	}
550 
551 	return r;
552 }
553 
554 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
555 	__releases(md->io_barrier)
556 {
557 	dm_put_live_table(md, srcu_idx);
558 }
559 
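/*
 * Forward a block ioctl to the device backing the single target, requiring
 * CAP_SYS_RAWIO when the target maps only a subset of that device.
 */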
560 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
561 			unsigned int cmd, unsigned long arg)
562 {
563 	struct mapped_device *md = bdev->bd_disk->private_data;
564 	int r, srcu_idx;
565 
566 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
567 	if (r < 0)
568 		goto out;
569 
570 	if (r > 0) {
571 		/*
572 		 * Target determined this ioctl is being issued against a
573 		 * subset of the parent bdev; require extra privileges.
574 		 */
575 		if (!capable(CAP_SYS_RAWIO)) {
576 			DMWARN_LIMIT(
577 	"%s: sending ioctl %x to DM device without required privilege.",
578 				current->comm, cmd);
579 			r = -ENOIOCTLCMD;
580 			goto out;
581 		}
582 	}
583 
584 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
585 out:
586 	dm_unprepare_ioctl(md, srcu_idx);
587 	return r;
588 }
589 
590 static void start_io_acct(struct dm_io *io);
591 
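/*
 * Allocate a dm_io for an original bio.  The dm_io, its embedded
 * dm_target_io and the first clone bio all come from one allocation out of
 * md->io_bs; I/O accounting is started before returning.
 */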
592 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
593 {
594 	struct dm_io *io;
595 	struct dm_target_io *tio;
596 	struct bio *clone;
597 
598 	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
599 	if (!clone)
600 		return NULL;
601 
602 	tio = container_of(clone, struct dm_target_io, clone);
603 	tio->inside_dm_io = true;
604 	tio->io = NULL;
605 
606 	io = container_of(tio, struct dm_io, tio);
607 	io->magic = DM_IO_MAGIC;
608 	io->status = 0;
609 	atomic_set(&io->io_count, 1);
610 	io->orig_bio = bio;
611 	io->md = md;
612 	spin_lock_init(&io->endio_lock);
613 
614 	start_io_acct(io);
615 
616 	return io;
617 }
618 
619 static void free_io(struct mapped_device *md, struct dm_io *io)
620 {
621 	bio_put(&io->tio.clone);
622 }
623 
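/*
 * Allocate a dm_target_io for one clone of ci->bio.  The first clone reuses
 * the dm_target_io embedded in ci->io; any further clones are allocated
 * from md->bs and marked as not being inside a dm_io.
 */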
624 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
625 				      unsigned target_bio_nr, gfp_t gfp_mask)
626 {
627 	struct dm_target_io *tio;
628 
629 	if (!ci->io->tio.io) {
630 		/* the dm_target_io embedded in ci->io is available */
631 		tio = &ci->io->tio;
632 	} else {
633 		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
634 		if (!clone)
635 			return NULL;
636 
637 		tio = container_of(clone, struct dm_target_io, clone);
638 		tio->inside_dm_io = false;
639 	}
640 
641 	tio->magic = DM_TIO_MAGIC;
642 	tio->io = ci->io;
643 	tio->ti = ti;
644 	tio->target_bio_nr = target_bio_nr;
645 
646 	return tio;
647 }
648 
649 static void free_tio(struct dm_target_io *tio)
650 {
651 	if (tio->inside_dm_io)
652 		return;
653 	bio_put(&tio->clone);
654 }
655 
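/*
 * Return true if any bio-based I/O is still in flight, by summing the
 * per-CPU in_flight counters of the device's part0.
 */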
656 static bool md_in_flight_bios(struct mapped_device *md)
657 {
658 	int cpu;
659 	struct hd_struct *part = &dm_disk(md)->part0;
660 	long sum = 0;
661 
662 	for_each_possible_cpu(cpu) {
663 		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
664 		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
665 	}
666 
667 	return sum != 0;
668 }
669 
670 static bool md_in_flight(struct mapped_device *md)
671 {
672 	if (queue_is_mq(md->queue))
673 		return blk_mq_queue_inflight(md->queue);
674 	else
675 		return md_in_flight_bios(md);
676 }
677 
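/*
 * Account the start of an original bio: record the start time, update the
 * generic block layer statistics and, if enabled, the dm-stats counters.
 */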
678 static void start_io_acct(struct dm_io *io)
679 {
680 	struct mapped_device *md = io->md;
681 	struct bio *bio = io->orig_bio;
682 
683 	io->start_time = jiffies;
684 
685 	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
686 			      &dm_disk(md)->part0);
687 
688 	if (unlikely(dm_stats_used(&md->stats)))
689 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
690 				    bio->bi_iter.bi_sector, bio_sectors(bio),
691 				    false, 0, &io->stats_aux);
692 }
693 
694 static void end_io_acct(struct dm_io *io)
695 {
696 	struct mapped_device *md = io->md;
697 	struct bio *bio = io->orig_bio;
698 	unsigned long duration = jiffies - io->start_time;
699 
700 	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
701 			    io->start_time);
702 
703 	if (unlikely(dm_stats_used(&md->stats)))
704 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
705 				    bio->bi_iter.bi_sector, bio_sectors(bio),
706 				    true, duration, &io->stats_aux);
707 
708 	/* nudge anyone waiting on suspend queue */
709 	if (unlikely(wq_has_sleeper(&md->wait)))
710 		wake_up(&md->wait);
711 }
712 
713 /*
714  * Add the bio to the list of deferred io.
715  */
716 static void queue_io(struct mapped_device *md, struct bio *bio)
717 {
718 	unsigned long flags;
719 
720 	spin_lock_irqsave(&md->deferred_lock, flags);
721 	bio_list_add(&md->deferred, bio);
722 	spin_unlock_irqrestore(&md->deferred_lock, flags);
723 	queue_work(md->wq, &md->work);
724 }
725 
726 /*
727  * Everyone (including functions in this file) should use this
728  * function to access the md->map field, and make sure they call
729  * dm_put_live_table() when finished.
730  */
731 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
732 {
733 	*srcu_idx = srcu_read_lock(&md->io_barrier);
734 
735 	return srcu_dereference(md->map, &md->io_barrier);
736 }
737 
738 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
739 {
740 	srcu_read_unlock(&md->io_barrier, srcu_idx);
741 }
742 
743 void dm_sync_table(struct mapped_device *md)
744 {
745 	synchronize_srcu(&md->io_barrier);
746 	synchronize_rcu_expedited();
747 }
748 
749 /*
750  * A fast alternative to dm_get_live_table/dm_put_live_table.
751  * The caller must not block between these two functions.
752  */
753 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
754 {
755 	rcu_read_lock();
756 	return rcu_dereference(md->map);
757 }
758 
759 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
760 {
761 	rcu_read_unlock();
762 }
763 
764 static char *_dm_claim_ptr = "I belong to device-mapper";
765 
766 /*
767  * Open a table device so we can use it as a map destination.
768  */
769 static int open_table_device(struct table_device *td, dev_t dev,
770 			     struct mapped_device *md)
771 {
772 	struct block_device *bdev;
773 
774 	int r;
775 
776 	BUG_ON(td->dm_dev.bdev);
777 
778 	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
779 	if (IS_ERR(bdev))
780 		return PTR_ERR(bdev);
781 
782 	r = bd_link_disk_holder(bdev, dm_disk(md));
783 	if (r) {
784 		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
785 		return r;
786 	}
787 
788 	td->dm_dev.bdev = bdev;
789 	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
790 	return 0;
791 }
792 
793 /*
794  * Close a table device that we've been using.
795  */
796 static void close_table_device(struct table_device *td, struct mapped_device *md)
797 {
798 	if (!td->dm_dev.bdev)
799 		return;
800 
801 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
802 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
803 	put_dax(td->dm_dev.dax_dev);
804 	td->dm_dev.bdev = NULL;
805 	td->dm_dev.dax_dev = NULL;
806 }
807 
808 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
809 					      fmode_t mode)
810 {
811 	struct table_device *td;
812 
813 	list_for_each_entry(td, l, list)
814 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
815 			return td;
816 
817 	return NULL;
818 }
819 
820 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
821 			struct dm_dev **result)
822 {
823 	int r;
824 	struct table_device *td;
825 
826 	mutex_lock(&md->table_devices_lock);
827 	td = find_table_device(&md->table_devices, dev, mode);
828 	if (!td) {
829 		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
830 		if (!td) {
831 			mutex_unlock(&md->table_devices_lock);
832 			return -ENOMEM;
833 		}
834 
835 		td->dm_dev.mode = mode;
836 		td->dm_dev.bdev = NULL;
837 
838 		if ((r = open_table_device(td, dev, md))) {
839 			mutex_unlock(&md->table_devices_lock);
840 			kfree(td);
841 			return r;
842 		}
843 
844 		format_dev_t(td->dm_dev.name, dev);
845 
846 		refcount_set(&td->count, 1);
847 		list_add(&td->list, &md->table_devices);
848 	} else {
849 		refcount_inc(&td->count);
850 	}
851 	mutex_unlock(&md->table_devices_lock);
852 
853 	*result = &td->dm_dev;
854 	return 0;
855 }
856 EXPORT_SYMBOL_GPL(dm_get_table_device);
857 
858 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
859 {
860 	struct table_device *td = container_of(d, struct table_device, dm_dev);
861 
862 	mutex_lock(&md->table_devices_lock);
863 	if (refcount_dec_and_test(&td->count)) {
864 		close_table_device(td, md);
865 		list_del(&td->list);
866 		kfree(td);
867 	}
868 	mutex_unlock(&md->table_devices_lock);
869 }
870 EXPORT_SYMBOL(dm_put_table_device);
871 
872 static void free_table_devices(struct list_head *devices)
873 {
874 	struct list_head *tmp, *next;
875 
876 	list_for_each_safe(tmp, next, devices) {
877 		struct table_device *td = list_entry(tmp, struct table_device, list);
878 
879 		DMWARN("dm_destroy: %s still exists with %d references",
880 		       td->dm_dev.name, refcount_read(&td->count));
881 		kfree(td);
882 	}
883 }
884 
885 /*
886  * Get the geometry associated with a dm device
887  */
888 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
889 {
890 	*geo = md->geometry;
891 
892 	return 0;
893 }
894 
895 /*
896  * Set the geometry of a device.
897  */
898 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
899 {
900 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
901 
902 	if (geo->start > sz) {
903 		DMWARN("Start sector is beyond the geometry limits.");
904 		return -EINVAL;
905 	}
906 
907 	md->geometry = *geo;
908 
909 	return 0;
910 }
911 
912 static int __noflush_suspending(struct mapped_device *md)
913 {
914 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
915 }
916 
917 /*
918  * Decrements the number of outstanding ios that a bio has been
919  * cloned into, completing the original io if necc.
920  */
921 static void dec_pending(struct dm_io *io, blk_status_t error)
922 {
923 	unsigned long flags;
924 	blk_status_t io_error;
925 	struct bio *bio;
926 	struct mapped_device *md = io->md;
927 
928 	/* Push-back supersedes any I/O errors */
929 	if (unlikely(error)) {
930 		spin_lock_irqsave(&io->endio_lock, flags);
931 		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
932 			io->status = error;
933 		spin_unlock_irqrestore(&io->endio_lock, flags);
934 	}
935 
936 	if (atomic_dec_and_test(&io->io_count)) {
937 		if (io->status == BLK_STS_DM_REQUEUE) {
938 			/*
939 			 * Target requested pushing back the I/O.
940 			 */
941 			spin_lock_irqsave(&md->deferred_lock, flags);
942 			if (__noflush_suspending(md))
943 				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
944 				bio_list_add_head(&md->deferred, io->orig_bio);
945 			else
946 				/* noflush suspend was interrupted. */
947 				io->status = BLK_STS_IOERR;
948 			spin_unlock_irqrestore(&md->deferred_lock, flags);
949 		}
950 
951 		io_error = io->status;
952 		bio = io->orig_bio;
953 		end_io_acct(io);
954 		free_io(md, io);
955 
956 		if (io_error == BLK_STS_DM_REQUEUE)
957 			return;
958 
959 		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
960 			/*
961 			 * Preflush done for flush with data, reissue
962 			 * without REQ_PREFLUSH.
963 			 */
964 			bio->bi_opf &= ~REQ_PREFLUSH;
965 			queue_io(md, bio);
966 		} else {
967 			/* done with normal IO or empty flush */
968 			if (io_error)
969 				bio->bi_status = io_error;
970 			bio_endio(bio);
971 		}
972 	}
973 }
974 
975 void disable_discard(struct mapped_device *md)
976 {
977 	struct queue_limits *limits = dm_get_queue_limits(md);
978 
979 	/* device doesn't really support DISCARD, disable it */
980 	limits->max_discard_sectors = 0;
981 	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
982 }
983 
984 void disable_write_same(struct mapped_device *md)
985 {
986 	struct queue_limits *limits = dm_get_queue_limits(md);
987 
988 	/* device doesn't really support WRITE SAME, disable it */
989 	limits->max_write_same_sectors = 0;
990 }
991 
992 void disable_write_zeroes(struct mapped_device *md)
993 {
994 	struct queue_limits *limits = dm_get_queue_limits(md);
995 
996 	/* device doesn't really support WRITE ZEROES, disable it */
997 	limits->max_write_zeroes_sectors = 0;
998 }
999 
1000 static void clone_endio(struct bio *bio)
1001 {
1002 	blk_status_t error = bio->bi_status;
1003 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1004 	struct dm_io *io = tio->io;
1005 	struct mapped_device *md = tio->io->md;
1006 	dm_endio_fn endio = tio->ti->type->end_io;
1007 
1008 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
1009 		if (bio_op(bio) == REQ_OP_DISCARD &&
1010 		    !bio->bi_disk->queue->limits.max_discard_sectors)
1011 			disable_discard(md);
1012 		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
1013 			 !bio->bi_disk->queue->limits.max_write_same_sectors)
1014 			disable_write_same(md);
1015 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1016 			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
1017 			disable_write_zeroes(md);
1018 	}
1019 
1020 	if (endio) {
1021 		int r = endio(tio->ti, bio, &error);
1022 		switch (r) {
1023 		case DM_ENDIO_REQUEUE:
1024 			error = BLK_STS_DM_REQUEUE;
1025 			/*FALLTHRU*/
1026 		case DM_ENDIO_DONE:
1027 			break;
1028 		case DM_ENDIO_INCOMPLETE:
1029 			/* The target will handle the io */
1030 			return;
1031 		default:
1032 			DMWARN("unimplemented target endio return value: %d", r);
1033 			BUG();
1034 		}
1035 	}
1036 
1037 	free_tio(tio);
1038 	dec_pending(io, error);
1039 }
1040 
1041 /*
1042  * Return maximum size of I/O possible at the supplied sector up to the current
1043  * target boundary.
1044  */
1045 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1046 {
1047 	sector_t target_offset = dm_target_offset(ti, sector);
1048 
1049 	return ti->len - target_offset;
1050 }
1051 
1052 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1053 {
1054 	sector_t len = max_io_len_target_boundary(sector, ti);
1055 	sector_t offset, max_len;
1056 
1057 	/*
1058 	 * Does the target need to split even further?
1059 	 */
1060 	if (ti->max_io_len) {
1061 		offset = dm_target_offset(ti, sector);
1062 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1063 			max_len = sector_div(offset, ti->max_io_len);
1064 		else
1065 			max_len = offset & (ti->max_io_len - 1);
1066 		max_len = ti->max_io_len - max_len;
1067 
1068 		if (len > max_len)
1069 			len = max_len;
1070 	}
1071 
1072 	return len;
1073 }
1074 
1075 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1076 {
1077 	if (len > UINT_MAX) {
1078 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1079 		      (unsigned long long)len, UINT_MAX);
1080 		ti->error = "Maximum size of target IO is too large";
1081 		return -EINVAL;
1082 	}
1083 
1084 	ti->max_io_len = (uint32_t) len;
1085 
1086 	return 0;
1087 }
1088 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1089 
1090 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1091 						sector_t sector, int *srcu_idx)
1092 	__acquires(md->io_barrier)
1093 {
1094 	struct dm_table *map;
1095 	struct dm_target *ti;
1096 
1097 	map = dm_get_live_table(md, srcu_idx);
1098 	if (!map)
1099 		return NULL;
1100 
1101 	ti = dm_table_find_target(map, sector);
1102 	if (!ti)
1103 		return NULL;
1104 
1105 	return ti;
1106 }
1107 
1108 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1109 				 long nr_pages, void **kaddr, pfn_t *pfn)
1110 {
1111 	struct mapped_device *md = dax_get_private(dax_dev);
1112 	sector_t sector = pgoff * PAGE_SECTORS;
1113 	struct dm_target *ti;
1114 	long len, ret = -EIO;
1115 	int srcu_idx;
1116 
1117 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1118 
1119 	if (!ti)
1120 		goto out;
1121 	if (!ti->type->direct_access)
1122 		goto out;
1123 	len = max_io_len(sector, ti) / PAGE_SECTORS;
1124 	if (len < 1)
1125 		goto out;
1126 	nr_pages = min(len, nr_pages);
1127 	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1128 
1129  out:
1130 	dm_put_live_table(md, srcu_idx);
1131 
1132 	return ret;
1133 }
1134 
1135 static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
1136 		int blocksize, sector_t start, sector_t len)
1137 {
1138 	struct mapped_device *md = dax_get_private(dax_dev);
1139 	struct dm_table *map;
1140 	int srcu_idx;
1141 	bool ret;
1142 
1143 	map = dm_get_live_table(md, &srcu_idx);
1144 	if (!map)
1145 		return false;
1146 
1147 	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
1148 
1149 	dm_put_live_table(md, srcu_idx);
1150 
1151 	return ret;
1152 }
1153 
1154 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1155 				    void *addr, size_t bytes, struct iov_iter *i)
1156 {
1157 	struct mapped_device *md = dax_get_private(dax_dev);
1158 	sector_t sector = pgoff * PAGE_SECTORS;
1159 	struct dm_target *ti;
1160 	long ret = 0;
1161 	int srcu_idx;
1162 
1163 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1164 
1165 	if (!ti)
1166 		goto out;
1167 	if (!ti->type->dax_copy_from_iter) {
1168 		ret = copy_from_iter(addr, bytes, i);
1169 		goto out;
1170 	}
1171 	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
1172  out:
1173 	dm_put_live_table(md, srcu_idx);
1174 
1175 	return ret;
1176 }
1177 
1178 static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1179 		void *addr, size_t bytes, struct iov_iter *i)
1180 {
1181 	struct mapped_device *md = dax_get_private(dax_dev);
1182 	sector_t sector = pgoff * PAGE_SECTORS;
1183 	struct dm_target *ti;
1184 	long ret = 0;
1185 	int srcu_idx;
1186 
1187 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1188 
1189 	if (!ti)
1190 		goto out;
1191 	if (!ti->type->dax_copy_to_iter) {
1192 		ret = copy_to_iter(addr, bytes, i);
1193 		goto out;
1194 	}
1195 	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
1196  out:
1197 	dm_put_live_table(md, srcu_idx);
1198 
1199 	return ret;
1200 }
1201 
1202 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1203 				  size_t nr_pages)
1204 {
1205 	struct mapped_device *md = dax_get_private(dax_dev);
1206 	sector_t sector = pgoff * PAGE_SECTORS;
1207 	struct dm_target *ti;
1208 	int ret = -EIO;
1209 	int srcu_idx;
1210 
1211 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1212 
1213 	if (!ti)
1214 		goto out;
1215 	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1216 		/*
1217		 * ->zero_page_range() is a mandatory dax operation.  If we
1218		 * are here, something is wrong.
1219 		 */
1220 		dm_put_live_table(md, srcu_idx);
1221 		goto out;
1222 	}
1223 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1224 
1225  out:
1226 	dm_put_live_table(md, srcu_idx);
1227 
1228 	return ret;
1229 }
1230 
1231 /*
1232  * A target may call dm_accept_partial_bio only from the map routine.  It is
1233  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
1234  * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
1235  *
1236  * dm_accept_partial_bio informs the dm that the target only wants to process
1237  * additional n_sectors sectors of the bio and the rest of the data should be
1238  * sent in a next bio.
1239  *
1240  * A diagram that explains the arithmetic:
1241  * +--------------------+---------------+-------+
1242  * |         1          |       2       |   3   |
1243  * +--------------------+---------------+-------+
1244  *
1245  * <-------------- *tio->len_ptr --------------->
1246  *                      <------- bi_size ------->
1247  *                      <-- n_sectors -->
1248  *
1249  * Region 1 was already iterated over with bio_advance or similar function.
1250  *	(it may be empty if the target doesn't use bio_advance)
1251  * Region 2 is the remaining bio size that the target wants to process.
1252  *	(it may be empty if region 1 is non-empty, although there is no reason
1253  *	 to make it empty)
1254  * The target requires that region 3 is to be sent in the next bio.
1255  *
1256  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1257  * the partially processed part (the sum of regions 1+2) must be the same for all
1258  * copies of the bio.
1259  */
1260 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1261 {
1262 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1263 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1264 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1265 	BUG_ON(bi_size > *tio->len_ptr);
1266 	BUG_ON(n_sectors > bi_size);
1267 	*tio->len_ptr -= bi_size - n_sectors;
1268 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1269 }
1270 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1271 
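/*
 * Pass a clone to its target's ->map method and act on the result:
 * submitted/remapped clones are dispatched, while KILL and REQUEUE free the
 * tio and complete the dm_io with the corresponding status.
 */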
1272 static blk_qc_t __map_bio(struct dm_target_io *tio)
1273 {
1274 	int r;
1275 	sector_t sector;
1276 	struct bio *clone = &tio->clone;
1277 	struct dm_io *io = tio->io;
1278 	struct mapped_device *md = io->md;
1279 	struct dm_target *ti = tio->ti;
1280 	blk_qc_t ret = BLK_QC_T_NONE;
1281 
1282 	clone->bi_end_io = clone_endio;
1283 
1284 	/*
1285 	 * Map the clone.  If r == 0 we don't need to do
1286 	 * anything, the target has assumed ownership of
1287 	 * this io.
1288 	 */
1289 	atomic_inc(&io->io_count);
1290 	sector = clone->bi_iter.bi_sector;
1291 
1292 	r = ti->type->map(ti, clone);
1293 	switch (r) {
1294 	case DM_MAPIO_SUBMITTED:
1295 		break;
1296 	case DM_MAPIO_REMAPPED:
1297 		/* the bio has been remapped so dispatch it */
1298 		trace_block_bio_remap(clone->bi_disk->queue, clone,
1299 				      bio_dev(io->orig_bio), sector);
1300 		if (md->type == DM_TYPE_NVME_BIO_BASED)
1301 			ret = direct_make_request(clone);
1302 		else
1303 			ret = generic_make_request(clone);
1304 		break;
1305 	case DM_MAPIO_KILL:
1306 		free_tio(tio);
1307 		dec_pending(io, BLK_STS_IOERR);
1308 		break;
1309 	case DM_MAPIO_REQUEUE:
1310 		free_tio(tio);
1311 		dec_pending(io, BLK_STS_DM_REQUEUE);
1312 		break;
1313 	default:
1314 		DMWARN("unimplemented target map return value: %d", r);
1315 		BUG();
1316 	}
1317 
1318 	return ret;
1319 }
1320 
1321 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1322 {
1323 	bio->bi_iter.bi_sector = sector;
1324 	bio->bi_iter.bi_size = to_bytes(len);
1325 }
1326 
1327 /*
1328  * Creates a bio that consists of range of complete bvecs.
1329  */
1330 static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1331 		     sector_t sector, unsigned len)
1332 {
1333 	struct bio *clone = &tio->clone;
1334 
1335 	__bio_clone_fast(clone, bio);
1336 
1337 	if (bio_integrity(bio)) {
1338 		int r;
1339 
1340 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1341 			     !dm_target_passes_integrity(tio->ti->type))) {
1342 			DMWARN("%s: the target %s doesn't support integrity data.",
1343 				dm_device_name(tio->io->md),
1344 				tio->ti->type->name);
1345 			return -EIO;
1346 		}
1347 
1348 		r = bio_integrity_clone(clone, bio, GFP_NOIO);
1349 		if (r < 0)
1350 			return r;
1351 	}
1352 
1353 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1354 	clone->bi_iter.bi_size = to_bytes(len);
1355 
1356 	if (bio_integrity(bio))
1357 		bio_integrity_trim(clone);
1358 
1359 	return 0;
1360 }
1361 
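/*
 * Allocate num_bios tios for duplicate clones of ci->bio.  A first pass uses
 * GFP_NOWAIT; if any allocation fails, everything is freed and a second pass
 * retries with GFP_NOIO while holding md->table_devices_lock.
 */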
1362 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1363 				struct dm_target *ti, unsigned num_bios)
1364 {
1365 	struct dm_target_io *tio;
1366 	int try;
1367 
1368 	if (!num_bios)
1369 		return;
1370 
1371 	if (num_bios == 1) {
1372 		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1373 		bio_list_add(blist, &tio->clone);
1374 		return;
1375 	}
1376 
1377 	for (try = 0; try < 2; try++) {
1378 		int bio_nr;
1379 		struct bio *bio;
1380 
1381 		if (try)
1382 			mutex_lock(&ci->io->md->table_devices_lock);
1383 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1384 			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1385 			if (!tio)
1386 				break;
1387 
1388 			bio_list_add(blist, &tio->clone);
1389 		}
1390 		if (try)
1391 			mutex_unlock(&ci->io->md->table_devices_lock);
1392 		if (bio_nr == num_bios)
1393 			return;
1394 
1395 		while ((bio = bio_list_pop(blist))) {
1396 			tio = container_of(bio, struct dm_target_io, clone);
1397 			free_tio(tio);
1398 		}
1399 	}
1400 }
1401 
1402 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
1403 					   struct dm_target_io *tio, unsigned *len)
1404 {
1405 	struct bio *clone = &tio->clone;
1406 
1407 	tio->len_ptr = len;
1408 
1409 	__bio_clone_fast(clone, ci->bio);
1410 	if (len)
1411 		bio_setup_sector(clone, ci->sector, *len);
1412 
1413 	return __map_bio(tio);
1414 }
1415 
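/*
 * Clone ci->bio num_bios times for the given target and map each clone.
 */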
1416 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1417 				  unsigned num_bios, unsigned *len)
1418 {
1419 	struct bio_list blist = BIO_EMPTY_LIST;
1420 	struct bio *bio;
1421 	struct dm_target_io *tio;
1422 
1423 	alloc_multiple_bios(&blist, ci, ti, num_bios);
1424 
1425 	while ((bio = bio_list_pop(&blist))) {
1426 		tio = container_of(bio, struct dm_target_io, clone);
1427 		(void) __clone_and_map_simple_bio(ci, tio, len);
1428 	}
1429 }
1430 
1431 static int __send_empty_flush(struct clone_info *ci)
1432 {
1433 	unsigned target_nr = 0;
1434 	struct dm_target *ti;
1435 
1436 	/*
1437 	 * Empty flush uses a statically initialized bio, as the base for
1438 	 * cloning.  However, blkg association requires that a bdev is
1439 	 * associated with a gendisk, which doesn't happen until the bdev is
1440 	 * opened.  So, blkg association is done at issue time of the flush
1441 	 * rather than when the device is created in alloc_dev().
1442 	 */
1443 	bio_set_dev(ci->bio, ci->io->md->bdev);
1444 
1445 	BUG_ON(bio_has_data(ci->bio));
1446 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1447 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1448 
1449 	bio_disassociate_blkg(ci->bio);
1450 
1451 	return 0;
1452 }
1453 
1454 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1455 				    sector_t sector, unsigned *len)
1456 {
1457 	struct bio *bio = ci->bio;
1458 	struct dm_target_io *tio;
1459 	int r;
1460 
1461 	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1462 	tio->len_ptr = len;
1463 	r = clone_bio(tio, bio, sector, *len);
1464 	if (r < 0) {
1465 		free_tio(tio);
1466 		return r;
1467 	}
1468 	(void) __map_bio(tio);
1469 
1470 	return 0;
1471 }
1472 
1473 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1474 
1475 static unsigned get_num_discard_bios(struct dm_target *ti)
1476 {
1477 	return ti->num_discard_bios;
1478 }
1479 
1480 static unsigned get_num_secure_erase_bios(struct dm_target *ti)
1481 {
1482 	return ti->num_secure_erase_bios;
1483 }
1484 
1485 static unsigned get_num_write_same_bios(struct dm_target *ti)
1486 {
1487 	return ti->num_write_same_bios;
1488 }
1489 
1490 static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
1491 {
1492 	return ti->num_write_zeroes_bios;
1493 }
1494 
1495 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1496 				       unsigned num_bios)
1497 {
1498 	unsigned len;
1499 
1500 	/*
1501 	 * Even though the device advertised support for this type of
1502 	 * request, that does not mean every target supports it, and
1503 	 * reconfiguration might also have changed that since the
1504 	 * check was performed.
1505 	 */
1506 	if (!num_bios)
1507 		return -EOPNOTSUPP;
1508 
1509 	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1510 
1511 	__send_duplicate_bios(ci, ti, num_bios, &len);
1512 
1513 	ci->sector += len;
1514 	ci->sector_count -= len;
1515 
1516 	return 0;
1517 }
1518 
1519 static int __send_discard(struct clone_info *ci, struct dm_target *ti)
1520 {
1521 	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
1522 }
1523 
1524 static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
1525 {
1526 	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
1527 }
1528 
1529 static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
1530 {
1531 	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
1532 }
1533 
1534 static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
1535 {
1536 	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
1537 }
1538 
1539 static bool is_abnormal_io(struct bio *bio)
1540 {
1541 	bool r = false;
1542 
1543 	switch (bio_op(bio)) {
1544 	case REQ_OP_DISCARD:
1545 	case REQ_OP_SECURE_ERASE:
1546 	case REQ_OP_WRITE_SAME:
1547 	case REQ_OP_WRITE_ZEROES:
1548 		r = true;
1549 		break;
1550 	}
1551 
1552 	return r;
1553 }
1554 
1555 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1556 				  int *result)
1557 {
1558 	struct bio *bio = ci->bio;
1559 
1560 	if (bio_op(bio) == REQ_OP_DISCARD)
1561 		*result = __send_discard(ci, ti);
1562 	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
1563 		*result = __send_secure_erase(ci, ti);
1564 	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
1565 		*result = __send_write_same(ci, ti);
1566 	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
1567 		*result = __send_write_zeroes(ci, ti);
1568 	else
1569 		return false;
1570 
1571 	return true;
1572 }
1573 
1574 /*
1575  * Select the correct strategy for processing a non-flush bio.
1576  */
1577 static int __split_and_process_non_flush(struct clone_info *ci)
1578 {
1579 	struct dm_target *ti;
1580 	unsigned len;
1581 	int r;
1582 
1583 	ti = dm_table_find_target(ci->map, ci->sector);
1584 	if (!ti)
1585 		return -EIO;
1586 
1587 	if (__process_abnormal_io(ci, ti, &r))
1588 		return r;
1589 
1590 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1591 
1592 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1593 	if (r < 0)
1594 		return r;
1595 
1596 	ci->sector += len;
1597 	ci->sector_count -= len;
1598 
1599 	return 0;
1600 }
1601 
1602 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1603 			    struct dm_table *map, struct bio *bio)
1604 {
1605 	ci->map = map;
1606 	ci->io = alloc_io(md, bio);
1607 	ci->sector = bio->bi_iter.bi_sector;
1608 }
1609 
1610 #define __dm_part_stat_sub(part, field, subnd)	\
1611 	(part_stat_get(part, field) -= (subnd))
1612 
1613 /*
1614  * Entry point to split a bio into clones and submit them to the targets.
1615  */
1616 static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1617 					struct dm_table *map, struct bio *bio)
1618 {
1619 	struct clone_info ci;
1620 	blk_qc_t ret = BLK_QC_T_NONE;
1621 	int error = 0;
1622 
1623 	init_clone_info(&ci, md, map, bio);
1624 
1625 	if (bio->bi_opf & REQ_PREFLUSH) {
1626 		struct bio flush_bio;
1627 
1628 		/*
1629		 * Use an on-stack bio for this; it's safe since we don't
1630 		 * need to reference it after submit. It's just used as
1631 		 * the basis for the clone(s).
1632 		 */
1633 		bio_init(&flush_bio, NULL, 0);
1634 		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1635 		ci.bio = &flush_bio;
1636 		ci.sector_count = 0;
1637 		error = __send_empty_flush(&ci);
1638 		/* dec_pending submits any data associated with flush */
1639 	} else if (op_is_zone_mgmt(bio_op(bio))) {
1640 		ci.bio = bio;
1641 		ci.sector_count = 0;
1642 		error = __split_and_process_non_flush(&ci);
1643 	} else {
1644 		ci.bio = bio;
1645 		ci.sector_count = bio_sectors(bio);
1646 		while (ci.sector_count && !error) {
1647 			error = __split_and_process_non_flush(&ci);
1648 			if (current->bio_list && ci.sector_count && !error) {
1649 				/*
1650 				 * Remainder must be passed to generic_make_request()
1651 				 * so that it gets handled *after* bios already submitted
1652 				 * have been completely processed.
1653 				 * We take a clone of the original to store in
1654 				 * ci.io->orig_bio to be used by end_io_acct() and
1655 				 * for dec_pending to use for completion handling.
1656 				 */
1657 				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1658 							  GFP_NOIO, &md->queue->bio_split);
1659 				ci.io->orig_bio = b;
1660 
1661 				/*
1662 				 * Adjust IO stats for each split, otherwise upon queue
1663 				 * reentry there will be redundant IO accounting.
1664 				 * NOTE: this is a stop-gap fix, a proper fix involves
1665 				 * significant refactoring of DM core's bio splitting
1666 				 * (by eliminating DM's splitting and just using bio_split)
1667 				 */
1668 				part_stat_lock();
1669 				__dm_part_stat_sub(&dm_disk(md)->part0,
1670 						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1671 				part_stat_unlock();
1672 
1673 				bio_chain(b, bio);
1674 				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
1675 				ret = generic_make_request(bio);
1676 				break;
1677 			}
1678 		}
1679 	}
1680 
1681 	/* drop the extra reference count */
1682 	dec_pending(ci.io, errno_to_blk_status(error));
1683 	return ret;
1684 }
1685 
1686 /*
1687  * Optimized variant of __split_and_process_bio that leverages the
1688  * fact that targets that use it do _not_ have a need to split bios.
1689  */
1690 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
1691 			      struct bio *bio, struct dm_target *ti)
1692 {
1693 	struct clone_info ci;
1694 	blk_qc_t ret = BLK_QC_T_NONE;
1695 	int error = 0;
1696 
1697 	init_clone_info(&ci, md, map, bio);
1698 
1699 	if (bio->bi_opf & REQ_PREFLUSH) {
1700 		struct bio flush_bio;
1701 
1702 		/*
1703		 * Use an on-stack bio for this; it's safe since we don't
1704 		 * need to reference it after submit. It's just used as
1705 		 * the basis for the clone(s).
1706 		 */
1707 		bio_init(&flush_bio, NULL, 0);
1708 		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1709 		ci.bio = &flush_bio;
1710 		ci.sector_count = 0;
1711 		error = __send_empty_flush(&ci);
1712 		/* dec_pending submits any data associated with flush */
1713 	} else {
1714 		struct dm_target_io *tio;
1715 
1716 		ci.bio = bio;
1717 		ci.sector_count = bio_sectors(bio);
1718 		if (__process_abnormal_io(&ci, ti, &error))
1719 			goto out;
1720 
1721 		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
1722 		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
1723 	}
1724 out:
1725 	/* drop the extra reference count */
1726 	dec_pending(ci.io, errno_to_blk_status(error));
1727 	return ret;
1728 }
1729 
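/*
 * If *bio extends beyond what the target can take in one go, split off the
 * leading part, chain it to the remainder, resubmit the remainder via
 * generic_make_request() and let the caller continue with the split piece.
 */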
1730 static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
1731 {
1732 	unsigned len, sector_count;
1733 
1734 	sector_count = bio_sectors(*bio);
1735 	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
1736 
1737 	if (sector_count > len) {
1738 		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
1739 
1740 		bio_chain(split, *bio);
1741 		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
1742 		generic_make_request(*bio);
1743 		*bio = split;
1744 	}
1745 }
1746 
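/*
 * Route a bio to the appropriate bio-based submission path: the optimized
 * __process_bio() for DM_TYPE_NVME_BIO_BASED devices, or the general
 * __split_and_process_bio() otherwise, splitting the bio first when we are
 * inside ->make_request_fn.
 */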
1747 static blk_qc_t dm_process_bio(struct mapped_device *md,
1748 			       struct dm_table *map, struct bio *bio)
1749 {
1750 	blk_qc_t ret = BLK_QC_T_NONE;
1751 	struct dm_target *ti = md->immutable_target;
1752 
1753 	if (unlikely(!map)) {
1754 		bio_io_error(bio);
1755 		return ret;
1756 	}
1757 
1758 	if (!ti) {
1759 		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
1760 		if (unlikely(!ti)) {
1761 			bio_io_error(bio);
1762 			return ret;
1763 		}
1764 	}
1765 
1766 	/*
1767	 * If we are inside ->make_request_fn we need to use blk_queue_split(),
1768	 * otherwise queue_limits for abnormal requests (e.g. discard, writesame,
1769	 * etc) won't be imposed.
1770 	 */
1771 	if (current->bio_list) {
1772 		if (is_abnormal_io(bio))
1773 			blk_queue_split(md->queue, &bio);
1774 		else
1775 			dm_queue_split(md, ti, &bio);
1776 	}
1777 
1778 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
1779 		return __process_bio(md, map, bio, ti);
1780 	else
1781 		return __split_and_process_bio(md, map, bio);
1782 }
1783 
1784 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1785 {
1786 	struct mapped_device *md = q->queuedata;
1787 	blk_qc_t ret = BLK_QC_T_NONE;
1788 	int srcu_idx;
1789 	struct dm_table *map;
1790 
1791 	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED)
1792 		return blk_mq_make_request(q, bio);
1793 
1794 	map = dm_get_live_table(md, &srcu_idx);
1795 
1796 	/* if we're suspended, we have to queue this io for later */
1797 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1798 		dm_put_live_table(md, srcu_idx);
1799 
1800 		if (!(bio->bi_opf & REQ_RAHEAD))
1801 			queue_io(md, bio);
1802 		else
1803 			bio_io_error(bio);
1804 		return ret;
1805 	}
1806 
1807 	ret = dm_process_bio(md, map, bio);
1808 
1809 	dm_put_live_table(md, srcu_idx);
1810 	return ret;
1811 }
1812 
1813 static int dm_any_congested(void *congested_data, int bdi_bits)
1814 {
1815 	int r = bdi_bits;
1816 	struct mapped_device *md = congested_data;
1817 	struct dm_table *map;
1818 
1819 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1820 		if (dm_request_based(md)) {
1821 			/*
1822 			 * With request-based DM we only need to check the
1823 			 * top-level queue for congestion.
1824 			 */
1825 			struct backing_dev_info *bdi = md->queue->backing_dev_info;
1826 			r = bdi->wb.congested->state & bdi_bits;
1827 		} else {
1828 			map = dm_get_live_table_fast(md);
1829 			if (map)
1830 				r = dm_table_any_congested(map, bdi_bits);
1831 			dm_put_live_table_fast(md);
1832 		}
1833 	}
1834 
1835 	return r;
1836 }
1837 
1838 /*-----------------------------------------------------------------
1839  * An IDR is used to keep track of allocated minor numbers.
1840  *---------------------------------------------------------------*/
1841 static void free_minor(int minor)
1842 {
1843 	spin_lock(&_minor_lock);
1844 	idr_remove(&_minor_idr, minor);
1845 	spin_unlock(&_minor_lock);
1846 }
1847 
1848 /*
1849  * See if the device with a specific minor # is free.
1850  */
1851 static int specific_minor(int minor)
1852 {
1853 	int r;
1854 
1855 	if (minor >= (1 << MINORBITS))
1856 		return -EINVAL;
1857 
1858 	idr_preload(GFP_KERNEL);
1859 	spin_lock(&_minor_lock);
1860 
1861 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1862 
1863 	spin_unlock(&_minor_lock);
1864 	idr_preload_end();
1865 	if (r < 0)
1866 		return r == -ENOSPC ? -EBUSY : r;
1867 	return 0;
1868 }
1869 
1870 static int next_free_minor(int *minor)
1871 {
1872 	int r;
1873 
1874 	idr_preload(GFP_KERNEL);
1875 	spin_lock(&_minor_lock);
1876 
1877 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1878 
1879 	spin_unlock(&_minor_lock);
1880 	idr_preload_end();
1881 	if (r < 0)
1882 		return r;
1883 	*minor = r;
1884 	return 0;
1885 }
1886 
1887 static const struct block_device_operations dm_blk_dops;
1888 static const struct dax_operations dm_dax_ops;
1889 
1890 static void dm_wq_work(struct work_struct *work);
1891 
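/*
 * Tear down whatever parts of a mapped_device alloc_dev() managed to set
 * up; safe to call on a partially constructed md.
 */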
1892 static void cleanup_mapped_device(struct mapped_device *md)
1893 {
1894 	if (md->wq)
1895 		destroy_workqueue(md->wq);
1896 	bioset_exit(&md->bs);
1897 	bioset_exit(&md->io_bs);
1898 
1899 	if (md->dax_dev) {
1900 		kill_dax(md->dax_dev);
1901 		put_dax(md->dax_dev);
1902 		md->dax_dev = NULL;
1903 	}
1904 
1905 	if (md->disk) {
1906 		spin_lock(&_minor_lock);
1907 		md->disk->private_data = NULL;
1908 		spin_unlock(&_minor_lock);
1909 		del_gendisk(md->disk);
1910 		put_disk(md->disk);
1911 	}
1912 
1913 	if (md->queue)
1914 		blk_cleanup_queue(md->queue);
1915 
1916 	cleanup_srcu_struct(&md->io_barrier);
1917 
1918 	if (md->bdev) {
1919 		bdput(md->bdev);
1920 		md->bdev = NULL;
1921 	}
1922 
1923 	mutex_destroy(&md->suspend_lock);
1924 	mutex_destroy(&md->type_lock);
1925 	mutex_destroy(&md->table_devices_lock);
1926 
1927 	dm_mq_cleanup_mapped_device(md);
1928 }
1929 
1930 /*
1931  * Allocate and initialise a blank device with a given minor.
1932  */
1933 static struct mapped_device *alloc_dev(int minor)
1934 {
1935 	int r, numa_node_id = dm_get_numa_node();
1936 	struct mapped_device *md;
1937 	void *old_md;
1938 
1939 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1940 	if (!md) {
1941 		DMWARN("unable to allocate device, out of memory.");
1942 		return NULL;
1943 	}
1944 
1945 	if (!try_module_get(THIS_MODULE))
1946 		goto bad_module_get;
1947 
1948 	/* get a minor number for the dev */
1949 	if (minor == DM_ANY_MINOR)
1950 		r = next_free_minor(&minor);
1951 	else
1952 		r = specific_minor(minor);
1953 	if (r < 0)
1954 		goto bad_minor;
1955 
1956 	r = init_srcu_struct(&md->io_barrier);
1957 	if (r < 0)
1958 		goto bad_io_barrier;
1959 
1960 	md->numa_node_id = numa_node_id;
1961 	md->init_tio_pdu = false;
1962 	md->type = DM_TYPE_NONE;
1963 	mutex_init(&md->suspend_lock);
1964 	mutex_init(&md->type_lock);
1965 	mutex_init(&md->table_devices_lock);
1966 	spin_lock_init(&md->deferred_lock);
1967 	atomic_set(&md->holders, 1);
1968 	atomic_set(&md->open_count, 0);
1969 	atomic_set(&md->event_nr, 0);
1970 	atomic_set(&md->uevent_seq, 0);
1971 	INIT_LIST_HEAD(&md->uevent_list);
1972 	INIT_LIST_HEAD(&md->table_devices);
1973 	spin_lock_init(&md->uevent_lock);
1974 
1975 	/*
1976	 * Default to the bio-based ->make_request_fn until the DM table is
1977	 * loaded and md->type is established.  If a request-based table is
1978	 * loaded, blk-mq will override accordingly.
1979 	 */
1980 	md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
1981 	if (!md->queue)
1982 		goto bad;
1983 	md->queue->queuedata = md;
1984 
1985 	md->disk = alloc_disk_node(1, md->numa_node_id);
1986 	if (!md->disk)
1987 		goto bad;
1988 
1989 	init_waitqueue_head(&md->wait);
1990 	INIT_WORK(&md->work, dm_wq_work);
1991 	init_waitqueue_head(&md->eventq);
1992 	init_completion(&md->kobj_holder.completion);
1993 
1994 	md->disk->major = _major;
1995 	md->disk->first_minor = minor;
1996 	md->disk->fops = &dm_blk_dops;
1997 	md->disk->queue = md->queue;
1998 	md->disk->private_data = md;
1999 	sprintf(md->disk->disk_name, "dm-%d", minor);
2000 
2001 	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
2002 		md->dax_dev = alloc_dax(md, md->disk->disk_name,
2003 					&dm_dax_ops, 0);
2004 		if (IS_ERR(md->dax_dev))
2005 			goto bad;
2006 	}
2007 
2008 	add_disk_no_queue_reg(md->disk);
2009 	format_dev_t(md->name, MKDEV(_major, minor));
2010 
2011 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
2012 	if (!md->wq)
2013 		goto bad;
2014 
2015 	md->bdev = bdget_disk(md->disk, 0);
2016 	if (!md->bdev)
2017 		goto bad;
2018 
2019 	dm_stats_init(&md->stats);
2020 
2021 	/* Populate the mapping, nobody knows we exist yet */
2022 	spin_lock(&_minor_lock);
2023 	old_md = idr_replace(&_minor_idr, md, minor);
2024 	spin_unlock(&_minor_lock);
2025 
2026 	BUG_ON(old_md != MINOR_ALLOCED);
2027 
2028 	return md;
2029 
2030 bad:
2031 	cleanup_mapped_device(md);
2032 bad_io_barrier:
2033 	free_minor(minor);
2034 bad_minor:
2035 	module_put(THIS_MODULE);
2036 bad_module_get:
2037 	kvfree(md);
2038 	return NULL;
2039 }
2040 
2041 static void unlock_fs(struct mapped_device *md);
2042 
2043 static void free_dev(struct mapped_device *md)
2044 {
2045 	int minor = MINOR(disk_devt(md->disk));
2046 
2047 	unlock_fs(md);
2048 
2049 	cleanup_mapped_device(md);
2050 
2051 	free_table_devices(&md->table_devices);
2052 	dm_stats_cleanup(&md->stats);
2053 	free_minor(minor);
2054 
2055 	module_put(THIS_MODULE);
2056 	kvfree(md);
2057 }
2058 
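/*
 * Take over the mempools supplied with the table: for bio-based tables the
 * md biosets are (re)initialised from them, for request-based tables the
 * existing biosets are kept.  The table's copies are freed either way.
 */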
2059 static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
2060 {
2061 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2062 	int ret = 0;
2063 
2064 	if (dm_table_bio_based(t)) {
2065 		/*
2066 		 * The md may already have mempools that need changing.
2067 		 * If so, reload the bioset because front_pad may have
2068 		 * changed with the newly loaded table.
2069 		 */
2070 		bioset_exit(&md->bs);
2071 		bioset_exit(&md->io_bs);
2072 
2073 	} else if (bioset_initialized(&md->bs)) {
2074 		/*
2075 		 * There's no need to reload with request-based dm
2076 		 * because the size of front_pad doesn't change.
2077 		 * Note for the future: if you do reload the bioset,
2078 		 * prepped requests in the queue may still refer
2079 		 * to bios from the old bioset, so you must walk
2080 		 * the queue and unprep them.
2081 		 */
2082 		goto out;
2083 	}
2084 
2085 	BUG_ON(!p ||
2086 	       bioset_initialized(&md->bs) ||
2087 	       bioset_initialized(&md->io_bs));
2088 
2089 	ret = bioset_init_from_src(&md->bs, &p->bs);
2090 	if (ret)
2091 		goto out;
2092 	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
2093 	if (ret)
2094 		bioset_exit(&md->bs);
2095 out:
2096 	/* mempool bind completed, no longer need any mempools in the table */
2097 	dm_table_free_md_mempools(t);
2098 	return ret;
2099 }
2100 
2101 /*
2102  * Table event callback: relay queued uevents and notify event waiters.
2103  */
2104 static void event_callback(void *context)
2105 {
2106 	unsigned long flags;
2107 	LIST_HEAD(uevents);
2108 	struct mapped_device *md = (struct mapped_device *) context;
2109 
2110 	spin_lock_irqsave(&md->uevent_lock, flags);
2111 	list_splice_init(&md->uevent_list, &uevents);
2112 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2113 
2114 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2115 
2116 	atomic_inc(&md->event_nr);
2117 	wake_up(&md->eventq);
2118 	dm_issue_global_event();
2119 }
2120 
2121 /*
2122  * Protected by md->suspend_lock obtained by dm_swap_table().
2123  */
2124 static void __set_size(struct mapped_device *md, sector_t size)
2125 {
2126 	lockdep_assert_held(&md->suspend_lock);
2127 
2128 	set_capacity(md->disk, size);
2129 
2130 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2131 }
2132 
2133 /*
2134  * Bind a table to the device; returns the old map, which the caller must destroy.
2135  */
2136 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2137 			       struct queue_limits *limits)
2138 {
2139 	struct dm_table *old_map;
2140 	struct request_queue *q = md->queue;
2141 	bool request_based = dm_table_request_based(t);
2142 	sector_t size;
2143 	int ret;
2144 
2145 	lockdep_assert_held(&md->suspend_lock);
2146 
2147 	size = dm_table_get_size(t);
2148 
2149 	/*
2150 	 * Wipe any geometry if the size of the table changed.
2151 	 */
2152 	if (size != dm_get_size(md))
2153 		memset(&md->geometry, 0, sizeof(md->geometry));
2154 
2155 	__set_size(md, size);
2156 
2157 	dm_table_event_callback(t, event_callback, md);
2158 
2159 	/*
2160 	 * The queue hasn't been stopped yet if the old table type wasn't
2161 	 * request-based during suspension, so stop it now to prevent I/O
2162 	 * from being mapped before resume.
2163 	 * This must be done before setting the queue restrictions, because
2164 	 * request-based dm may start running as soon as they are set.
2165 	 */
2166 	if (request_based)
2167 		dm_stop_queue(q);
2168 
2169 	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
2170 		/*
2171 		 * Leverage the fact that request-based DM targets and
2172 		 * NVMe bio-based targets are immutable singletons - this is
2173 		 * used to optimize dm_request_fn, dm_mq_queue_rq and
2174 		 * __process_bio.
2175 		 */
2176 		md->immutable_target = dm_table_get_immutable_target(t);
2177 	}
2178 
2179 	ret = __bind_mempools(md, t);
2180 	if (ret) {
2181 		old_map = ERR_PTR(ret);
2182 		goto out;
2183 	}
2184 
2185 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2186 	rcu_assign_pointer(md->map, (void *)t);
2187 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2188 
2189 	dm_table_set_restrictions(t, q, limits);
2190 	if (old_map)
2191 		dm_sync_table(md);
2192 
2193 out:
2194 	return old_map;
2195 }
2196 
2197 /*
2198  * Returns unbound table for the caller to free.
2199  */
2200 static struct dm_table *__unbind(struct mapped_device *md)
2201 {
2202 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2203 
2204 	if (!map)
2205 		return NULL;
2206 
2207 	dm_table_event_callback(map, NULL, NULL);
2208 	RCU_INIT_POINTER(md->map, NULL);
2209 	dm_sync_table(md);
2210 
2211 	return map;
2212 }
2213 
2214 /*
2215  * Constructor for a new device.
2216  */
2217 int dm_create(int minor, struct mapped_device **result)
2218 {
2219 	int r;
2220 	struct mapped_device *md;
2221 
2222 	md = alloc_dev(minor);
2223 	if (!md)
2224 		return -ENXIO;
2225 
2226 	r = dm_sysfs_init(md);
2227 	if (r) {
2228 		free_dev(md);
2229 		return r;
2230 	}
2231 
2232 	*result = md;
2233 	return 0;
2234 }
2235 
2236 /*
2237  * Functions to manage md->type.
2238  * All are required to hold md->type_lock.
2239  */
2240 void dm_lock_md_type(struct mapped_device *md)
2241 {
2242 	mutex_lock(&md->type_lock);
2243 }
2244 
2245 void dm_unlock_md_type(struct mapped_device *md)
2246 {
2247 	mutex_unlock(&md->type_lock);
2248 }
2249 
2250 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2251 {
2252 	BUG_ON(!mutex_is_locked(&md->type_lock));
2253 	md->type = type;
2254 }
2255 
2256 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2257 {
2258 	return md->type;
2259 }
2260 
2261 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2262 {
2263 	return md->immutable_target_type;
2264 }
2265 
2266 /*
2267  * The queue_limits are only valid as long as you have a reference
2268  * count on 'md'.
2269  */
2270 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2271 {
2272 	BUG_ON(!atomic_read(&md->holders));
2273 	return &md->queue->limits;
2274 }
2275 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2276 
2277 static void dm_init_congested_fn(struct mapped_device *md)
2278 {
2279 	md->queue->backing_dev_info->congested_data = md;
2280 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
2281 }
2282 
2283 /*
2284  * Set up the DM device's queue based on md->type
2285  */
2286 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2287 {
2288 	int r;
2289 	struct queue_limits limits;
2290 	enum dm_queue_mode type = dm_get_md_type(md);
2291 
2292 	switch (type) {
2293 	case DM_TYPE_REQUEST_BASED:
2294 		r = dm_mq_init_request_queue(md, t);
2295 		if (r) {
2296 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2297 			return r;
2298 		}
2299 		dm_init_congested_fn(md);
2300 		break;
2301 	case DM_TYPE_BIO_BASED:
2302 	case DM_TYPE_DAX_BIO_BASED:
2303 	case DM_TYPE_NVME_BIO_BASED:
2304 		dm_init_congested_fn(md);
2305 		break;
2306 	case DM_TYPE_NONE:
2307 		WARN_ON_ONCE(true);
2308 		break;
2309 	}
2310 
2311 	r = dm_calculate_queue_limits(t, &limits);
2312 	if (r) {
2313 		DMERR("Cannot calculate initial queue limits");
2314 		return r;
2315 	}
2316 	dm_table_set_restrictions(t, md->queue, &limits);
2317 	blk_register_queue(md->disk);
2318 
2319 	return 0;
2320 }
2321 
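/*
 * Look up a mapped_device by dev_t and take a reference on it.
 * Returns NULL if no live device with that number exists.
 */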
2322 struct mapped_device *dm_get_md(dev_t dev)
2323 {
2324 	struct mapped_device *md;
2325 	unsigned minor = MINOR(dev);
2326 
2327 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2328 		return NULL;
2329 
2330 	spin_lock(&_minor_lock);
2331 
2332 	md = idr_find(&_minor_idr, minor);
2333 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2334 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2335 		md = NULL;
2336 		goto out;
2337 	}
2338 	dm_get(md);
2339 out:
2340 	spin_unlock(&_minor_lock);
2341 
2342 	return md;
2343 }
2344 EXPORT_SYMBOL_GPL(dm_get_md);
2345 
2346 void *dm_get_mdptr(struct mapped_device *md)
2347 {
2348 	return md->interface_ptr;
2349 }
2350 
2351 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2352 {
2353 	md->interface_ptr = ptr;
2354 }
2355 
2356 void dm_get(struct mapped_device *md)
2357 {
2358 	atomic_inc(&md->holders);
2359 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2360 }
2361 
2362 int dm_hold(struct mapped_device *md)
2363 {
2364 	spin_lock(&_minor_lock);
2365 	if (test_bit(DMF_FREEING, &md->flags)) {
2366 		spin_unlock(&_minor_lock);
2367 		return -EBUSY;
2368 	}
2369 	dm_get(md);
2370 	spin_unlock(&_minor_lock);
2371 	return 0;
2372 }
2373 EXPORT_SYMBOL_GPL(dm_hold);
2374 
2375 const char *dm_device_name(struct mapped_device *md)
2376 {
2377 	return md->name;
2378 }
2379 EXPORT_SYMBOL_GPL(dm_device_name);
2380 
2381 static void __dm_destroy(struct mapped_device *md, bool wait)
2382 {
2383 	struct dm_table *map;
2384 	int srcu_idx;
2385 
2386 	might_sleep();
2387 
2388 	spin_lock(&_minor_lock);
2389 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2390 	set_bit(DMF_FREEING, &md->flags);
2391 	spin_unlock(&_minor_lock);
2392 
2393 	blk_set_queue_dying(md->queue);
2394 
2395 	/*
2396 	 * Take suspend_lock so that presuspend and postsuspend methods
2397 	 * do not race with internal suspend.
2398 	 */
2399 	mutex_lock(&md->suspend_lock);
2400 	map = dm_get_live_table(md, &srcu_idx);
2401 	if (!dm_suspended_md(md)) {
2402 		dm_table_presuspend_targets(map);
2403 		set_bit(DMF_SUSPENDED, &md->flags);
2404 		dm_table_postsuspend_targets(map);
2405 	}
2406 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2407 	dm_put_live_table(md, srcu_idx);
2408 	mutex_unlock(&md->suspend_lock);
2409 
2410 	/*
2411 	 * Rare, but there may still be I/O requests in flight that have
2412 	 * yet to complete.  Wait for all references to disappear.
2413 	 * No one should increment the reference count of the mapped_device
2414 	 * after its state becomes DMF_FREEING.
2415 	 */
2416 	if (wait)
2417 		while (atomic_read(&md->holders))
2418 			msleep(1);
2419 	else if (atomic_read(&md->holders))
2420 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2421 		       dm_device_name(md), atomic_read(&md->holders));
2422 
2423 	dm_sysfs_exit(md);
2424 	dm_table_destroy(__unbind(md));
2425 	free_dev(md);
2426 }
2427 
2428 void dm_destroy(struct mapped_device *md)
2429 {
2430 	__dm_destroy(md, true);
2431 }
2432 
2433 void dm_destroy_immediate(struct mapped_device *md)
2434 {
2435 	__dm_destroy(md, false);
2436 }
2437 
2438 void dm_put(struct mapped_device *md)
2439 {
2440 	atomic_dec(&md->holders);
2441 }
2442 EXPORT_SYMBOL_GPL(dm_put);
2443 
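/*
 * Wait, in the given task state, until no I/O remains in flight on md.
 * Returns -EINTR if the wait is interrupted by a signal.
 */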
2444 static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2445 {
2446 	int r = 0;
2447 	DEFINE_WAIT(wait);
2448 
2449 	while (1) {
2450 		prepare_to_wait(&md->wait, &wait, task_state);
2451 
2452 		if (!md_in_flight(md))
2453 			break;
2454 
2455 		if (signal_pending_state(task_state, current)) {
2456 			r = -EINTR;
2457 			break;
2458 		}
2459 
2460 		io_schedule();
2461 	}
2462 	finish_wait(&md->wait, &wait);
2463 
2464 	return r;
2465 }
2466 
2467 /*
2468  * Process the deferred bios
2469  */
2470 static void dm_wq_work(struct work_struct *work)
2471 {
2472 	struct mapped_device *md = container_of(work, struct mapped_device,
2473 						work);
2474 	struct bio *c;
2475 	int srcu_idx;
2476 	struct dm_table *map;
2477 
2478 	map = dm_get_live_table(md, &srcu_idx);
2479 
2480 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2481 		spin_lock_irq(&md->deferred_lock);
2482 		c = bio_list_pop(&md->deferred);
2483 		spin_unlock_irq(&md->deferred_lock);
2484 
2485 		if (!c)
2486 			break;
2487 
2488 		if (dm_request_based(md))
2489 			(void) generic_make_request(c);
2490 		else
2491 			(void) dm_process_bio(md, map, c);
2492 	}
2493 
2494 	dm_put_live_table(md, srcu_idx);
2495 }
2496 
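/*
 * Allow bio submission again and kick the worker that replays the
 * deferred bios (dm_wq_work).
 */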
2497 static void dm_queue_flush(struct mapped_device *md)
2498 {
2499 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2500 	smp_mb__after_atomic();
2501 	queue_work(md->wq, &md->work);
2502 }
2503 
2504 /*
2505  * Swap in a new table, returning the old one for the caller to destroy.
2506  */
2507 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2508 {
2509 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2510 	struct queue_limits limits;
2511 	int r;
2512 
2513 	mutex_lock(&md->suspend_lock);
2514 
2515 	/* device must be suspended */
2516 	if (!dm_suspended_md(md))
2517 		goto out;
2518 
2519 	/*
2520 	 * If the new table has no data devices, retain the existing limits.
2521 	 * This helps multipath with queue_if_no_path: if all paths disappear,
2522 	 * new I/O is queued based on these limits, and later some paths
2523 	 * may reappear.
2524 	 */
2525 	if (dm_table_has_no_data_devices(table)) {
2526 		live_map = dm_get_live_table_fast(md);
2527 		if (live_map)
2528 			limits = md->queue->limits;
2529 		dm_put_live_table_fast(md);
2530 	}
2531 
2532 	if (!live_map) {
2533 		r = dm_calculate_queue_limits(table, &limits);
2534 		if (r) {
2535 			map = ERR_PTR(r);
2536 			goto out;
2537 		}
2538 	}
2539 
2540 	map = __bind(md, table, &limits);
2541 	dm_issue_global_event();
2542 
2543 out:
2544 	mutex_unlock(&md->suspend_lock);
2545 	return map;
2546 }
2547 
2548 /*
2549  * Functions to lock and unlock any filesystem running on the
2550  * device.
2551  */
2552 static int lock_fs(struct mapped_device *md)
2553 {
2554 	int r;
2555 
2556 	WARN_ON(md->frozen_sb);
2557 
2558 	md->frozen_sb = freeze_bdev(md->bdev);
2559 	if (IS_ERR(md->frozen_sb)) {
2560 		r = PTR_ERR(md->frozen_sb);
2561 		md->frozen_sb = NULL;
2562 		return r;
2563 	}
2564 
2565 	set_bit(DMF_FROZEN, &md->flags);
2566 
2567 	return 0;
2568 }
2569 
2570 static void unlock_fs(struct mapped_device *md)
2571 {
2572 	if (!test_bit(DMF_FROZEN, &md->flags))
2573 		return;
2574 
2575 	thaw_bdev(md->bdev, md->frozen_sb);
2576 	md->frozen_sb = NULL;
2577 	clear_bit(DMF_FROZEN, &md->flags);
2578 }
2579 
2580 /*
2581  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2582  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2583  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2584  *
2585  * If __dm_suspend returns 0, the device is completely quiescent
2586  * now.  There is no request-processing activity.  All new requests
2587  * are being added to the md->deferred list.
2588  */
2589 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2590 			unsigned suspend_flags, long task_state,
2591 			int dmf_suspended_flag)
2592 {
2593 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2594 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2595 	int r;
2596 
2597 	lockdep_assert_held(&md->suspend_lock);
2598 
2599 	/*
2600 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2601 	 * This flag is cleared before dm_suspend returns.
2602 	 */
2603 	if (noflush)
2604 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2605 	else
2606 		pr_debug("%s: suspending with flush\n", dm_device_name(md));
2607 
2608 	/*
2609 	 * This gets reverted if there's an error later and the targets
2610 	 * provide the .presuspend_undo hook.
2611 	 */
2612 	dm_table_presuspend_targets(map);
2613 
2614 	/*
2615 	 * Flush I/O to the device.
2616 	 * Any I/O submitted after lock_fs() may not be flushed.
2617 	 * noflush takes precedence over do_lockfs.
2618 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2619 	 */
2620 	if (!noflush && do_lockfs) {
2621 		r = lock_fs(md);
2622 		if (r) {
2623 			dm_table_presuspend_undo_targets(map);
2624 			return r;
2625 		}
2626 	}
2627 
2628 	/*
2629 	 * Here we must make sure that no processes are submitting requests
2630 	 * to target drivers i.e. no one may be executing
2631 	 * __split_and_process_bio. This is called from dm_request and
2632 	 * dm_wq_work.
2633 	 *
2634 	 * To get all processes out of __split_and_process_bio in dm_request,
2635 	 * we take the write lock. To prevent any process from reentering
2636 	 * __split_and_process_bio from dm_request and quiesce the thread
2637 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2638 	 * flush_workqueue(md->wq).
2639 	 */
2640 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2641 	if (map)
2642 		synchronize_srcu(&md->io_barrier);
2643 
2644 	/*
2645 	 * Stop md->queue before flushing md->wq in case request-based
2646 	 * dm defers requests to md->wq from md->queue.
2647 	 */
2648 	if (dm_request_based(md))
2649 		dm_stop_queue(md->queue);
2650 
2651 	flush_workqueue(md->wq);
2652 
2653 	/*
2654 	 * At this point no more requests are entering target request routines.
2655 	 * We call dm_wait_for_completion to wait for all existing requests
2656 	 * to finish.
2657 	 */
2658 	r = dm_wait_for_completion(md, task_state);
2659 	if (!r)
2660 		set_bit(dmf_suspended_flag, &md->flags);
2661 
2662 	if (noflush)
2663 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2664 	if (map)
2665 		synchronize_srcu(&md->io_barrier);
2666 
2667 	/* were we interrupted ? */
2668 	if (r < 0) {
2669 		dm_queue_flush(md);
2670 
2671 		if (dm_request_based(md))
2672 			dm_start_queue(md->queue);
2673 
2674 		unlock_fs(md);
2675 		dm_table_presuspend_undo_targets(map);
2676 		/* pushback list is already flushed, so skip flush */
2677 	}
2678 
2679 	return r;
2680 }
2681 
2682 /*
2683  * We need to be able to change a mapping table under a mounted
2684  * filesystem.  For example we might want to move some data in
2685  * the background.  Before the table can be swapped with
2686  * dm_bind_table, dm_suspend must be called to flush any in-flight
2687  * bios and ensure that any further I/O gets deferred.
2688  */
2689 /*
2690  * Suspend mechanism in request-based dm.
2691  *
2692  * 1. Flush all I/Os by lock_fs() if needed.
2693  * 2. Stop dispatching any I/O by stopping the request_queue.
2694  * 3. Wait for all in-flight I/Os to be completed or requeued.
2695  *
2696  * To abort suspend, start the request_queue.
2697  */
2698 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2699 {
2700 	struct dm_table *map = NULL;
2701 	int r = 0;
2702 
2703 retry:
2704 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2705 
2706 	if (dm_suspended_md(md)) {
2707 		r = -EINVAL;
2708 		goto out_unlock;
2709 	}
2710 
2711 	if (dm_suspended_internally_md(md)) {
2712 		/* already internally suspended, wait for internal resume */
2713 		mutex_unlock(&md->suspend_lock);
2714 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2715 		if (r)
2716 			return r;
2717 		goto retry;
2718 	}
2719 
2720 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2721 
2722 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2723 	if (r)
2724 		goto out_unlock;
2725 
2726 	dm_table_postsuspend_targets(map);
2727 
2728 out_unlock:
2729 	mutex_unlock(&md->suspend_lock);
2730 	return r;
2731 }
2732 
2733 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2734 {
2735 	if (map) {
2736 		int r = dm_table_resume_targets(map);
2737 		if (r)
2738 			return r;
2739 	}
2740 
2741 	dm_queue_flush(md);
2742 
2743 	/*
2744 	 * Flushing deferred I/Os must be done after targets are resumed
2745 	 * so that mapping of targets can work correctly.
2746 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2747 	 */
2748 	if (dm_request_based(md))
2749 		dm_start_queue(md->queue);
2750 
2751 	unlock_fs(md);
2752 
2753 	return 0;
2754 }
2755 
2756 int dm_resume(struct mapped_device *md)
2757 {
2758 	int r;
2759 	struct dm_table *map = NULL;
2760 
2761 retry:
2762 	r = -EINVAL;
2763 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2764 
2765 	if (!dm_suspended_md(md))
2766 		goto out;
2767 
2768 	if (dm_suspended_internally_md(md)) {
2769 		/* already internally suspended, wait for internal resume */
2770 		mutex_unlock(&md->suspend_lock);
2771 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2772 		if (r)
2773 			return r;
2774 		goto retry;
2775 	}
2776 
2777 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2778 	if (!map || !dm_table_get_size(map))
2779 		goto out;
2780 
2781 	r = __dm_resume(md, map);
2782 	if (r)
2783 		goto out;
2784 
2785 	clear_bit(DMF_SUSPENDED, &md->flags);
2786 out:
2787 	mutex_unlock(&md->suspend_lock);
2788 
2789 	return r;
2790 }
2791 
2792 /*
2793  * Internal suspend/resume works like userspace-driven suspend. It waits
2794  * until all bios finish and prevents issuing new bios to the target drivers.
2795  * It may be used only from the kernel.
2796  */
2797 
2798 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2799 {
2800 	struct dm_table *map = NULL;
2801 
2802 	lockdep_assert_held(&md->suspend_lock);
2803 
2804 	if (md->internal_suspend_count++)
2805 		return; /* nested internal suspend */
2806 
2807 	if (dm_suspended_md(md)) {
2808 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2809 		return; /* nest suspend */
2810 	}
2811 
2812 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2813 
2814 	/*
2815 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2816 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2817 	 * would require changing .presuspend to return an error -- avoid this
2818 	 * until there is a need for more elaborate variants of internal suspend.
2819 	 */
2820 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2821 			    DMF_SUSPENDED_INTERNALLY);
2822 
2823 	dm_table_postsuspend_targets(map);
2824 }
2825 
2826 static void __dm_internal_resume(struct mapped_device *md)
2827 {
2828 	BUG_ON(!md->internal_suspend_count);
2829 
2830 	if (--md->internal_suspend_count)
2831 		return; /* resume from nested internal suspend */
2832 
2833 	if (dm_suspended_md(md))
2834 		goto done; /* resume from nested suspend */
2835 
2836 	/*
2837 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2838 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2839 	 */
2840 	(void) __dm_resume(md, NULL);
2841 
2842 done:
2843 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2844 	smp_mb__after_atomic();
2845 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2846 }
2847 
2848 void dm_internal_suspend_noflush(struct mapped_device *md)
2849 {
2850 	mutex_lock(&md->suspend_lock);
2851 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2852 	mutex_unlock(&md->suspend_lock);
2853 }
2854 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2855 
2856 void dm_internal_resume(struct mapped_device *md)
2857 {
2858 	mutex_lock(&md->suspend_lock);
2859 	__dm_internal_resume(md);
2860 	mutex_unlock(&md->suspend_lock);
2861 }
2862 EXPORT_SYMBOL_GPL(dm_internal_resume);
2863 
2864 /*
2865  * Fast variants of internal suspend/resume hold md->suspend_lock,
2866  * which prevents interaction with userspace-driven suspend.
2867  */
2868 
2869 void dm_internal_suspend_fast(struct mapped_device *md)
2870 {
2871 	mutex_lock(&md->suspend_lock);
2872 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2873 		return;
2874 
2875 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2876 	synchronize_srcu(&md->io_barrier);
2877 	flush_workqueue(md->wq);
2878 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2879 }
2880 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2881 
2882 void dm_internal_resume_fast(struct mapped_device *md)
2883 {
2884 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2885 		goto done;
2886 
2887 	dm_queue_flush(md);
2888 
2889 done:
2890 	mutex_unlock(&md->suspend_lock);
2891 }
2892 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2893 
2894 /*-----------------------------------------------------------------
2895  * Event notification.
2896  *---------------------------------------------------------------*/
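/*
 * Send a kobject uevent for the device, attaching the DM_COOKIE
 * environment variable when a non-zero cookie is supplied.
 */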
2897 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2898 		       unsigned cookie)
2899 {
2900 	char udev_cookie[DM_COOKIE_LENGTH];
2901 	char *envp[] = { udev_cookie, NULL };
2902 
2903 	if (!cookie)
2904 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2905 	else {
2906 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2907 			 DM_COOKIE_ENV_VAR_NAME, cookie);
2908 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2909 					  action, envp);
2910 	}
2911 }
2912 
2913 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2914 {
2915 	return atomic_add_return(1, &md->uevent_seq);
2916 }
2917 
2918 uint32_t dm_get_event_nr(struct mapped_device *md)
2919 {
2920 	return atomic_read(&md->event_nr);
2921 }
2922 
2923 int dm_wait_event(struct mapped_device *md, int event_nr)
2924 {
2925 	return wait_event_interruptible(md->eventq,
2926 			(event_nr != atomic_read(&md->event_nr)));
2927 }
2928 
2929 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2930 {
2931 	unsigned long flags;
2932 
2933 	spin_lock_irqsave(&md->uevent_lock, flags);
2934 	list_add(elist, &md->uevent_list);
2935 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2936 }
2937 
2938 /*
2939  * The gendisk is only valid as long as you have a reference
2940  * count on 'md'.
2941  */
2942 struct gendisk *dm_disk(struct mapped_device *md)
2943 {
2944 	return md->disk;
2945 }
2946 EXPORT_SYMBOL_GPL(dm_disk);
2947 
2948 struct kobject *dm_kobject(struct mapped_device *md)
2949 {
2950 	return &md->kobj_holder.kobj;
2951 }
2952 
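/*
 * Resolve the mapped_device that embeds the given kobject and take a
 * reference on it, unless the device is being freed or deleted.
 */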
2953 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2954 {
2955 	struct mapped_device *md;
2956 
2957 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2958 
2959 	spin_lock(&_minor_lock);
2960 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2961 		md = NULL;
2962 		goto out;
2963 	}
2964 	dm_get(md);
2965 out:
2966 	spin_unlock(&_minor_lock);
2967 
2968 	return md;
2969 }
2970 
2971 int dm_suspended_md(struct mapped_device *md)
2972 {
2973 	return test_bit(DMF_SUSPENDED, &md->flags);
2974 }
2975 
2976 int dm_suspended_internally_md(struct mapped_device *md)
2977 {
2978 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2979 }
2980 
2981 int dm_test_deferred_remove_flag(struct mapped_device *md)
2982 {
2983 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2984 }
2985 
2986 int dm_suspended(struct dm_target *ti)
2987 {
2988 	return dm_suspended_md(dm_table_get_md(ti->table));
2989 }
2990 EXPORT_SYMBOL_GPL(dm_suspended);
2991 
2992 int dm_noflush_suspending(struct dm_target *ti)
2993 {
2994 	return __noflush_suspending(dm_table_get_md(ti->table));
2995 }
2996 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2997 
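/*
 * Allocate the biosets a table of the given type needs, with front padding
 * sized to hold the target's per-bio data plus DM's own per-bio bookkeeping.
 */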
2998 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2999 					    unsigned integrity, unsigned per_io_data_size,
3000 					    unsigned min_pool_size)
3001 {
3002 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
3003 	unsigned int pool_size = 0;
3004 	unsigned int front_pad, io_front_pad;
3005 	int ret;
3006 
3007 	if (!pools)
3008 		return NULL;
3009 
3010 	switch (type) {
3011 	case DM_TYPE_BIO_BASED:
3012 	case DM_TYPE_DAX_BIO_BASED:
3013 	case DM_TYPE_NVME_BIO_BASED:
3014 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
3015 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3016 		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
3017 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
3018 		if (ret)
3019 			goto out;
3020 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
3021 			goto out;
3022 		break;
3023 	case DM_TYPE_REQUEST_BASED:
3024 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
3025 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3026 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
3027 		break;
3028 	default:
3029 		BUG();
3030 	}
3031 
3032 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
3033 	if (ret)
3034 		goto out;
3035 
3036 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
3037 		goto out;
3038 
3039 	return pools;
3040 
3041 out:
3042 	dm_free_md_mempools(pools);
3043 
3044 	return NULL;
3045 }
3046 
3047 void dm_free_md_mempools(struct dm_md_mempools *pools)
3048 {
3049 	if (!pools)
3050 		return;
3051 
3052 	bioset_exit(&pools->bs);
3053 	bioset_exit(&pools->io_bs);
3054 
3055 	kfree(pools);
3056 }
3057 
3058 struct dm_pr {
3059 	u64	old_key;
3060 	u64	new_key;
3061 	u32	flags;
3062 	bool	fail_early;
3063 };
3064 
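/*
 * Invoke fn on the underlying devices of the (single) live target.
 * Persistent reservations are only supported on single-target tables.
 */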
3065 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3066 		      void *data)
3067 {
3068 	struct mapped_device *md = bdev->bd_disk->private_data;
3069 	struct dm_table *table;
3070 	struct dm_target *ti;
3071 	int ret = -ENOTTY, srcu_idx;
3072 
3073 	table = dm_get_live_table(md, &srcu_idx);
3074 	if (!table || !dm_table_get_size(table))
3075 		goto out;
3076 
3077 	/* We only support devices that have a single target */
3078 	if (dm_table_get_num_targets(table) != 1)
3079 		goto out;
3080 	ti = dm_table_get_target(table, 0);
3081 
3082 	ret = -EINVAL;
3083 	if (!ti->type->iterate_devices)
3084 		goto out;
3085 
3086 	ret = ti->type->iterate_devices(ti, fn, data);
3087 out:
3088 	dm_put_live_table(md, srcu_idx);
3089 	return ret;
3090 }
3091 
3092 /*
3093  * For register / unregister we need to manually call out to every path.
3094  */
3095 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3096 			    sector_t start, sector_t len, void *data)
3097 {
3098 	struct dm_pr *pr = data;
3099 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3100 
3101 	if (!ops || !ops->pr_register)
3102 		return -EOPNOTSUPP;
3103 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3104 }
3105 
3106 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3107 			  u32 flags)
3108 {
3109 	struct dm_pr pr = {
3110 		.old_key	= old_key,
3111 		.new_key	= new_key,
3112 		.flags		= flags,
3113 		.fail_early	= true,
3114 	};
3115 	int ret;
3116 
3117 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3118 	if (ret && new_key) {
3119 		/* unregister all paths if we failed to register any path */
3120 		pr.old_key = new_key;
3121 		pr.new_key = 0;
3122 		pr.flags = 0;
3123 		pr.fail_early = false;
3124 		dm_call_pr(bdev, __dm_pr_register, &pr);
3125 	}
3126 
3127 	return ret;
3128 }
3129 
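/*
 * The remaining pr_ops handlers forward the operation to the single
 * underlying device resolved by dm_prepare_ioctl().
 */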
3130 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3131 			 u32 flags)
3132 {
3133 	struct mapped_device *md = bdev->bd_disk->private_data;
3134 	const struct pr_ops *ops;
3135 	int r, srcu_idx;
3136 
3137 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3138 	if (r < 0)
3139 		goto out;
3140 
3141 	ops = bdev->bd_disk->fops->pr_ops;
3142 	if (ops && ops->pr_reserve)
3143 		r = ops->pr_reserve(bdev, key, type, flags);
3144 	else
3145 		r = -EOPNOTSUPP;
3146 out:
3147 	dm_unprepare_ioctl(md, srcu_idx);
3148 	return r;
3149 }
3150 
3151 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3152 {
3153 	struct mapped_device *md = bdev->bd_disk->private_data;
3154 	const struct pr_ops *ops;
3155 	int r, srcu_idx;
3156 
3157 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3158 	if (r < 0)
3159 		goto out;
3160 
3161 	ops = bdev->bd_disk->fops->pr_ops;
3162 	if (ops && ops->pr_release)
3163 		r = ops->pr_release(bdev, key, type);
3164 	else
3165 		r = -EOPNOTSUPP;
3166 out:
3167 	dm_unprepare_ioctl(md, srcu_idx);
3168 	return r;
3169 }
3170 
3171 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3172 			 enum pr_type type, bool abort)
3173 {
3174 	struct mapped_device *md = bdev->bd_disk->private_data;
3175 	const struct pr_ops *ops;
3176 	int r, srcu_idx;
3177 
3178 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3179 	if (r < 0)
3180 		goto out;
3181 
3182 	ops = bdev->bd_disk->fops->pr_ops;
3183 	if (ops && ops->pr_preempt)
3184 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3185 	else
3186 		r = -EOPNOTSUPP;
3187 out:
3188 	dm_unprepare_ioctl(md, srcu_idx);
3189 	return r;
3190 }
3191 
3192 static int dm_pr_clear(struct block_device *bdev, u64 key)
3193 {
3194 	struct mapped_device *md = bdev->bd_disk->private_data;
3195 	const struct pr_ops *ops;
3196 	int r, srcu_idx;
3197 
3198 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3199 	if (r < 0)
3200 		goto out;
3201 
3202 	ops = bdev->bd_disk->fops->pr_ops;
3203 	if (ops && ops->pr_clear)
3204 		r = ops->pr_clear(bdev, key);
3205 	else
3206 		r = -EOPNOTSUPP;
3207 out:
3208 	dm_unprepare_ioctl(md, srcu_idx);
3209 	return r;
3210 }
3211 
3212 static const struct pr_ops dm_pr_ops = {
3213 	.pr_register	= dm_pr_register,
3214 	.pr_reserve	= dm_pr_reserve,
3215 	.pr_release	= dm_pr_release,
3216 	.pr_preempt	= dm_pr_preempt,
3217 	.pr_clear	= dm_pr_clear,
3218 };
3219 
3220 static const struct block_device_operations dm_blk_dops = {
3221 	.open = dm_blk_open,
3222 	.release = dm_blk_close,
3223 	.ioctl = dm_blk_ioctl,
3224 	.getgeo = dm_blk_getgeo,
3225 	.report_zones = dm_blk_report_zones,
3226 	.pr_ops = &dm_pr_ops,
3227 	.owner = THIS_MODULE
3228 };
3229 
3230 static const struct dax_operations dm_dax_ops = {
3231 	.direct_access = dm_dax_direct_access,
3232 	.dax_supported = dm_dax_supported,
3233 	.copy_from_iter = dm_dax_copy_from_iter,
3234 	.copy_to_iter = dm_dax_copy_to_iter,
3235 	.zero_page_range = dm_dax_zero_page_range,
3236 };
3237 
3238 /*
3239  * module hooks
3240  */
3241 module_init(dm_init);
3242 module_exit(dm_exit);
3243 
3244 module_param(major, uint, 0);
3245 MODULE_PARM_DESC(major, "The major number of the device mapper");
3246 
3247 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3248 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3249 
3250 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3251 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3252 
3253 MODULE_DESCRIPTION(DM_NAME " driver");
3254 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3255 MODULE_LICENSE("GPL");
3256