xref: /linux/drivers/md/dm.c (revision 6c59f64b7ecf2bccbe73931d7d573d66ed13b537)
1 /*
2  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm-core.h"
9 #include "dm-rq.h"
10 #include "dm-uevent.h"
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/sched/signal.h>
16 #include <linux/blkpg.h>
17 #include <linux/bio.h>
18 #include <linux/mempool.h>
19 #include <linux/dax.h>
20 #include <linux/slab.h>
21 #include <linux/idr.h>
22 #include <linux/uio.h>
23 #include <linux/hdreg.h>
24 #include <linux/delay.h>
25 #include <linux/wait.h>
26 #include <linux/pr.h>
27 #include <linux/refcount.h>
28 
29 #define DM_MSG_PREFIX "core"
30 
31 /*
32  * Cookies are numeric values sent with CHANGE and REMOVE
33  * uevents while resuming, removing or renaming the device.
34  */
35 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
36 #define DM_COOKIE_LENGTH 24
37 
38 static const char *_name = DM_NAME;
39 
40 static unsigned int major = 0;
41 static unsigned int _major = 0;
42 
43 static DEFINE_IDR(_minor_idr);
44 
45 static DEFINE_SPINLOCK(_minor_lock);
46 
47 static void do_deferred_remove(struct work_struct *w);
48 
49 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
50 
51 static struct workqueue_struct *deferred_remove_workqueue;
52 
53 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
54 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
55 
56 void dm_issue_global_event(void)
57 {
58 	atomic_inc(&dm_global_event_nr);
59 	wake_up(&dm_global_eventq);
60 }
61 
62 /*
63  * One of these is allocated (on-stack) per original bio.
64  */
65 struct clone_info {
66 	struct dm_table *map;
67 	struct bio *bio;
68 	struct dm_io *io;
69 	sector_t sector;
70 	unsigned sector_count;
71 };
72 
73 /*
74  * One of these is allocated per clone bio.
75  */
76 #define DM_TIO_MAGIC 7282014
77 struct dm_target_io {
78 	unsigned magic;
79 	struct dm_io *io;
80 	struct dm_target *ti;
81 	unsigned target_bio_nr;
82 	unsigned *len_ptr;
83 	bool inside_dm_io;
84 	struct bio clone;
85 };
86 
87 /*
88  * One of these is allocated per original bio.
89  * It contains the first clone used for that original.
90  */
91 #define DM_IO_MAGIC 5191977
92 struct dm_io {
93 	unsigned magic;
94 	struct mapped_device *md;
95 	blk_status_t status;
96 	atomic_t io_count;
97 	struct bio *orig_bio;
98 	unsigned long start_time;
99 	spinlock_t endio_lock;
100 	struct dm_stats_aux stats_aux;
101 	/* last member of dm_target_io is 'struct bio' */
102 	struct dm_target_io tio;
103 };
104 
105 void *dm_per_bio_data(struct bio *bio, size_t data_size)
106 {
107 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
108 	if (!tio->inside_dm_io)
109 		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
110 	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
111 }
112 EXPORT_SYMBOL_GPL(dm_per_bio_data);
113 
114 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
115 {
116 	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
117 	if (io->magic == DM_IO_MAGIC)
118 		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
119 	BUG_ON(io->magic != DM_TIO_MAGIC);
120 	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
121 }
122 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
123 
124 unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
125 {
126 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
127 }
128 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
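
/*
 * Editor's note: illustrative sketch, not part of dm.c.  It shows how a
 * hypothetical bio-based target would use the three helpers above.  The
 * per-bio data lives in front of the clone bio, so a target only declares
 * its size (ti->per_io_data_size) in the constructor; all example_* names
 * are made up.
 */
struct example_pb {
	sector_t orig_sector;
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	ti->per_io_data_size = sizeof(struct example_pb);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_pb *pb = dm_per_bio_data(bio, sizeof(struct example_pb));

	pb->orig_sector = bio->bi_iter.bi_sector;

	/* the lookup is reversible, and this target never asks for duplicates */
	WARN_ON(dm_bio_from_per_bio_data(pb, sizeof(*pb)) != bio);
	WARN_ON(dm_bio_get_target_bio_nr(bio) != 0);

	bio_endio(bio);		/* a trivial target: complete the I/O immediately */
	return DM_MAPIO_SUBMITTED;
}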
129 
130 #define MINOR_ALLOCED ((void *)-1)
131 
132 /*
133  * Bits for the md->flags field.
134  */
135 #define DMF_BLOCK_IO_FOR_SUSPEND 0
136 #define DMF_SUSPENDED 1
137 #define DMF_FROZEN 2
138 #define DMF_FREEING 3
139 #define DMF_DELETING 4
140 #define DMF_NOFLUSH_SUSPENDING 5
141 #define DMF_DEFERRED_REMOVE 6
142 #define DMF_SUSPENDED_INTERNALLY 7
143 
144 #define DM_NUMA_NODE NUMA_NO_NODE
145 static int dm_numa_node = DM_NUMA_NODE;
146 
147 /*
148  * For mempool pre-allocation at table load time.
149  */
150 struct dm_md_mempools {
151 	struct bio_set *bs;
152 	struct bio_set *io_bs;
153 };
154 
155 struct table_device {
156 	struct list_head list;
157 	refcount_t count;
158 	struct dm_dev dm_dev;
159 };
160 
161 static struct kmem_cache *_rq_tio_cache;
162 static struct kmem_cache *_rq_cache;
163 
164 /*
165  * Bio-based DM's mempools' reserved IOs set by the user.
166  */
167 #define RESERVED_BIO_BASED_IOS		16
168 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
169 
170 static int __dm_get_module_param_int(int *module_param, int min, int max)
171 {
172 	int param = READ_ONCE(*module_param);
173 	int modified_param = 0;
174 	bool modified = true;
175 
176 	if (param < min)
177 		modified_param = min;
178 	else if (param > max)
179 		modified_param = max;
180 	else
181 		modified = false;
182 
183 	if (modified) {
184 		(void)cmpxchg(module_param, param, modified_param);
185 		param = modified_param;
186 	}
187 
188 	return param;
189 }
190 
191 unsigned __dm_get_module_param(unsigned *module_param,
192 			       unsigned def, unsigned max)
193 {
194 	unsigned param = READ_ONCE(*module_param);
195 	unsigned modified_param = 0;
196 
197 	if (!param)
198 		modified_param = def;
199 	else if (param > max)
200 		modified_param = max;
201 
202 	if (modified_param) {
203 		(void)cmpxchg(module_param, param, modified_param);
204 		param = modified_param;
205 	}
206 
207 	return param;
208 }
209 
210 unsigned dm_get_reserved_bio_based_ios(void)
211 {
212 	return __dm_get_module_param(&reserved_bio_based_ios,
213 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
214 }
215 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
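
/*
 * Editor's note: illustrative sketch, not part of dm.c.  It shows the
 * clamping behaviour of __dm_get_module_param() for a hypothetical module
 * parameter: 0 is replaced by the default, values above the ceiling are
 * clamped, and the corrected value is written back with cmpxchg().
 */
static unsigned example_reserved_ios;	/* hypothetical module_param */

static unsigned example_get_reserved_ios(void)
{
	/* 0 -> 16, anything above DM_RESERVED_MAX_IOS -> DM_RESERVED_MAX_IOS */
	return __dm_get_module_param(&example_reserved_ios, 16,
				     DM_RESERVED_MAX_IOS);
}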
216 
217 static unsigned dm_get_numa_node(void)
218 {
219 	return __dm_get_module_param_int(&dm_numa_node,
220 					 DM_NUMA_NODE, num_online_nodes() - 1);
221 }
222 
223 static int __init local_init(void)
224 {
225 	int r = -ENOMEM;
226 
227 	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
228 	if (!_rq_tio_cache)
229 		return r;
230 
231 	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
232 				      __alignof__(struct request), 0, NULL);
233 	if (!_rq_cache)
234 		goto out_free_rq_tio_cache;
235 
236 	r = dm_uevent_init();
237 	if (r)
238 		goto out_free_rq_cache;
239 
240 	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
241 	if (!deferred_remove_workqueue) {
242 		r = -ENOMEM;
243 		goto out_uevent_exit;
244 	}
245 
246 	_major = major;
247 	r = register_blkdev(_major, _name);
248 	if (r < 0)
249 		goto out_free_workqueue;
250 
251 	if (!_major)
252 		_major = r;
253 
254 	return 0;
255 
256 out_free_workqueue:
257 	destroy_workqueue(deferred_remove_workqueue);
258 out_uevent_exit:
259 	dm_uevent_exit();
260 out_free_rq_cache:
261 	kmem_cache_destroy(_rq_cache);
262 out_free_rq_tio_cache:
263 	kmem_cache_destroy(_rq_tio_cache);
264 
265 	return r;
266 }
267 
268 static void local_exit(void)
269 {
270 	flush_scheduled_work();
271 	destroy_workqueue(deferred_remove_workqueue);
272 
273 	kmem_cache_destroy(_rq_cache);
274 	kmem_cache_destroy(_rq_tio_cache);
275 	unregister_blkdev(_major, _name);
276 	dm_uevent_exit();
277 
278 	_major = 0;
279 
280 	DMINFO("cleaned up");
281 }
282 
283 static int (*_inits[])(void) __initdata = {
284 	local_init,
285 	dm_target_init,
286 	dm_linear_init,
287 	dm_stripe_init,
288 	dm_io_init,
289 	dm_kcopyd_init,
290 	dm_interface_init,
291 	dm_statistics_init,
292 };
293 
294 static void (*_exits[])(void) = {
295 	local_exit,
296 	dm_target_exit,
297 	dm_linear_exit,
298 	dm_stripe_exit,
299 	dm_io_exit,
300 	dm_kcopyd_exit,
301 	dm_interface_exit,
302 	dm_statistics_exit,
303 };
304 
305 static int __init dm_init(void)
306 {
307 	const int count = ARRAY_SIZE(_inits);
308 
309 	int r, i;
310 
311 	for (i = 0; i < count; i++) {
312 		r = _inits[i]();
313 		if (r)
314 			goto bad;
315 	}
316 
317 	return 0;
318 
319 bad:
320 	while (i--)
321 		_exits[i]();
322 
323 	return r;
324 }
325 
326 static void __exit dm_exit(void)
327 {
328 	int i = ARRAY_SIZE(_exits);
329 
330 	while (i--)
331 		_exits[i]();
332 
333 	/*
334 	 * Should be empty by this point.
335 	 */
336 	idr_destroy(&_minor_idr);
337 }
338 
339 /*
340  * Block device functions
341  */
342 int dm_deleting_md(struct mapped_device *md)
343 {
344 	return test_bit(DMF_DELETING, &md->flags);
345 }
346 
347 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
348 {
349 	struct mapped_device *md;
350 
351 	spin_lock(&_minor_lock);
352 
353 	md = bdev->bd_disk->private_data;
354 	if (!md)
355 		goto out;
356 
357 	if (test_bit(DMF_FREEING, &md->flags) ||
358 	    dm_deleting_md(md)) {
359 		md = NULL;
360 		goto out;
361 	}
362 
363 	dm_get(md);
364 	atomic_inc(&md->open_count);
365 out:
366 	spin_unlock(&_minor_lock);
367 
368 	return md ? 0 : -ENXIO;
369 }
370 
371 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
372 {
373 	struct mapped_device *md;
374 
375 	spin_lock(&_minor_lock);
376 
377 	md = disk->private_data;
378 	if (WARN_ON(!md))
379 		goto out;
380 
381 	if (atomic_dec_and_test(&md->open_count) &&
382 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
383 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
384 
385 	dm_put(md);
386 out:
387 	spin_unlock(&_minor_lock);
388 }
389 
390 int dm_open_count(struct mapped_device *md)
391 {
392 	return atomic_read(&md->open_count);
393 }
394 
395 /*
396  * Guarantees nothing is using the device before it's deleted.
397  */
398 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
399 {
400 	int r = 0;
401 
402 	spin_lock(&_minor_lock);
403 
404 	if (dm_open_count(md)) {
405 		r = -EBUSY;
406 		if (mark_deferred)
407 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
408 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
409 		r = -EEXIST;
410 	else
411 		set_bit(DMF_DELETING, &md->flags);
412 
413 	spin_unlock(&_minor_lock);
414 
415 	return r;
416 }
417 
418 int dm_cancel_deferred_remove(struct mapped_device *md)
419 {
420 	int r = 0;
421 
422 	spin_lock(&_minor_lock);
423 
424 	if (test_bit(DMF_DELETING, &md->flags))
425 		r = -EBUSY;
426 	else
427 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
428 
429 	spin_unlock(&_minor_lock);
430 
431 	return r;
432 }
433 
434 static void do_deferred_remove(struct work_struct *w)
435 {
436 	dm_deferred_remove();
437 }
438 
439 sector_t dm_get_size(struct mapped_device *md)
440 {
441 	return get_capacity(md->disk);
442 }
443 
444 struct request_queue *dm_get_md_queue(struct mapped_device *md)
445 {
446 	return md->queue;
447 }
448 
449 struct dm_stats *dm_get_stats(struct mapped_device *md)
450 {
451 	return &md->stats;
452 }
453 
454 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
455 {
456 	struct mapped_device *md = bdev->bd_disk->private_data;
457 
458 	return dm_get_geometry(md, geo);
459 }
460 
461 static char *_dm_claim_ptr = "I belong to device-mapper";
462 
463 static int dm_get_bdev_for_ioctl(struct mapped_device *md,
464 				 struct block_device **bdev,
465 				 fmode_t *mode)
466 {
467 	struct dm_target *tgt;
468 	struct dm_table *map;
469 	int srcu_idx, r;
470 
471 retry:
472 	r = -ENOTTY;
473 	map = dm_get_live_table(md, &srcu_idx);
474 	if (!map || !dm_table_get_size(map))
475 		goto out;
476 
477 	/* We only support devices that have a single target */
478 	if (dm_table_get_num_targets(map) != 1)
479 		goto out;
480 
481 	tgt = dm_table_get_target(map, 0);
482 	if (!tgt->type->prepare_ioctl)
483 		goto out;
484 
485 	if (dm_suspended_md(md)) {
486 		r = -EAGAIN;
487 		goto out;
488 	}
489 
490 	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
491 	if (r < 0)
492 		goto out;
493 
494 	bdgrab(*bdev);
495 	r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
496 	if (r < 0)
497 		goto out;
498 
499 	dm_put_live_table(md, srcu_idx);
500 	return r;
501 
502 out:
503 	dm_put_live_table(md, srcu_idx);
504 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
505 		msleep(10);
506 		goto retry;
507 	}
508 	return r;
509 }
510 
511 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
512 			unsigned int cmd, unsigned long arg)
513 {
514 	struct mapped_device *md = bdev->bd_disk->private_data;
515 	int r;
516 
517 	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
518 	if (r < 0)
519 		return r;
520 
521 	if (r > 0) {
522 		/*
523 		 * Target determined this ioctl is being issued against a
524 		 * subset of the parent bdev; require extra privileges.
525 		 */
526 		if (!capable(CAP_SYS_RAWIO)) {
527 			DMWARN_LIMIT(
528 	"%s: sending ioctl %x to DM device without required privilege.",
529 				current->comm, cmd);
530 			r = -ENOIOCTLCMD;
531 			goto out;
532 		}
533 	}
534 
535 	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
536 out:
537 	blkdev_put(bdev, mode);
538 	return r;
539 }
540 
541 static void start_io_acct(struct dm_io *io);
542 
543 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
544 {
545 	struct dm_io *io;
546 	struct dm_target_io *tio;
547 	struct bio *clone;
548 
549 	clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
550 	if (!clone)
551 		return NULL;
552 
553 	tio = container_of(clone, struct dm_target_io, clone);
554 	tio->inside_dm_io = true;
555 	tio->io = NULL;
556 
557 	io = container_of(tio, struct dm_io, tio);
558 	io->magic = DM_IO_MAGIC;
559 	io->status = 0;
560 	atomic_set(&io->io_count, 1);
561 	io->orig_bio = bio;
562 	io->md = md;
563 	spin_lock_init(&io->endio_lock);
564 
565 	start_io_acct(io);
566 
567 	return io;
568 }
569 
570 static void free_io(struct mapped_device *md, struct dm_io *io)
571 {
572 	bio_put(&io->tio.clone);
573 }
574 
575 static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
576 				      unsigned target_bio_nr, gfp_t gfp_mask)
577 {
578 	struct dm_target_io *tio;
579 
580 	if (!ci->io->tio.io) {
581 		/* the dm_target_io embedded in ci->io is available */
582 		tio = &ci->io->tio;
583 	} else {
584 		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
585 		if (!clone)
586 			return NULL;
587 
588 		tio = container_of(clone, struct dm_target_io, clone);
589 		tio->inside_dm_io = false;
590 	}
591 
592 	tio->magic = DM_TIO_MAGIC;
593 	tio->io = ci->io;
594 	tio->ti = ti;
595 	tio->target_bio_nr = target_bio_nr;
596 
597 	return tio;
598 }
599 
600 static void free_tio(struct dm_target_io *tio)
601 {
602 	if (tio->inside_dm_io)
603 		return;
604 	bio_put(&tio->clone);
605 }
606 
607 int md_in_flight(struct mapped_device *md)
608 {
609 	return atomic_read(&md->pending[READ]) +
610 	       atomic_read(&md->pending[WRITE]);
611 }
612 
613 static void start_io_acct(struct dm_io *io)
614 {
615 	struct mapped_device *md = io->md;
616 	struct bio *bio = io->orig_bio;
617 	int rw = bio_data_dir(bio);
618 
619 	io->start_time = jiffies;
620 
621 	generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
622 
623 	atomic_set(&dm_disk(md)->part0.in_flight[rw],
624 		   atomic_inc_return(&md->pending[rw]));
625 
626 	if (unlikely(dm_stats_used(&md->stats)))
627 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
628 				    bio->bi_iter.bi_sector, bio_sectors(bio),
629 				    false, 0, &io->stats_aux);
630 }
631 
632 static void end_io_acct(struct dm_io *io)
633 {
634 	struct mapped_device *md = io->md;
635 	struct bio *bio = io->orig_bio;
636 	unsigned long duration = jiffies - io->start_time;
637 	int pending;
638 	int rw = bio_data_dir(bio);
639 
640 	generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);
641 
642 	if (unlikely(dm_stats_used(&md->stats)))
643 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
644 				    bio->bi_iter.bi_sector, bio_sectors(bio),
645 				    true, duration, &io->stats_aux);
646 
647 	/*
648 	 * After this is decremented the bio must not be touched if it is
649 	 * a flush.
650 	 */
651 	pending = atomic_dec_return(&md->pending[rw]);
652 	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
653 	pending += atomic_read(&md->pending[rw^0x1]);
654 
655 	/* nudge anyone waiting on suspend queue */
656 	if (!pending)
657 		wake_up(&md->wait);
658 }
659 
660 /*
661  * Add the bio to the list of deferred io.
662  */
663 static void queue_io(struct mapped_device *md, struct bio *bio)
664 {
665 	unsigned long flags;
666 
667 	spin_lock_irqsave(&md->deferred_lock, flags);
668 	bio_list_add(&md->deferred, bio);
669 	spin_unlock_irqrestore(&md->deferred_lock, flags);
670 	queue_work(md->wq, &md->work);
671 }
672 
673 /*
674  * Everyone (including functions in this file) should use this
675  * function to access the md->map field, and make sure they call
676  * dm_put_live_table() when finished.
677  */
678 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
679 {
680 	*srcu_idx = srcu_read_lock(&md->io_barrier);
681 
682 	return srcu_dereference(md->map, &md->io_barrier);
683 }
684 
685 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
686 {
687 	srcu_read_unlock(&md->io_barrier, srcu_idx);
688 }
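
/*
 * Editor's note: illustrative sketch, not part of dm.c.  It shows the
 * expected pattern around the live table: take the SRCU reference, use the
 * table, then drop the reference.  example_table_size() is a hypothetical
 * helper, not an existing function.
 */
static sector_t example_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);	/* always pair with the get */

	return size;
}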
689 
690 void dm_sync_table(struct mapped_device *md)
691 {
692 	synchronize_srcu(&md->io_barrier);
693 	synchronize_rcu_expedited();
694 }
695 
696 /*
697  * A fast alternative to dm_get_live_table/dm_put_live_table.
698  * The caller must not block between these two functions.
699  */
700 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
701 {
702 	rcu_read_lock();
703 	return rcu_dereference(md->map);
704 }
705 
706 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
707 {
708 	rcu_read_unlock();
709 }
710 
711 /*
712  * Open a table device so we can use it as a map destination.
713  */
714 static int open_table_device(struct table_device *td, dev_t dev,
715 			     struct mapped_device *md)
716 {
717 	struct block_device *bdev;
718 
719 	int r;
720 
721 	BUG_ON(td->dm_dev.bdev);
722 
723 	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
724 	if (IS_ERR(bdev))
725 		return PTR_ERR(bdev);
726 
727 	r = bd_link_disk_holder(bdev, dm_disk(md));
728 	if (r) {
729 		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
730 		return r;
731 	}
732 
733 	td->dm_dev.bdev = bdev;
734 	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
735 	return 0;
736 }
737 
738 /*
739  * Close a table device that we've been using.
740  */
741 static void close_table_device(struct table_device *td, struct mapped_device *md)
742 {
743 	if (!td->dm_dev.bdev)
744 		return;
745 
746 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
747 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
748 	put_dax(td->dm_dev.dax_dev);
749 	td->dm_dev.bdev = NULL;
750 	td->dm_dev.dax_dev = NULL;
751 }
752 
753 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
754 					      fmode_t mode) {
755 	struct table_device *td;
756 
757 	list_for_each_entry(td, l, list)
758 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
759 			return td;
760 
761 	return NULL;
762 }
763 
764 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
765 			struct dm_dev **result) {
766 	int r;
767 	struct table_device *td;
768 
769 	mutex_lock(&md->table_devices_lock);
770 	td = find_table_device(&md->table_devices, dev, mode);
771 	if (!td) {
772 		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
773 		if (!td) {
774 			mutex_unlock(&md->table_devices_lock);
775 			return -ENOMEM;
776 		}
777 
778 		td->dm_dev.mode = mode;
779 		td->dm_dev.bdev = NULL;
780 
781 		if ((r = open_table_device(td, dev, md))) {
782 			mutex_unlock(&md->table_devices_lock);
783 			kfree(td);
784 			return r;
785 		}
786 
787 		format_dev_t(td->dm_dev.name, dev);
788 
789 		refcount_set(&td->count, 1);
790 		list_add(&td->list, &md->table_devices);
791 	} else {
792 		refcount_inc(&td->count);
793 	}
794 	mutex_unlock(&md->table_devices_lock);
795 
796 	*result = &td->dm_dev;
797 	return 0;
798 }
799 EXPORT_SYMBOL_GPL(dm_get_table_device);
800 
801 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
802 {
803 	struct table_device *td = container_of(d, struct table_device, dm_dev);
804 
805 	mutex_lock(&md->table_devices_lock);
806 	if (refcount_dec_and_test(&td->count)) {
807 		close_table_device(td, md);
808 		list_del(&td->list);
809 		kfree(td);
810 	}
811 	mutex_unlock(&md->table_devices_lock);
812 }
813 EXPORT_SYMBOL(dm_put_table_device);
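
/*
 * Editor's note: illustrative sketch, not part of dm.c.  Table devices are
 * reference counted per (dev_t, mode) pair, so get and put must balance;
 * example_probe_dev() is a hypothetical caller.
 */
static int example_probe_dev(struct mapped_device *md, dev_t dev)
{
	struct dm_dev *d;
	int r;

	r = dm_get_table_device(md, dev, FMODE_READ, &d);
	if (r)
		return r;

	DMINFO("opened table device %s", d->name);

	dm_put_table_device(md, d);	/* last put closes the bdev */
	return 0;
}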
814 
815 static void free_table_devices(struct list_head *devices)
816 {
817 	struct list_head *tmp, *next;
818 
819 	list_for_each_safe(tmp, next, devices) {
820 		struct table_device *td = list_entry(tmp, struct table_device, list);
821 
822 		DMWARN("dm_destroy: %s still exists with %d references",
823 		       td->dm_dev.name, refcount_read(&td->count));
824 		kfree(td);
825 	}
826 }
827 
828 /*
829  * Get the geometry associated with a dm device
830  */
831 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
832 {
833 	*geo = md->geometry;
834 
835 	return 0;
836 }
837 
838 /*
839  * Set the geometry of a device.
840  */
841 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
842 {
843 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
844 
845 	if (geo->start > sz) {
846 		DMWARN("Start sector is beyond the geometry limits.");
847 		return -EINVAL;
848 	}
849 
850 	md->geometry = *geo;
851 
852 	return 0;
853 }
854 
855 static int __noflush_suspending(struct mapped_device *md)
856 {
857 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
858 }
859 
860 /*
861  * Decrements the number of outstanding ios that a bio has been
862  * cloned into, completing the original io if necessary.
863  */
864 static void dec_pending(struct dm_io *io, blk_status_t error)
865 {
866 	unsigned long flags;
867 	blk_status_t io_error;
868 	struct bio *bio;
869 	struct mapped_device *md = io->md;
870 
871 	/* Push-back supersedes any I/O errors */
872 	if (unlikely(error)) {
873 		spin_lock_irqsave(&io->endio_lock, flags);
874 		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
875 			io->status = error;
876 		spin_unlock_irqrestore(&io->endio_lock, flags);
877 	}
878 
879 	if (atomic_dec_and_test(&io->io_count)) {
880 		if (io->status == BLK_STS_DM_REQUEUE) {
881 			/*
882 			 * Target requested pushing back the I/O.
883 			 */
884 			spin_lock_irqsave(&md->deferred_lock, flags);
885 			if (__noflush_suspending(md))
886 				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
887 				bio_list_add_head(&md->deferred, io->orig_bio);
888 			else
889 				/* noflush suspend was interrupted. */
890 				io->status = BLK_STS_IOERR;
891 			spin_unlock_irqrestore(&md->deferred_lock, flags);
892 		}
893 
894 		io_error = io->status;
895 		bio = io->orig_bio;
896 		end_io_acct(io);
897 		free_io(md, io);
898 
899 		if (io_error == BLK_STS_DM_REQUEUE)
900 			return;
901 
902 		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
903 			/*
904 			 * Preflush done for flush with data, reissue
905 			 * without REQ_PREFLUSH.
906 			 */
907 			bio->bi_opf &= ~REQ_PREFLUSH;
908 			queue_io(md, bio);
909 		} else {
910 			/* done with normal IO or empty flush */
911 			if (io_error)
912 				bio->bi_status = io_error;
913 			bio_endio(bio);
914 		}
915 	}
916 }
917 
918 void disable_write_same(struct mapped_device *md)
919 {
920 	struct queue_limits *limits = dm_get_queue_limits(md);
921 
922 	/* device doesn't really support WRITE SAME, disable it */
923 	limits->max_write_same_sectors = 0;
924 }
925 
926 void disable_write_zeroes(struct mapped_device *md)
927 {
928 	struct queue_limits *limits = dm_get_queue_limits(md);
929 
930 	/* device doesn't really support WRITE ZEROES, disable it */
931 	limits->max_write_zeroes_sectors = 0;
932 }
933 
934 static void clone_endio(struct bio *bio)
935 {
936 	blk_status_t error = bio->bi_status;
937 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
938 	struct dm_io *io = tio->io;
939 	struct mapped_device *md = tio->io->md;
940 	dm_endio_fn endio = tio->ti->type->end_io;
941 
942 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
943 		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
944 		    !bio->bi_disk->queue->limits.max_write_same_sectors)
945 			disable_write_same(md);
946 		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
947 		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
948 			disable_write_zeroes(md);
949 	}
950 
951 	if (endio) {
952 		int r = endio(tio->ti, bio, &error);
953 		switch (r) {
954 		case DM_ENDIO_REQUEUE:
955 			error = BLK_STS_DM_REQUEUE;
956 			/*FALLTHRU*/
957 		case DM_ENDIO_DONE:
958 			break;
959 		case DM_ENDIO_INCOMPLETE:
960 			/* The target will handle the io */
961 			return;
962 		default:
963 			DMWARN("unimplemented target endio return value: %d", r);
964 			BUG();
965 		}
966 	}
967 
968 	free_tio(tio);
969 	dec_pending(io, error);
970 }
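
/*
 * Editor's note: illustrative sketch, not part of dm.c.  A hypothetical
 * target end_io hook showing the return values clone_endio() understands:
 * DM_ENDIO_DONE completes the io, DM_ENDIO_REQUEUE pushes it back for
 * resubmission and DM_ENDIO_INCOMPLETE leaves ownership with the target.
 */
static int example_end_io(struct dm_target *ti, struct bio *bio,
			  blk_status_t *error)
{
	/* hypothetical policy: requeue failed writes, accept everything else */
	if (*error == BLK_STS_IOERR && op_is_write(bio_op(bio)))
		return DM_ENDIO_REQUEUE;

	return DM_ENDIO_DONE;
}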
971 
972 /*
973  * Return maximum size of I/O possible at the supplied sector up to the current
974  * target boundary.
975  */
976 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
977 {
978 	sector_t target_offset = dm_target_offset(ti, sector);
979 
980 	return ti->len - target_offset;
981 }
982 
983 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
984 {
985 	sector_t len = max_io_len_target_boundary(sector, ti);
986 	sector_t offset, max_len;
987 
988 	/*
989 	 * Does the target need to split even further?
990 	 */
991 	if (ti->max_io_len) {
992 		offset = dm_target_offset(ti, sector);
993 		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
994 			max_len = sector_div(offset, ti->max_io_len);
995 		else
996 			max_len = offset & (ti->max_io_len - 1);
997 		max_len = ti->max_io_len - max_len;
998 
999 		if (len > max_len)
1000 			len = max_len;
1001 	}
1002 
1003 	return len;
1004 }
1005 
1006 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1007 {
1008 	if (len > UINT_MAX) {
1009 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1010 		      (unsigned long long)len, UINT_MAX);
1011 		ti->error = "Maximum size of target IO is too large";
1012 		return -EINVAL;
1013 	}
1014 
1015 	/*
1016 	 * BIO based queue uses its own splitting. When multipage bvecs
1017 	 * are switched on, the incoming bio may be too big to be handled
1018 	 * by some targets, such as crypt.
1019 	 *
1020 	 * When these targets are ready for the big bio, we can remove
1021 	 * the limit.
1022 	 */
1023 	ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
1024 
1025 	return 0;
1026 }
1027 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
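
/*
 * Editor's note: illustrative sketch, not part of dm.c.  A hypothetical
 * constructor that caps the I/O size seen by its map function; dm core
 * then splits larger bios at the requested boundary.
 */
static int example_chunked_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;

	r = dm_set_target_max_io_len(ti, 128);	/* 128 sectors = 64KiB chunks */
	if (r)
		return r;

	ti->num_flush_bios = 1;
	return 0;
}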
1028 
1029 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1030 		sector_t sector, int *srcu_idx)
1031 {
1032 	struct dm_table *map;
1033 	struct dm_target *ti;
1034 
1035 	map = dm_get_live_table(md, srcu_idx);
1036 	if (!map)
1037 		return NULL;
1038 
1039 	ti = dm_table_find_target(map, sector);
1040 	if (!dm_target_is_valid(ti))
1041 		return NULL;
1042 
1043 	return ti;
1044 }
1045 
1046 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1047 		long nr_pages, void **kaddr, pfn_t *pfn)
1048 {
1049 	struct mapped_device *md = dax_get_private(dax_dev);
1050 	sector_t sector = pgoff * PAGE_SECTORS;
1051 	struct dm_target *ti;
1052 	long len, ret = -EIO;
1053 	int srcu_idx;
1054 
1055 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1056 
1057 	if (!ti)
1058 		goto out;
1059 	if (!ti->type->direct_access)
1060 		goto out;
1061 	len = max_io_len(sector, ti) / PAGE_SECTORS;
1062 	if (len < 1)
1063 		goto out;
1064 	nr_pages = min(len, nr_pages);
1065 	if (ti->type->direct_access)
1066 		ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1067 
1068  out:
1069 	dm_put_live_table(md, srcu_idx);
1070 
1071 	return ret;
1072 }
1073 
1074 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1075 		void *addr, size_t bytes, struct iov_iter *i)
1076 {
1077 	struct mapped_device *md = dax_get_private(dax_dev);
1078 	sector_t sector = pgoff * PAGE_SECTORS;
1079 	struct dm_target *ti;
1080 	long ret = 0;
1081 	int srcu_idx;
1082 
1083 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1084 
1085 	if (!ti)
1086 		goto out;
1087 	if (!ti->type->dax_copy_from_iter) {
1088 		ret = copy_from_iter(addr, bytes, i);
1089 		goto out;
1090 	}
1091 	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
1092  out:
1093 	dm_put_live_table(md, srcu_idx);
1094 
1095 	return ret;
1096 }
1097 
1098 /*
1099  * A target may call dm_accept_partial_bio only from the map routine.  It is
1100  * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
1101  *
1102  * dm_accept_partial_bio informs the dm that the target wants to process only
1103  * the next n_sectors sectors of the bio; the rest of the data should be
1104  * sent in a subsequent bio.
1105  *
1106  * A diagram that explains the arithmetic:
1107  * +--------------------+---------------+-------+
1108  * |         1          |       2       |   3   |
1109  * +--------------------+---------------+-------+
1110  *
1111  * <-------------- *tio->len_ptr --------------->
1112  *                      <------- bi_size ------->
1113  *                      <-- n_sectors -->
1114  *
1115  * Region 1 was already iterated over with bio_advance or similar function.
1116  *	(it may be empty if the target doesn't use bio_advance)
1117  * Region 2 is the remaining bio size that the target wants to process.
1118  *	(it may be empty if region 1 is non-empty, although there is no reason
1119  *	 to make it empty)
1120  * The target requires that region 3 is to be sent in the next bio.
1121  *
1122  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1123  * the partially processed part (the sum of regions 1+2) must be the same for all
1124  * copies of the bio.
1125  */
1126 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1127 {
1128 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1129 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1130 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1131 	BUG_ON(bi_size > *tio->len_ptr);
1132 	BUG_ON(n_sectors > bi_size);
1133 	*tio->len_ptr -= bi_size - n_sectors;
1134 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1135 }
1136 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
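
/*
 * Editor's note: illustrative sketch, not part of dm.c.  A hypothetical
 * map function that never crosses an 8-sector boundary: it accepts only
 * the sectors up to the boundary (region 2 above) and dm core resubmits
 * the remainder (region 3) as a new bio.  struct example_ctx and its
 * 'dev' member are made-up target state.
 */
struct example_ctx {
	struct dm_dev *dev;
};

static int example_bounded_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ec = ti->private;
	unsigned chunk = 8;	/* hypothetical boundary, in sectors */
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = chunk - ((unsigned)offset & (chunk - 1));

	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	bio_set_dev(bio, ec->dev->bdev);	/* sector remapping omitted */
	return DM_MAPIO_REMAPPED;
}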
1137 
1138 /*
1139  * The zone descriptors obtained with a zone report indicate
1140  * zone positions within the target device. The zone descriptors
1141  * must be remapped to match their position within the dm device.
1142  * A target may call dm_remap_zone_report after completion of a
1143  * REQ_OP_ZONE_REPORT bio to remap the zone descriptors from the
1144  * target device's mapping to the dm device.
1145  */
1146 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
1147 {
1148 #ifdef CONFIG_BLK_DEV_ZONED
1149 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1150 	struct bio *report_bio = tio->io->orig_bio;
1151 	struct blk_zone_report_hdr *hdr = NULL;
1152 	struct blk_zone *zone;
1153 	unsigned int nr_rep = 0;
1154 	unsigned int ofst;
1155 	struct bio_vec bvec;
1156 	struct bvec_iter iter;
1157 	void *addr;
1158 
1159 	if (bio->bi_status)
1160 		return;
1161 
1162 	/*
1163 	 * Remap the start sector of the reported zones. For sequential zones,
1164 	 * also remap the write pointer position.
1165 	 */
1166 	bio_for_each_segment(bvec, report_bio, iter) {
1167 		addr = kmap_atomic(bvec.bv_page);
1168 
1169 		/* Remember the report header in the first page */
1170 		if (!hdr) {
1171 			hdr = addr;
1172 			ofst = sizeof(struct blk_zone_report_hdr);
1173 		} else
1174 			ofst = 0;
1175 
1176 		/* Set zones start sector */
1177 		while (hdr->nr_zones && ofst < bvec.bv_len) {
1178 			zone = addr + ofst;
1179 			if (zone->start >= start + ti->len) {
1180 				hdr->nr_zones = 0;
1181 				break;
1182 			}
1183 			zone->start = zone->start + ti->begin - start;
1184 			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
1185 				if (zone->cond == BLK_ZONE_COND_FULL)
1186 					zone->wp = zone->start + zone->len;
1187 				else if (zone->cond == BLK_ZONE_COND_EMPTY)
1188 					zone->wp = zone->start;
1189 				else
1190 					zone->wp = zone->wp + ti->begin - start;
1191 			}
1192 			ofst += sizeof(struct blk_zone);
1193 			hdr->nr_zones--;
1194 			nr_rep++;
1195 		}
1196 
1197 		if (addr != hdr)
1198 			kunmap_atomic(addr);
1199 
1200 		if (!hdr->nr_zones)
1201 			break;
1202 	}
1203 
1204 	if (hdr) {
1205 		hdr->nr_zones = nr_rep;
1206 		kunmap_atomic(hdr);
1207 	}
1208 
1209 	bio_advance(report_bio, report_bio->bi_iter.bi_size);
1210 
1211 #else /* !CONFIG_BLK_DEV_ZONED */
1212 	bio->bi_status = BLK_STS_NOTSUPP;
1213 #endif
1214 }
1215 EXPORT_SYMBOL_GPL(dm_remap_zone_report);
1216 
1217 static blk_qc_t __map_bio(struct dm_target_io *tio)
1218 {
1219 	int r;
1220 	sector_t sector;
1221 	struct bio *clone = &tio->clone;
1222 	struct dm_io *io = tio->io;
1223 	struct mapped_device *md = io->md;
1224 	struct dm_target *ti = tio->ti;
1225 	blk_qc_t ret = BLK_QC_T_NONE;
1226 
1227 	clone->bi_end_io = clone_endio;
1228 
1229 	/*
1230 	 * Map the clone.  If r == 0 we don't need to do
1231 	 * anything, the target has assumed ownership of
1232 	 * this io.
1233 	 */
1234 	atomic_inc(&io->io_count);
1235 	sector = clone->bi_iter.bi_sector;
1236 
1237 	r = ti->type->map(ti, clone);
1238 	switch (r) {
1239 	case DM_MAPIO_SUBMITTED:
1240 		break;
1241 	case DM_MAPIO_REMAPPED:
1242 		/* the bio has been remapped so dispatch it */
1243 		trace_block_bio_remap(clone->bi_disk->queue, clone,
1244 				      bio_dev(io->orig_bio), sector);
1245 		if (md->type == DM_TYPE_NVME_BIO_BASED)
1246 			ret = direct_make_request(clone);
1247 		else
1248 			ret = generic_make_request(clone);
1249 		break;
1250 	case DM_MAPIO_KILL:
1251 		free_tio(tio);
1252 		dec_pending(io, BLK_STS_IOERR);
1253 		break;
1254 	case DM_MAPIO_REQUEUE:
1255 		free_tio(tio);
1256 		dec_pending(io, BLK_STS_DM_REQUEUE);
1257 		break;
1258 	default:
1259 		DMWARN("unimplemented target map return value: %d", r);
1260 		BUG();
1261 	}
1262 
1263 	return ret;
1264 }
1265 
1266 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1267 {
1268 	bio->bi_iter.bi_sector = sector;
1269 	bio->bi_iter.bi_size = to_bytes(len);
1270 }
1271 
1272 /*
1273  * Creates a bio that consists of range of complete bvecs.
1274  */
1275 static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1276 		     sector_t sector, unsigned len)
1277 {
1278 	struct bio *clone = &tio->clone;
1279 
1280 	__bio_clone_fast(clone, bio);
1281 
1282 	if (unlikely(bio_integrity(bio) != NULL)) {
1283 		int r;
1284 
1285 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1286 			     !dm_target_passes_integrity(tio->ti->type))) {
1287 			DMWARN("%s: the target %s doesn't support integrity data.",
1288 				dm_device_name(tio->io->md),
1289 				tio->ti->type->name);
1290 			return -EIO;
1291 		}
1292 
1293 		r = bio_integrity_clone(clone, bio, GFP_NOIO);
1294 		if (r < 0)
1295 			return r;
1296 	}
1297 
1298 	if (bio_op(bio) != REQ_OP_ZONE_REPORT)
1299 		bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1300 	clone->bi_iter.bi_size = to_bytes(len);
1301 
1302 	if (unlikely(bio_integrity(bio) != NULL))
1303 		bio_integrity_trim(clone);
1304 
1305 	return 0;
1306 }
1307 
1308 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1309 				struct dm_target *ti, unsigned num_bios)
1310 {
1311 	struct dm_target_io *tio;
1312 	int try;
1313 
1314 	if (!num_bios)
1315 		return;
1316 
1317 	if (num_bios == 1) {
1318 		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1319 		bio_list_add(blist, &tio->clone);
1320 		return;
1321 	}
1322 
1323 	for (try = 0; try < 2; try++) {
1324 		int bio_nr;
1325 		struct bio *bio;
1326 
1327 		if (try)
1328 			mutex_lock(&ci->io->md->table_devices_lock);
1329 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1330 			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1331 			if (!tio)
1332 				break;
1333 
1334 			bio_list_add(blist, &tio->clone);
1335 		}
1336 		if (try)
1337 			mutex_unlock(&ci->io->md->table_devices_lock);
1338 		if (bio_nr == num_bios)
1339 			return;
1340 
1341 		while ((bio = bio_list_pop(blist))) {
1342 			tio = container_of(bio, struct dm_target_io, clone);
1343 			free_tio(tio);
1344 		}
1345 	}
1346 }
1347 
1348 static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
1349 					   struct dm_target_io *tio, unsigned *len)
1350 {
1351 	struct bio *clone = &tio->clone;
1352 
1353 	tio->len_ptr = len;
1354 
1355 	__bio_clone_fast(clone, ci->bio);
1356 	if (len)
1357 		bio_setup_sector(clone, ci->sector, *len);
1358 
1359 	return __map_bio(tio);
1360 }
1361 
1362 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1363 				  unsigned num_bios, unsigned *len)
1364 {
1365 	struct bio_list blist = BIO_EMPTY_LIST;
1366 	struct bio *bio;
1367 	struct dm_target_io *tio;
1368 
1369 	alloc_multiple_bios(&blist, ci, ti, num_bios);
1370 
1371 	while ((bio = bio_list_pop(&blist))) {
1372 		tio = container_of(bio, struct dm_target_io, clone);
1373 		(void) __clone_and_map_simple_bio(ci, tio, len);
1374 	}
1375 }
1376 
1377 static int __send_empty_flush(struct clone_info *ci)
1378 {
1379 	unsigned target_nr = 0;
1380 	struct dm_target *ti;
1381 
1382 	BUG_ON(bio_has_data(ci->bio));
1383 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1384 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1385 
1386 	return 0;
1387 }
1388 
1389 static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1390 				    sector_t sector, unsigned *len)
1391 {
1392 	struct bio *bio = ci->bio;
1393 	struct dm_target_io *tio;
1394 	int r;
1395 
1396 	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1397 	tio->len_ptr = len;
1398 	r = clone_bio(tio, bio, sector, *len);
1399 	if (r < 0) {
1400 		free_tio(tio);
1401 		return r;
1402 	}
1403 	(void) __map_bio(tio);
1404 
1405 	return 0;
1406 }
1407 
1408 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1409 
1410 static unsigned get_num_discard_bios(struct dm_target *ti)
1411 {
1412 	return ti->num_discard_bios;
1413 }
1414 
1415 static unsigned get_num_write_same_bios(struct dm_target *ti)
1416 {
1417 	return ti->num_write_same_bios;
1418 }
1419 
1420 static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
1421 {
1422 	return ti->num_write_zeroes_bios;
1423 }
1424 
1425 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1426 
1427 static bool is_split_required_for_discard(struct dm_target *ti)
1428 {
1429 	return ti->split_discard_bios;
1430 }
1431 
1432 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1433 				       get_num_bios_fn get_num_bios,
1434 				       is_split_required_fn is_split_required)
1435 {
1436 	unsigned len;
1437 	unsigned num_bios;
1438 
1439 	/*
1440 	 * Even though the device advertised support for this type of
1441 	 * request, that does not mean every target supports it, and
1442 	 * reconfiguration might also have changed that since the
1443 	 * check was performed.
1444 	 */
1445 	num_bios = get_num_bios ? get_num_bios(ti) : 0;
1446 	if (!num_bios)
1447 		return -EOPNOTSUPP;
1448 
1449 	if (is_split_required && !is_split_required(ti))
1450 		len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1451 	else
1452 		len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1453 
1454 	__send_duplicate_bios(ci, ti, num_bios, &len);
1455 
1456 	ci->sector += len;
1457 	ci->sector_count -= len;
1458 
1459 	return 0;
1460 }
1461 
1462 static int __send_discard(struct clone_info *ci, struct dm_target *ti)
1463 {
1464 	return __send_changing_extent_only(ci, ti, get_num_discard_bios,
1465 					   is_split_required_for_discard);
1466 }
1467 
1468 static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
1469 {
1470 	return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
1471 }
1472 
1473 static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
1474 {
1475 	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
1476 }
1477 
1478 /*
1479  * Select the correct strategy for processing a non-flush bio.
1480  */
1481 static int __split_and_process_non_flush(struct clone_info *ci)
1482 {
1483 	struct bio *bio = ci->bio;
1484 	struct dm_target *ti;
1485 	unsigned len;
1486 	int r;
1487 
1488 	ti = dm_table_find_target(ci->map, ci->sector);
1489 	if (!dm_target_is_valid(ti))
1490 		return -EIO;
1491 
1492 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1493 		return __send_discard(ci, ti);
1494 	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1495 		return __send_write_same(ci, ti);
1496 	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
1497 		return __send_write_zeroes(ci, ti);
1498 
1499 	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
1500 		len = ci->sector_count;
1501 	else
1502 		len = min_t(sector_t, max_io_len(ci->sector, ti),
1503 			    ci->sector_count);
1504 
1505 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1506 	if (r < 0)
1507 		return r;
1508 
1509 	ci->sector += len;
1510 	ci->sector_count -= len;
1511 
1512 	return 0;
1513 }
1514 
1515 static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1516 			    struct dm_table *map, struct bio *bio)
1517 {
1518 	ci->map = map;
1519 	ci->io = alloc_io(md, bio);
1520 	ci->sector = bio->bi_iter.bi_sector;
1521 }
1522 
1523 /*
1524  * Entry point to split a bio into clones and submit them to the targets.
1525  */
1526 static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1527 					struct dm_table *map, struct bio *bio)
1528 {
1529 	struct clone_info ci;
1530 	blk_qc_t ret = BLK_QC_T_NONE;
1531 	int error = 0;
1532 
1533 	if (unlikely(!map)) {
1534 		bio_io_error(bio);
1535 		return ret;
1536 	}
1537 
1538 	init_clone_info(&ci, md, map, bio);
1539 
1540 	if (bio->bi_opf & REQ_PREFLUSH) {
1541 		ci.bio = &ci.io->md->flush_bio;
1542 		ci.sector_count = 0;
1543 		error = __send_empty_flush(&ci);
1544 		/* dec_pending submits any data associated with flush */
1545 	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
1546 		ci.bio = bio;
1547 		ci.sector_count = 0;
1548 		error = __split_and_process_non_flush(&ci);
1549 	} else {
1550 		ci.bio = bio;
1551 		ci.sector_count = bio_sectors(bio);
1552 		while (ci.sector_count && !error) {
1553 			error = __split_and_process_non_flush(&ci);
1554 			if (current->bio_list && ci.sector_count && !error) {
1555 				/*
1556 				 * Remainder must be passed to generic_make_request()
1557 				 * so that it gets handled *after* bios already submitted
1558 				 * have been completely processed.
1559 				 * We take a clone of the original to store in
1560 				 * ci.io->orig_bio to be used by end_io_acct() and
1561 				 * for dec_pending to use for completion handling.
1562 				 * As this path is not used for REQ_OP_ZONE_REPORT,
1563 				 * the usage of io->orig_bio in dm_remap_zone_report()
1564 				 * won't be affected by this reassignment.
1565 				 */
1566 				struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
1567 								 md->queue->bio_split);
1568 				ci.io->orig_bio = b;
1569 				bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
1570 				bio_chain(b, bio);
1571 				ret = generic_make_request(bio);
1572 				break;
1573 			}
1574 		}
1575 	}
1576 
1577 	/* drop the extra reference count */
1578 	dec_pending(ci.io, errno_to_blk_status(error));
1579 	return ret;
1580 }
1581 
1582 /*
1583  * Optimized variant of __split_and_process_bio that leverages the
1584  * fact that targets that use it do _not_ have a need to split bios.
1585  */
1586 static blk_qc_t __process_bio(struct mapped_device *md,
1587 			      struct dm_table *map, struct bio *bio)
1588 {
1589 	struct clone_info ci;
1590 	blk_qc_t ret = BLK_QC_T_NONE;
1591 	int error = 0;
1592 
1593 	if (unlikely(!map)) {
1594 		bio_io_error(bio);
1595 		return ret;
1596 	}
1597 
1598 	init_clone_info(&ci, md, map, bio);
1599 
1600 	if (bio->bi_opf & REQ_PREFLUSH) {
1601 		ci.bio = &ci.io->md->flush_bio;
1602 		ci.sector_count = 0;
1603 		error = __send_empty_flush(&ci);
1604 		/* dec_pending submits any data associated with flush */
1605 	} else {
1606 		struct dm_target *ti = md->immutable_target;
1607 		struct dm_target_io *tio;
1608 
1609 		/*
1610 		 * Defend against IO still getting in during teardown
1611 		 * - as was seen for a time with nvme-fcloop
1612 		 */
1613 		if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
1614 			error = -EIO;
1615 			goto out;
1616 		}
1617 
1618 		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
1619 		ci.bio = bio;
1620 		ci.sector_count = bio_sectors(bio);
1621 		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
1622 	}
1623 out:
1624 	/* drop the extra reference count */
1625 	dec_pending(ci.io, errno_to_blk_status(error));
1626 	return ret;
1627 }
1628 
1629 typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
1630 
1631 static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
1632 				  process_bio_fn process_bio)
1633 {
1634 	struct mapped_device *md = q->queuedata;
1635 	blk_qc_t ret = BLK_QC_T_NONE;
1636 	int srcu_idx;
1637 	struct dm_table *map;
1638 
1639 	map = dm_get_live_table(md, &srcu_idx);
1640 
1641 	/* if we're suspended, we have to queue this io for later */
1642 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1643 		dm_put_live_table(md, srcu_idx);
1644 
1645 		if (!(bio->bi_opf & REQ_RAHEAD))
1646 			queue_io(md, bio);
1647 		else
1648 			bio_io_error(bio);
1649 		return ret;
1650 	}
1651 
1652 	ret = process_bio(md, map, bio);
1653 
1654 	dm_put_live_table(md, srcu_idx);
1655 	return ret;
1656 }
1657 
1658 /*
1659  * The request function that remaps the bio to one target and
1660  * splits off any remainder.
1661  */
1662 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1663 {
1664 	return __dm_make_request(q, bio, __split_and_process_bio);
1665 }
1666 
1667 static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
1668 {
1669 	return __dm_make_request(q, bio, __process_bio);
1670 }
1671 
1672 static int dm_any_congested(void *congested_data, int bdi_bits)
1673 {
1674 	int r = bdi_bits;
1675 	struct mapped_device *md = congested_data;
1676 	struct dm_table *map;
1677 
1678 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1679 		if (dm_request_based(md)) {
1680 			/*
1681 			 * With request-based DM we only need to check the
1682 			 * top-level queue for congestion.
1683 			 */
1684 			r = md->queue->backing_dev_info->wb.state & bdi_bits;
1685 		} else {
1686 			map = dm_get_live_table_fast(md);
1687 			if (map)
1688 				r = dm_table_any_congested(map, bdi_bits);
1689 			dm_put_live_table_fast(md);
1690 		}
1691 	}
1692 
1693 	return r;
1694 }
1695 
1696 /*-----------------------------------------------------------------
1697  * An IDR is used to keep track of allocated minor numbers.
1698  *---------------------------------------------------------------*/
1699 static void free_minor(int minor)
1700 {
1701 	spin_lock(&_minor_lock);
1702 	idr_remove(&_minor_idr, minor);
1703 	spin_unlock(&_minor_lock);
1704 }
1705 
1706 /*
1707  * See if the device with a specific minor # is free.
1708  */
1709 static int specific_minor(int minor)
1710 {
1711 	int r;
1712 
1713 	if (minor >= (1 << MINORBITS))
1714 		return -EINVAL;
1715 
1716 	idr_preload(GFP_KERNEL);
1717 	spin_lock(&_minor_lock);
1718 
1719 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1720 
1721 	spin_unlock(&_minor_lock);
1722 	idr_preload_end();
1723 	if (r < 0)
1724 		return r == -ENOSPC ? -EBUSY : r;
1725 	return 0;
1726 }
1727 
1728 static int next_free_minor(int *minor)
1729 {
1730 	int r;
1731 
1732 	idr_preload(GFP_KERNEL);
1733 	spin_lock(&_minor_lock);
1734 
1735 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1736 
1737 	spin_unlock(&_minor_lock);
1738 	idr_preload_end();
1739 	if (r < 0)
1740 		return r;
1741 	*minor = r;
1742 	return 0;
1743 }
1744 
1745 static const struct block_device_operations dm_blk_dops;
1746 static const struct dax_operations dm_dax_ops;
1747 
1748 static void dm_wq_work(struct work_struct *work);
1749 
1750 static void dm_init_normal_md_queue(struct mapped_device *md)
1751 {
1752 	md->use_blk_mq = false;
1753 
1754 	/*
1755 	 * Initialize aspects of queue that aren't relevant for blk-mq
1756 	 */
1757 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
1758 }
1759 
1760 static void cleanup_mapped_device(struct mapped_device *md)
1761 {
1762 	if (md->wq)
1763 		destroy_workqueue(md->wq);
1764 	if (md->kworker_task)
1765 		kthread_stop(md->kworker_task);
1766 	if (md->bs)
1767 		bioset_free(md->bs);
1768 	if (md->io_bs)
1769 		bioset_free(md->io_bs);
1770 
1771 	if (md->dax_dev) {
1772 		kill_dax(md->dax_dev);
1773 		put_dax(md->dax_dev);
1774 		md->dax_dev = NULL;
1775 	}
1776 
1777 	if (md->disk) {
1778 		spin_lock(&_minor_lock);
1779 		md->disk->private_data = NULL;
1780 		spin_unlock(&_minor_lock);
1781 		del_gendisk(md->disk);
1782 		put_disk(md->disk);
1783 	}
1784 
1785 	if (md->queue)
1786 		blk_cleanup_queue(md->queue);
1787 
1788 	cleanup_srcu_struct(&md->io_barrier);
1789 
1790 	if (md->bdev) {
1791 		bdput(md->bdev);
1792 		md->bdev = NULL;
1793 	}
1794 
1795 	mutex_destroy(&md->suspend_lock);
1796 	mutex_destroy(&md->type_lock);
1797 	mutex_destroy(&md->table_devices_lock);
1798 
1799 	dm_mq_cleanup_mapped_device(md);
1800 }
1801 
1802 /*
1803  * Allocate and initialise a blank device with a given minor.
1804  */
1805 static struct mapped_device *alloc_dev(int minor)
1806 {
1807 	int r, numa_node_id = dm_get_numa_node();
1808 	struct dax_device *dax_dev;
1809 	struct mapped_device *md;
1810 	void *old_md;
1811 
1812 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1813 	if (!md) {
1814 		DMWARN("unable to allocate device, out of memory.");
1815 		return NULL;
1816 	}
1817 
1818 	if (!try_module_get(THIS_MODULE))
1819 		goto bad_module_get;
1820 
1821 	/* get a minor number for the dev */
1822 	if (minor == DM_ANY_MINOR)
1823 		r = next_free_minor(&minor);
1824 	else
1825 		r = specific_minor(minor);
1826 	if (r < 0)
1827 		goto bad_minor;
1828 
1829 	r = init_srcu_struct(&md->io_barrier);
1830 	if (r < 0)
1831 		goto bad_io_barrier;
1832 
1833 	md->numa_node_id = numa_node_id;
1834 	md->use_blk_mq = dm_use_blk_mq_default();
1835 	md->init_tio_pdu = false;
1836 	md->type = DM_TYPE_NONE;
1837 	mutex_init(&md->suspend_lock);
1838 	mutex_init(&md->type_lock);
1839 	mutex_init(&md->table_devices_lock);
1840 	spin_lock_init(&md->deferred_lock);
1841 	atomic_set(&md->holders, 1);
1842 	atomic_set(&md->open_count, 0);
1843 	atomic_set(&md->event_nr, 0);
1844 	atomic_set(&md->uevent_seq, 0);
1845 	INIT_LIST_HEAD(&md->uevent_list);
1846 	INIT_LIST_HEAD(&md->table_devices);
1847 	spin_lock_init(&md->uevent_lock);
1848 
1849 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
1850 	if (!md->queue)
1851 		goto bad;
1852 	md->queue->queuedata = md;
1853 	md->queue->backing_dev_info->congested_data = md;
1854 
1855 	md->disk = alloc_disk_node(1, md->numa_node_id);
1856 	if (!md->disk)
1857 		goto bad;
1858 
1859 	atomic_set(&md->pending[0], 0);
1860 	atomic_set(&md->pending[1], 0);
1861 	init_waitqueue_head(&md->wait);
1862 	INIT_WORK(&md->work, dm_wq_work);
1863 	init_waitqueue_head(&md->eventq);
1864 	init_completion(&md->kobj_holder.completion);
1865 	md->kworker_task = NULL;
1866 
1867 	md->disk->major = _major;
1868 	md->disk->first_minor = minor;
1869 	md->disk->fops = &dm_blk_dops;
1870 	md->disk->queue = md->queue;
1871 	md->disk->private_data = md;
1872 	sprintf(md->disk->disk_name, "dm-%d", minor);
1873 
1874 	dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
1875 	if (!dax_dev)
1876 		goto bad;
1877 	md->dax_dev = dax_dev;
1878 
1879 	add_disk_no_queue_reg(md->disk);
1880 	format_dev_t(md->name, MKDEV(_major, minor));
1881 
1882 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1883 	if (!md->wq)
1884 		goto bad;
1885 
1886 	md->bdev = bdget_disk(md->disk, 0);
1887 	if (!md->bdev)
1888 		goto bad;
1889 
1890 	bio_init(&md->flush_bio, NULL, 0);
1891 	bio_set_dev(&md->flush_bio, md->bdev);
1892 	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1893 
1894 	dm_stats_init(&md->stats);
1895 
1896 	/* Populate the mapping, nobody knows we exist yet */
1897 	spin_lock(&_minor_lock);
1898 	old_md = idr_replace(&_minor_idr, md, minor);
1899 	spin_unlock(&_minor_lock);
1900 
1901 	BUG_ON(old_md != MINOR_ALLOCED);
1902 
1903 	return md;
1904 
1905 bad:
1906 	cleanup_mapped_device(md);
1907 bad_io_barrier:
1908 	free_minor(minor);
1909 bad_minor:
1910 	module_put(THIS_MODULE);
1911 bad_module_get:
1912 	kvfree(md);
1913 	return NULL;
1914 }
1915 
1916 static void unlock_fs(struct mapped_device *md);
1917 
1918 static void free_dev(struct mapped_device *md)
1919 {
1920 	int minor = MINOR(disk_devt(md->disk));
1921 
1922 	unlock_fs(md);
1923 
1924 	cleanup_mapped_device(md);
1925 
1926 	free_table_devices(&md->table_devices);
1927 	dm_stats_cleanup(&md->stats);
1928 	free_minor(minor);
1929 
1930 	module_put(THIS_MODULE);
1931 	kvfree(md);
1932 }
1933 
1934 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1935 {
1936 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1937 
1938 	if (dm_table_bio_based(t)) {
1939 		/*
1940 		 * The md may already have mempools that need changing.
1941 		 * If so, reload the bioset because front_pad may have changed
1942 		 * when a different table was loaded.
1943 		 */
1944 		if (md->bs) {
1945 			bioset_free(md->bs);
1946 			md->bs = NULL;
1947 		}
1948 		if (md->io_bs) {
1949 			bioset_free(md->io_bs);
1950 			md->io_bs = NULL;
1951 		}
1952 
1953 	} else if (md->bs) {
1954 		/*
1955 		 * There's no need to reload with request-based dm because
1956 		 * the size of front_pad doesn't change.
1957 		 * Note for the future: if the bioset is ever reloaded here,
1958 		 * prepped requests in the queue may still refer to bios
1959 		 * from the old bioset, so the queue must be walked to
1960 		 * unprep them.
1961 		 */
1962 		goto out;
1963 	}
1964 
1965 	BUG_ON(!p || md->bs || md->io_bs);
1966 
1967 	md->bs = p->bs;
1968 	p->bs = NULL;
1969 	md->io_bs = p->io_bs;
1970 	p->io_bs = NULL;
1971 out:
1972 	/* mempool bind completed, no longer need any mempools in the table */
1973 	dm_table_free_md_mempools(t);
1974 }
1975 
1976 /*
1977  * Bind a table to the device.
1978  */
1979 static void event_callback(void *context)
1980 {
1981 	unsigned long flags;
1982 	LIST_HEAD(uevents);
1983 	struct mapped_device *md = (struct mapped_device *) context;
1984 
1985 	spin_lock_irqsave(&md->uevent_lock, flags);
1986 	list_splice_init(&md->uevent_list, &uevents);
1987 	spin_unlock_irqrestore(&md->uevent_lock, flags);
1988 
1989 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1990 
1991 	atomic_inc(&md->event_nr);
1992 	wake_up(&md->eventq);
1993 	dm_issue_global_event();
1994 }
1995 
1996 /*
1997  * Protected by md->suspend_lock obtained by dm_swap_table().
1998  */
1999 static void __set_size(struct mapped_device *md, sector_t size)
2000 {
2001 	lockdep_assert_held(&md->suspend_lock);
2002 
2003 	set_capacity(md->disk, size);
2004 
2005 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2006 }
2007 
2008 /*
2009  * Returns old map, which caller must destroy.
2010  */
2011 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2012 			       struct queue_limits *limits)
2013 {
2014 	struct dm_table *old_map;
2015 	struct request_queue *q = md->queue;
2016 	bool request_based = dm_table_request_based(t);
2017 	sector_t size;
2018 
2019 	lockdep_assert_held(&md->suspend_lock);
2020 
2021 	size = dm_table_get_size(t);
2022 
2023 	/*
2024 	 * Wipe any geometry if the size of the table changed.
2025 	 */
2026 	if (size != dm_get_size(md))
2027 		memset(&md->geometry, 0, sizeof(md->geometry));
2028 
2029 	__set_size(md, size);
2030 
2031 	dm_table_event_callback(t, event_callback, md);
2032 
2033 	/*
2034 	 * If the old table type wasn't request-based, the queue has not been
2035 	 * stopped during suspension.  So stop it now to prevent I/O from
2036 	 * being mapped before resume.
2037 	 * This must be done before setting the queue restrictions, because
2038 	 * request-based dm may start running right after they are set.
2039 	 */
2040 	if (request_based)
2041 		dm_stop_queue(q);
2042 
2043 	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
2044 		/*
2045 		 * Leverage the fact that request-based DM targets and
2046 		 * NVMe bio-based targets are immutable singletons
2047 		 * - used to optimize dm_request_fn, dm_mq_queue_rq and
2048 		 *   __process_bio.
2049 		 */
2050 		md->immutable_target = dm_table_get_immutable_target(t);
2051 	}
2052 
2053 	__bind_mempools(md, t);
2054 
2055 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2056 	rcu_assign_pointer(md->map, (void *)t);
2057 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2058 
2059 	dm_table_set_restrictions(t, q, limits);
2060 	if (old_map)
2061 		dm_sync_table(md);
2062 
2063 	return old_map;
2064 }
2065 
2066 /*
2067  * Returns unbound table for the caller to free.
2068  */
2069 static struct dm_table *__unbind(struct mapped_device *md)
2070 {
2071 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2072 
2073 	if (!map)
2074 		return NULL;
2075 
2076 	dm_table_event_callback(map, NULL, NULL);
2077 	RCU_INIT_POINTER(md->map, NULL);
2078 	dm_sync_table(md);
2079 
2080 	return map;
2081 }
2082 
2083 /*
2084  * Constructor for a new device.
2085  */
2086 int dm_create(int minor, struct mapped_device **result)
2087 {
2088 	int r;
2089 	struct mapped_device *md;
2090 
2091 	md = alloc_dev(minor);
2092 	if (!md)
2093 		return -ENXIO;
2094 
2095 	r = dm_sysfs_init(md);
2096 	if (r) {
2097 		free_dev(md);
2098 		return r;
2099 	}
2100 
2101 	*result = md;
2102 	return 0;
2103 }
2104 
2105 /*
2106  * Functions to manage md->type.
2107  * Callers must hold md->type_lock when getting or setting the type.
2108  */
2109 void dm_lock_md_type(struct mapped_device *md)
2110 {
2111 	mutex_lock(&md->type_lock);
2112 }
2113 
2114 void dm_unlock_md_type(struct mapped_device *md)
2115 {
2116 	mutex_unlock(&md->type_lock);
2117 }
2118 
2119 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2120 {
2121 	BUG_ON(!mutex_is_locked(&md->type_lock));
2122 	md->type = type;
2123 }
2124 
2125 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2126 {
2127 	return md->type;
2128 }
2129 
2130 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2131 {
2132 	return md->immutable_target_type;
2133 }
2134 
2135 /*
2136  * The queue_limits are only valid as long as you have a reference
2137  * count on 'md'.
2138  */
2139 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2140 {
2141 	BUG_ON(!atomic_read(&md->holders));
2142 	return &md->queue->limits;
2143 }
2144 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2145 
2146 /*
2147  * Set up the DM device's queue based on md's type
2148  */
2149 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2150 {
2151 	int r;
2152 	struct queue_limits limits;
2153 	enum dm_queue_mode type = dm_get_md_type(md);
2154 
2155 	switch (type) {
2156 	case DM_TYPE_REQUEST_BASED:
2157 		dm_init_normal_md_queue(md);
2158 		r = dm_old_init_request_queue(md, t);
2159 		if (r) {
2160 			DMERR("Cannot initialize queue for request-based mapped device");
2161 			return r;
2162 		}
2163 		break;
2164 	case DM_TYPE_MQ_REQUEST_BASED:
2165 		r = dm_mq_init_request_queue(md, t);
2166 		if (r) {
2167 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2168 			return r;
2169 		}
2170 		break;
2171 	case DM_TYPE_BIO_BASED:
2172 	case DM_TYPE_DAX_BIO_BASED:
2173 		dm_init_normal_md_queue(md);
2174 		blk_queue_make_request(md->queue, dm_make_request);
2175 		break;
2176 	case DM_TYPE_NVME_BIO_BASED:
2177 		dm_init_normal_md_queue(md);
2178 		blk_queue_make_request(md->queue, dm_make_request_nvme);
2179 		break;
2180 	case DM_TYPE_NONE:
2181 		WARN_ON_ONCE(true);
2182 		break;
2183 	}
2184 
2185 	r = dm_calculate_queue_limits(t, &limits);
2186 	if (r) {
2187 		DMERR("Cannot calculate initial queue limits");
2188 		return r;
2189 	}
2190 	dm_table_set_restrictions(t, md->queue, &limits);
2191 	blk_register_queue(md->disk);
2192 
2193 	return 0;
2194 }
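
/*
 * Illustrative usage (a rough sketch, not dm-ioctl's actual code): a caller
 * that creates a fresh device is expected to pair the helpers above roughly
 * like this, holding md->type_lock while the type and queue are established:
 *
 *	struct mapped_device *md;
 *	int r = dm_create(DM_ANY_MINOR, &md);
 *	if (r)
 *		return r;
 *	dm_lock_md_type(md);
 *	dm_set_md_type(md, dm_table_get_type(t));
 *	r = dm_setup_md_queue(md, t);
 *	dm_unlock_md_type(md);
 */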
2195 
2196 struct mapped_device *dm_get_md(dev_t dev)
2197 {
2198 	struct mapped_device *md;
2199 	unsigned minor = MINOR(dev);
2200 
2201 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2202 		return NULL;
2203 
2204 	spin_lock(&_minor_lock);
2205 
2206 	md = idr_find(&_minor_idr, minor);
2207 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2208 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2209 		md = NULL;
2210 		goto out;
2211 	}
2212 	dm_get(md);
2213 out:
2214 	spin_unlock(&_minor_lock);
2215 
2216 	return md;
2217 }
2218 EXPORT_SYMBOL_GPL(dm_get_md);
2219 
2220 void *dm_get_mdptr(struct mapped_device *md)
2221 {
2222 	return md->interface_ptr;
2223 }
2224 
2225 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2226 {
2227 	md->interface_ptr = ptr;
2228 }
2229 
2230 void dm_get(struct mapped_device *md)
2231 {
2232 	atomic_inc(&md->holders);
2233 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2234 }
2235 
2236 int dm_hold(struct mapped_device *md)
2237 {
2238 	spin_lock(&_minor_lock);
2239 	if (test_bit(DMF_FREEING, &md->flags)) {
2240 		spin_unlock(&_minor_lock);
2241 		return -EBUSY;
2242 	}
2243 	dm_get(md);
2244 	spin_unlock(&_minor_lock);
2245 	return 0;
2246 }
2247 EXPORT_SYMBOL_GPL(dm_hold);
2248 
2249 const char *dm_device_name(struct mapped_device *md)
2250 {
2251 	return md->name;
2252 }
2253 EXPORT_SYMBOL_GPL(dm_device_name);
2254 
2255 static void __dm_destroy(struct mapped_device *md, bool wait)
2256 {
2257 	struct dm_table *map;
2258 	int srcu_idx;
2259 
2260 	might_sleep();
2261 
2262 	spin_lock(&_minor_lock);
2263 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2264 	set_bit(DMF_FREEING, &md->flags);
2265 	spin_unlock(&_minor_lock);
2266 
2267 	blk_set_queue_dying(md->queue);
2268 
2269 	if (dm_request_based(md) && md->kworker_task)
2270 		kthread_flush_worker(&md->kworker);
2271 
2272 	/*
2273 	 * Take suspend_lock so that presuspend and postsuspend methods
2274 	 * do not race with internal suspend.
2275 	 */
2276 	mutex_lock(&md->suspend_lock);
2277 	map = dm_get_live_table(md, &srcu_idx);
2278 	if (!dm_suspended_md(md)) {
2279 		dm_table_presuspend_targets(map);
2280 		dm_table_postsuspend_targets(map);
2281 	}
2282 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2283 	dm_put_live_table(md, srcu_idx);
2284 	mutex_unlock(&md->suspend_lock);
2285 
2286 	/*
2287 	 * Rarely, I/O requests may still be completing at this point.
2288 	 * Wait for all references to disappear; nothing may increment the
2289 	 * reference count of the mapped_device once it has entered the
2290 	 * DMF_FREEING state.
2291 	 */
2292 	if (wait)
2293 		while (atomic_read(&md->holders))
2294 			msleep(1);
2295 	else if (atomic_read(&md->holders))
2296 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2297 		       dm_device_name(md), atomic_read(&md->holders));
2298 
2299 	dm_sysfs_exit(md);
2300 	dm_table_destroy(__unbind(md));
2301 	free_dev(md);
2302 }
2303 
2304 void dm_destroy(struct mapped_device *md)
2305 {
2306 	__dm_destroy(md, true);
2307 }
2308 
2309 void dm_destroy_immediate(struct mapped_device *md)
2310 {
2311 	__dm_destroy(md, false);
2312 }
2313 
2314 void dm_put(struct mapped_device *md)
2315 {
2316 	atomic_dec(&md->holders);
2317 }
2318 EXPORT_SYMBOL_GPL(dm_put);
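
/*
 * Illustrative reference-counting pattern (sketch only): a lookup by dev_t
 * must be balanced with dm_put() once the caller is done with the device:
 *
 *	struct mapped_device *md = dm_get_md(dev);
 *	if (!md)
 *		return -ENXIO;
 *	...
 *	dm_put(md);
 *
 * dm_hold() serves callers that already have the pointer and must fail
 * with -EBUSY instead of taking a reference on a device in DMF_FREEING.
 */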
2319 
2320 static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2321 {
2322 	int r = 0;
2323 	DEFINE_WAIT(wait);
2324 
2325 	while (1) {
2326 		prepare_to_wait(&md->wait, &wait, task_state);
2327 
2328 		if (!md_in_flight(md))
2329 			break;
2330 
2331 		if (signal_pending_state(task_state, current)) {
2332 			r = -EINTR;
2333 			break;
2334 		}
2335 
2336 		io_schedule();
2337 	}
2338 	finish_wait(&md->wait, &wait);
2339 
2340 	return r;
2341 }
2342 
2343 /*
2344  * Process the deferred bios
2345  */
2346 static void dm_wq_work(struct work_struct *work)
2347 {
2348 	struct mapped_device *md = container_of(work, struct mapped_device,
2349 						work);
2350 	struct bio *c;
2351 	int srcu_idx;
2352 	struct dm_table *map;
2353 
2354 	map = dm_get_live_table(md, &srcu_idx);
2355 
2356 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2357 		spin_lock_irq(&md->deferred_lock);
2358 		c = bio_list_pop(&md->deferred);
2359 		spin_unlock_irq(&md->deferred_lock);
2360 
2361 		if (!c)
2362 			break;
2363 
2364 		if (dm_request_based(md))
2365 			generic_make_request(c);
2366 		else
2367 			__split_and_process_bio(md, map, c);
2368 	}
2369 
2370 	dm_put_live_table(md, srcu_idx);
2371 }
2372 
2373 static void dm_queue_flush(struct mapped_device *md)
2374 {
2375 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2376 	smp_mb__after_atomic();
2377 	queue_work(md->wq, &md->work);
2378 }
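
/*
 * Note: while DMF_BLOCK_IO_FOR_SUSPEND is set, newly submitted bios are
 * parked on md->deferred instead of being mapped.  dm_queue_flush() clears
 * the flag and kicks md->wq so that dm_wq_work() replays the deferred bios
 * once the device can make progress again.
 */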
2379 
2380 /*
2381  * Swap in a new table, returning the old one for the caller to destroy.
2382  */
2383 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2384 {
2385 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2386 	struct queue_limits limits;
2387 	int r;
2388 
2389 	mutex_lock(&md->suspend_lock);
2390 
2391 	/* device must be suspended */
2392 	if (!dm_suspended_md(md))
2393 		goto out;
2394 
2395 	/*
2396 	 * If the new table has no data devices, retain the existing limits.
2397 	 * This helps multipath with queue_if_no_path: if all paths
2398 	 * disappear, new I/O is queued based on these limits until some
2399 	 * paths reappear.
2400 	 */
2401 	if (dm_table_has_no_data_devices(table)) {
2402 		live_map = dm_get_live_table_fast(md);
2403 		if (live_map)
2404 			limits = md->queue->limits;
2405 		dm_put_live_table_fast(md);
2406 	}
2407 
2408 	if (!live_map) {
2409 		r = dm_calculate_queue_limits(table, &limits);
2410 		if (r) {
2411 			map = ERR_PTR(r);
2412 			goto out;
2413 		}
2414 	}
2415 
2416 	map = __bind(md, table, &limits);
2417 	dm_issue_global_event();
2418 
2419 out:
2420 	mutex_unlock(&md->suspend_lock);
2421 	return map;
2422 }
2423 
2424 /*
2425  * Functions to lock and unlock any filesystem running on the
2426  * device.
2427  */
2428 static int lock_fs(struct mapped_device *md)
2429 {
2430 	int r;
2431 
2432 	WARN_ON(md->frozen_sb);
2433 
2434 	md->frozen_sb = freeze_bdev(md->bdev);
2435 	if (IS_ERR(md->frozen_sb)) {
2436 		r = PTR_ERR(md->frozen_sb);
2437 		md->frozen_sb = NULL;
2438 		return r;
2439 	}
2440 
2441 	set_bit(DMF_FROZEN, &md->flags);
2442 
2443 	return 0;
2444 }
2445 
2446 static void unlock_fs(struct mapped_device *md)
2447 {
2448 	if (!test_bit(DMF_FROZEN, &md->flags))
2449 		return;
2450 
2451 	thaw_bdev(md->bdev, md->frozen_sb);
2452 	md->frozen_sb = NULL;
2453 	clear_bit(DMF_FROZEN, &md->flags);
2454 }
2455 
2456 /*
2457  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2458  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2459  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2460  *
2461  * If __dm_suspend returns 0, the device is completely quiescent:
2462  * there is no request-processing activity, and all new requests
2463  * are added to the md->deferred list.
2464  */
2465 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2466 			unsigned suspend_flags, long task_state,
2467 			int dmf_suspended_flag)
2468 {
2469 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2470 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2471 	int r;
2472 
2473 	lockdep_assert_held(&md->suspend_lock);
2474 
2475 	/*
2476 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2477 	 * This flag is cleared before dm_suspend returns.
2478 	 */
2479 	if (noflush)
2480 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2481 	else
2482 		pr_debug("%s: suspending with flush\n", dm_device_name(md));
2483 
2484 	/*
2485 	 * This gets reverted if there's an error later and the targets
2486 	 * provide the .presuspend_undo hook.
2487 	 */
2488 	dm_table_presuspend_targets(map);
2489 
2490 	/*
2491 	 * Flush I/O to the device.
2492 	 * Any I/O submitted after lock_fs() may not be flushed.
2493 	 * noflush takes precedence over do_lockfs.
2494 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2495 	 */
2496 	if (!noflush && do_lockfs) {
2497 		r = lock_fs(md);
2498 		if (r) {
2499 			dm_table_presuspend_undo_targets(map);
2500 			return r;
2501 		}
2502 	}
2503 
2504 	/*
2505 	 * Here we must make sure that no processes are submitting requests
2506 	 * to target drivers, i.e. no one may be executing
2507 	 * __split_and_process_bio, which is called from dm_make_request and
2508 	 * dm_wq_work.
2509 	 *
2510 	 * To get all processes out of __split_and_process_bio in
2511 	 * dm_make_request, and to prevent any process from reentering it, we
2512 	 * set DMF_BLOCK_IO_FOR_SUSPEND and wait out the SRCU grace period on
2513 	 * md->io_barrier; to quiesce the worker thread (dm_wq_work) we call
2514 	 * flush_workqueue(md->wq).
2515 	 */
2516 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2517 	if (map)
2518 		synchronize_srcu(&md->io_barrier);
2519 
2520 	/*
2521 	 * Stop md->queue before flushing md->wq in case request-based
2522 	 * dm defers requests to md->wq from md->queue.
2523 	 */
2524 	if (dm_request_based(md)) {
2525 		dm_stop_queue(md->queue);
2526 		if (md->kworker_task)
2527 			kthread_flush_worker(&md->kworker);
2528 	}
2529 
2530 	flush_workqueue(md->wq);
2531 
2532 	/*
2533 	 * At this point no more requests are entering target request routines.
2534 	 * We call dm_wait_for_completion to wait for all existing requests
2535 	 * to finish.
2536 	 */
2537 	r = dm_wait_for_completion(md, task_state);
2538 	if (!r)
2539 		set_bit(dmf_suspended_flag, &md->flags);
2540 
2541 	if (noflush)
2542 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2543 	if (map)
2544 		synchronize_srcu(&md->io_barrier);
2545 
2546 	/* were we interrupted ? */
2547 	if (r < 0) {
2548 		dm_queue_flush(md);
2549 
2550 		if (dm_request_based(md))
2551 			dm_start_queue(md->queue);
2552 
2553 		unlock_fs(md);
2554 		dm_table_presuspend_undo_targets(map);
2555 		/* pushback list is already flushed, so skip flush */
2556 	}
2557 
2558 	return r;
2559 }
2560 
2561 /*
2562  * We need to be able to change a mapping table under a mounted
2563  * filesystem.  For example, we might want to move some data in
2564  * the background.  Before the table can be swapped with
2565  * dm_bind_table, dm_suspend must be called to flush any in-flight
2566  * bios and ensure that any further I/O gets deferred.
2567  */
2568 /*
2569  * Suspend mechanism in request-based dm.
2570  *
2571  * 1. Flush all I/Os by lock_fs() if needed.
2572  * 2. Stop dispatching any I/O by stopping the request_queue.
2573  * 3. Wait for all in-flight I/Os to be completed or requeued.
2574  *
2575  * To abort suspend, start the request_queue.
2576  */
2577 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2578 {
2579 	struct dm_table *map = NULL;
2580 	int r = 0;
2581 
2582 retry:
2583 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2584 
2585 	if (dm_suspended_md(md)) {
2586 		r = -EINVAL;
2587 		goto out_unlock;
2588 	}
2589 
2590 	if (dm_suspended_internally_md(md)) {
2591 		/* already internally suspended, wait for internal resume */
2592 		mutex_unlock(&md->suspend_lock);
2593 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2594 		if (r)
2595 			return r;
2596 		goto retry;
2597 	}
2598 
2599 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2600 
2601 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2602 	if (r)
2603 		goto out_unlock;
2604 
2605 	dm_table_postsuspend_targets(map);
2606 
2607 out_unlock:
2608 	mutex_unlock(&md->suspend_lock);
2609 	return r;
2610 }
2611 
2612 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2613 {
2614 	if (map) {
2615 		int r = dm_table_resume_targets(map);
2616 		if (r)
2617 			return r;
2618 	}
2619 
2620 	dm_queue_flush(md);
2621 
2622 	/*
2623 	 * Flushing deferred I/Os must be done after targets are resumed
2624 	 * so that mapping of targets can work correctly.
2625 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2626 	 */
2627 	if (dm_request_based(md))
2628 		dm_start_queue(md->queue);
2629 
2630 	unlock_fs(md);
2631 
2632 	return 0;
2633 }
2634 
2635 int dm_resume(struct mapped_device *md)
2636 {
2637 	int r;
2638 	struct dm_table *map = NULL;
2639 
2640 retry:
2641 	r = -EINVAL;
2642 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2643 
2644 	if (!dm_suspended_md(md))
2645 		goto out;
2646 
2647 	if (dm_suspended_internally_md(md)) {
2648 		/* already internally suspended, wait for internal resume */
2649 		mutex_unlock(&md->suspend_lock);
2650 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2651 		if (r)
2652 			return r;
2653 		goto retry;
2654 	}
2655 
2656 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2657 	if (!map || !dm_table_get_size(map))
2658 		goto out;
2659 
2660 	r = __dm_resume(md, map);
2661 	if (r)
2662 		goto out;
2663 
2664 	clear_bit(DMF_SUSPENDED, &md->flags);
2665 out:
2666 	mutex_unlock(&md->suspend_lock);
2667 
2668 	return r;
2669 }
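
/*
 * Illustrative table-swap sequence (a sketch of how the primitives above
 * are typically combined by callers such as dm-ioctl; not verbatim code):
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r) {
 *		old_map = dm_swap_table(md, new_map);
 *		if (!IS_ERR_OR_NULL(old_map))
 *			dm_table_destroy(old_map);
 *		r = dm_resume(md);
 *	}
 */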
2670 
2671 /*
2672  * Internal suspend/resume works like userspace-driven suspend. It waits
2673  * until all bios finish and prevents issuing new bios to the target drivers.
2674  * It may be used only from the kernel.
2675  */
2676 
2677 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2678 {
2679 	struct dm_table *map = NULL;
2680 
2681 	lockdep_assert_held(&md->suspend_lock);
2682 
2683 	if (md->internal_suspend_count++)
2684 		return; /* nested internal suspend */
2685 
2686 	if (dm_suspended_md(md)) {
2687 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2688 		return; /* nest suspend */
2689 	}
2690 
2691 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2692 
2693 	/*
2694 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2695 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2696 	 * would require changing .presuspend to return an error -- avoid this
2697 	 * until there is a need for more elaborate variants of internal suspend.
2698 	 */
2699 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2700 			    DMF_SUSPENDED_INTERNALLY);
2701 
2702 	dm_table_postsuspend_targets(map);
2703 }
2704 
2705 static void __dm_internal_resume(struct mapped_device *md)
2706 {
2707 	BUG_ON(!md->internal_suspend_count);
2708 
2709 	if (--md->internal_suspend_count)
2710 		return; /* resume from nested internal suspend */
2711 
2712 	if (dm_suspended_md(md))
2713 		goto done; /* resume from nested suspend */
2714 
2715 	/*
2716 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2717 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2718 	 */
2719 	(void) __dm_resume(md, NULL);
2720 
2721 done:
2722 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2723 	smp_mb__after_atomic();
2724 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2725 }
2726 
2727 void dm_internal_suspend_noflush(struct mapped_device *md)
2728 {
2729 	mutex_lock(&md->suspend_lock);
2730 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2731 	mutex_unlock(&md->suspend_lock);
2732 }
2733 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2734 
2735 void dm_internal_resume(struct mapped_device *md)
2736 {
2737 	mutex_lock(&md->suspend_lock);
2738 	__dm_internal_resume(md);
2739 	mutex_unlock(&md->suspend_lock);
2740 }
2741 EXPORT_SYMBOL_GPL(dm_internal_resume);
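
/*
 * Illustrative pairing (sketch only): in-kernel users such as dm-thin
 * bracket operations that must not race with live I/O on a device:
 *
 *	dm_internal_suspend_noflush(md);
 *	... operate on the quiesced device ...
 *	dm_internal_resume(md);
 */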
2742 
2743 /*
2744  * Fast variants of internal suspend/resume hold md->suspend_lock,
2745  * which prevents interaction with userspace-driven suspend.
2746  */
2747 
2748 void dm_internal_suspend_fast(struct mapped_device *md)
2749 {
2750 	mutex_lock(&md->suspend_lock);
2751 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2752 		return;
2753 
2754 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2755 	synchronize_srcu(&md->io_barrier);
2756 	flush_workqueue(md->wq);
2757 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2758 }
2759 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2760 
2761 void dm_internal_resume_fast(struct mapped_device *md)
2762 {
2763 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2764 		goto done;
2765 
2766 	dm_queue_flush(md);
2767 
2768 done:
2769 	mutex_unlock(&md->suspend_lock);
2770 }
2771 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
2772 
2773 /*-----------------------------------------------------------------
2774  * Event notification.
2775  *---------------------------------------------------------------*/
2776 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2777 		       unsigned cookie)
2778 {
2779 	char udev_cookie[DM_COOKIE_LENGTH];
2780 	char *envp[] = { udev_cookie, NULL };
2781 
2782 	if (!cookie)
2783 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2784 	else {
2785 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2786 			 DM_COOKIE_ENV_VAR_NAME, cookie);
2787 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2788 					  action, envp);
2789 	}
2790 }
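
/*
 * Example (illustrative): with cookie 0xdeadbeef the uevent carries the
 * environment string "DM_COOKIE=3735928559", which udev/libdevmapper use
 * to tie the event back to the ioctl that requested it.
 */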
2791 
2792 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2793 {
2794 	return atomic_add_return(1, &md->uevent_seq);
2795 }
2796 
2797 uint32_t dm_get_event_nr(struct mapped_device *md)
2798 {
2799 	return atomic_read(&md->event_nr);
2800 }
2801 
2802 int dm_wait_event(struct mapped_device *md, int event_nr)
2803 {
2804 	return wait_event_interruptible(md->eventq,
2805 			(event_nr != atomic_read(&md->event_nr)));
2806 }
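
/*
 * Illustrative wait pattern (sketch only): sample the event counter first,
 * inspect the device, then sleep until the counter moves on:
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *	...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;
 *
 * dm_wait_event() returns -ERESTARTSYS if the wait is interrupted by a
 * signal before the event counter changes.
 */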
2807 
2808 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2809 {
2810 	unsigned long flags;
2811 
2812 	spin_lock_irqsave(&md->uevent_lock, flags);
2813 	list_add(elist, &md->uevent_list);
2814 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2815 }
2816 
2817 /*
2818  * The gendisk is only valid as long as you have a reference
2819  * count on 'md'.
2820  */
2821 struct gendisk *dm_disk(struct mapped_device *md)
2822 {
2823 	return md->disk;
2824 }
2825 EXPORT_SYMBOL_GPL(dm_disk);
2826 
2827 struct kobject *dm_kobject(struct mapped_device *md)
2828 {
2829 	return &md->kobj_holder.kobj;
2830 }
2831 
2832 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2833 {
2834 	struct mapped_device *md;
2835 
2836 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2837 
2838 	spin_lock(&_minor_lock);
2839 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2840 		md = NULL;
2841 		goto out;
2842 	}
2843 	dm_get(md);
2844 out:
2845 	spin_unlock(&_minor_lock);
2846 
2847 	return md;
2848 }
2849 
2850 int dm_suspended_md(struct mapped_device *md)
2851 {
2852 	return test_bit(DMF_SUSPENDED, &md->flags);
2853 }
2854 
2855 int dm_suspended_internally_md(struct mapped_device *md)
2856 {
2857 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2858 }
2859 
2860 int dm_test_deferred_remove_flag(struct mapped_device *md)
2861 {
2862 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2863 }
2864 
2865 int dm_suspended(struct dm_target *ti)
2866 {
2867 	return dm_suspended_md(dm_table_get_md(ti->table));
2868 }
2869 EXPORT_SYMBOL_GPL(dm_suspended);
2870 
2871 int dm_noflush_suspending(struct dm_target *ti)
2872 {
2873 	return __noflush_suspending(dm_table_get_md(ti->table));
2874 }
2875 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2876 
2877 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2878 					    unsigned integrity, unsigned per_io_data_size,
2879 					    unsigned min_pool_size)
2880 {
2881 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2882 	unsigned int pool_size = 0;
2883 	unsigned int front_pad, io_front_pad;
2884 
2885 	if (!pools)
2886 		return NULL;
2887 
2888 	switch (type) {
2889 	case DM_TYPE_BIO_BASED:
2890 	case DM_TYPE_DAX_BIO_BASED:
2891 	case DM_TYPE_NVME_BIO_BASED:
2892 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
2893 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2894 		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
2895 		pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
2896 		if (!pools->io_bs)
2897 			goto out;
2898 		if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
2899 			goto out;
2900 		break;
2901 	case DM_TYPE_REQUEST_BASED:
2902 	case DM_TYPE_MQ_REQUEST_BASED:
2903 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2904 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2905 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
2906 		break;
2907 	default:
2908 		BUG();
2909 	}
2910 
2911 	pools->bs = bioset_create(pool_size, front_pad, 0);
2912 	if (!pools->bs)
2913 		goto out;
2914 
2915 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
2916 		goto out;
2917 
2918 	return pools;
2919 
2920 out:
2921 	dm_free_md_mempools(pools);
2922 
2923 	return NULL;
2924 }
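
/*
 * Layout note for the bio-based front_pad values computed above
 * (illustrative): a clone allocated from pools->io_bs is the bio embedded
 * at the tail of a struct dm_io, and the target's per-io data area sits
 * immediately in front of that dm_io; clones from pools->bs are the bio
 * embedded in a bare struct dm_target_io, again preceded by the per-io
 * data.
 */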
2925 
2926 void dm_free_md_mempools(struct dm_md_mempools *pools)
2927 {
2928 	if (!pools)
2929 		return;
2930 
2931 	if (pools->bs)
2932 		bioset_free(pools->bs);
2933 	if (pools->io_bs)
2934 		bioset_free(pools->io_bs);
2935 
2936 	kfree(pools);
2937 }
2938 
2939 struct dm_pr {
2940 	u64	old_key;
2941 	u64	new_key;
2942 	u32	flags;
2943 	bool	fail_early;
2944 };
2945 
2946 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
2947 		      void *data)
2948 {
2949 	struct mapped_device *md = bdev->bd_disk->private_data;
2950 	struct dm_table *table;
2951 	struct dm_target *ti;
2952 	int ret = -ENOTTY, srcu_idx;
2953 
2954 	table = dm_get_live_table(md, &srcu_idx);
2955 	if (!table || !dm_table_get_size(table))
2956 		goto out;
2957 
2958 	/* We only support devices that have a single target */
2959 	if (dm_table_get_num_targets(table) != 1)
2960 		goto out;
2961 	ti = dm_table_get_target(table, 0);
2962 
2963 	ret = -EINVAL;
2964 	if (!ti->type->iterate_devices)
2965 		goto out;
2966 
2967 	ret = ti->type->iterate_devices(ti, fn, data);
2968 out:
2969 	dm_put_live_table(md, srcu_idx);
2970 	return ret;
2971 }
2972 
2973 /*
2974  * For register / unregister we need to manually call out to every path.
2975  */
2976 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
2977 			    sector_t start, sector_t len, void *data)
2978 {
2979 	struct dm_pr *pr = data;
2980 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
2981 
2982 	if (!ops || !ops->pr_register)
2983 		return -EOPNOTSUPP;
2984 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
2985 }
2986 
2987 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
2988 			  u32 flags)
2989 {
2990 	struct dm_pr pr = {
2991 		.old_key	= old_key,
2992 		.new_key	= new_key,
2993 		.flags		= flags,
2994 		.fail_early	= true,
2995 	};
2996 	int ret;
2997 
2998 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
2999 	if (ret && new_key) {
3000 		/* unregister all paths if we failed to register any path */
3001 		pr.old_key = new_key;
3002 		pr.new_key = 0;
3003 		pr.flags = 0;
3004 		pr.fail_early = false;
3005 		dm_call_pr(bdev, __dm_pr_register, &pr);
3006 	}
3007 
3008 	return ret;
3009 }
3010 
3011 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3012 			 u32 flags)
3013 {
3014 	struct mapped_device *md = bdev->bd_disk->private_data;
3015 	const struct pr_ops *ops;
3016 	fmode_t mode;
3017 	int r;
3018 
3019 	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3020 	if (r < 0)
3021 		return r;
3022 
3023 	ops = bdev->bd_disk->fops->pr_ops;
3024 	if (ops && ops->pr_reserve)
3025 		r = ops->pr_reserve(bdev, key, type, flags);
3026 	else
3027 		r = -EOPNOTSUPP;
3028 
3029 	blkdev_put(bdev, mode);
3030 	return r;
3031 }
3032 
3033 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3034 {
3035 	struct mapped_device *md = bdev->bd_disk->private_data;
3036 	const struct pr_ops *ops;
3037 	fmode_t mode;
3038 	int r;
3039 
3040 	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3041 	if (r < 0)
3042 		return r;
3043 
3044 	ops = bdev->bd_disk->fops->pr_ops;
3045 	if (ops && ops->pr_release)
3046 		r = ops->pr_release(bdev, key, type);
3047 	else
3048 		r = -EOPNOTSUPP;
3049 
3050 	blkdev_put(bdev, mode);
3051 	return r;
3052 }
3053 
3054 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3055 			 enum pr_type type, bool abort)
3056 {
3057 	struct mapped_device *md = bdev->bd_disk->private_data;
3058 	const struct pr_ops *ops;
3059 	fmode_t mode;
3060 	int r;
3061 
3062 	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3063 	if (r < 0)
3064 		return r;
3065 
3066 	ops = bdev->bd_disk->fops->pr_ops;
3067 	if (ops && ops->pr_preempt)
3068 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3069 	else
3070 		r = -EOPNOTSUPP;
3071 
3072 	blkdev_put(bdev, mode);
3073 	return r;
3074 }
3075 
3076 static int dm_pr_clear(struct block_device *bdev, u64 key)
3077 {
3078 	struct mapped_device *md = bdev->bd_disk->private_data;
3079 	const struct pr_ops *ops;
3080 	fmode_t mode;
3081 	int r;
3082 
3083 	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3084 	if (r < 0)
3085 		return r;
3086 
3087 	ops = bdev->bd_disk->fops->pr_ops;
3088 	if (ops && ops->pr_clear)
3089 		r = ops->pr_clear(bdev, key);
3090 	else
3091 		r = -EOPNOTSUPP;
3092 
3093 	blkdev_put(bdev, mode);
3094 	return r;
3095 }
3096 
3097 static const struct pr_ops dm_pr_ops = {
3098 	.pr_register	= dm_pr_register,
3099 	.pr_reserve	= dm_pr_reserve,
3100 	.pr_release	= dm_pr_release,
3101 	.pr_preempt	= dm_pr_preempt,
3102 	.pr_clear	= dm_pr_clear,
3103 };
3104 
3105 static const struct block_device_operations dm_blk_dops = {
3106 	.open = dm_blk_open,
3107 	.release = dm_blk_close,
3108 	.ioctl = dm_blk_ioctl,
3109 	.getgeo = dm_blk_getgeo,
3110 	.pr_ops = &dm_pr_ops,
3111 	.owner = THIS_MODULE
3112 };
3113 
3114 static const struct dax_operations dm_dax_ops = {
3115 	.direct_access = dm_dax_direct_access,
3116 	.copy_from_iter = dm_dax_copy_from_iter,
3117 };
3118 
3119 /*
3120  * module hooks
3121  */
3122 module_init(dm_init);
3123 module_exit(dm_exit);
3124 
3125 module_param(major, uint, 0);
3126 MODULE_PARM_DESC(major, "The major number of the device mapper");
3127 
3128 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3129 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3130 
3131 module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3132 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3133 
3134 MODULE_DESCRIPTION(DM_NAME " driver");
3135 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3136 MODULE_LICENSE("GPL");
3137