xref: /linux/drivers/md/dm.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
4  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include "dm-core.h"
10 #include "dm-rq.h"
11 #include "dm-uevent.h"
12 #include "dm-ima.h"
13 
14 #include <linux/bio-integrity.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/signal.h>
20 #include <linux/blkpg.h>
21 #include <linux/bio.h>
22 #include <linux/mempool.h>
23 #include <linux/dax.h>
24 #include <linux/slab.h>
25 #include <linux/idr.h>
26 #include <linux/uio.h>
27 #include <linux/hdreg.h>
28 #include <linux/delay.h>
29 #include <linux/wait.h>
30 #include <linux/pr.h>
31 #include <linux/refcount.h>
32 #include <linux/part_stat.h>
33 #include <linux/blk-crypto.h>
34 #include <linux/blk-crypto-profile.h>
35 
36 #define DM_MSG_PREFIX "core"
37 
38 /*
39  * Cookies are numeric values sent with CHANGE and REMOVE
40  * uevents while resuming, removing or renaming the device.
41  */
42 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
43 #define DM_COOKIE_LENGTH 24
44 
45 /*
46  * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
47  * dm_io into one list, and reuse bio->bi_private as the list head. Before
48  * ending this fs bio, we will recover its ->bi_private.
49  */
50 #define REQ_DM_POLL_LIST	REQ_DRV
51 
52 static const char *_name = DM_NAME;
53 
54 static unsigned int major;
55 static unsigned int _major;
56 
57 static DEFINE_IDR(_minor_idr);
58 
59 static DEFINE_SPINLOCK(_minor_lock);
60 
61 static void do_deferred_remove(struct work_struct *w);
62 
63 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
64 
65 static struct workqueue_struct *deferred_remove_workqueue;
66 
67 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
68 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
69 
70 void dm_issue_global_event(void)
71 {
72 	atomic_inc(&dm_global_event_nr);
73 	wake_up(&dm_global_eventq);
74 }
75 
76 DEFINE_STATIC_KEY_FALSE(stats_enabled);
77 DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
78 DEFINE_STATIC_KEY_FALSE(zoned_enabled);
79 
80 /*
81  * One of these is allocated (on-stack) per original bio.
82  */
83 struct clone_info {
84 	struct dm_table *map;
85 	struct bio *bio;
86 	struct dm_io *io;
87 	sector_t sector;
88 	unsigned int sector_count;
89 	bool is_abnormal_io:1;
90 	bool submit_as_polled:1;
91 };
92 
93 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
94 {
95 	return container_of(clone, struct dm_target_io, clone);
96 }
97 
98 void *dm_per_bio_data(struct bio *bio, size_t data_size)
99 {
100 	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
101 		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
102 	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
103 }
104 EXPORT_SYMBOL_GPL(dm_per_bio_data);
105 
106 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
107 {
108 	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
109 
110 	if (io->magic == DM_IO_MAGIC)
111 		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
112 	BUG_ON(io->magic != DM_TIO_MAGIC);
113 	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
114 }
115 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
116 
117 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
118 {
119 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
120 }
121 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
122 
123 #define MINOR_ALLOCED ((void *)-1)
124 
125 #define DM_NUMA_NODE NUMA_NO_NODE
126 static int dm_numa_node = DM_NUMA_NODE;
127 
128 #define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
129 static int swap_bios = DEFAULT_SWAP_BIOS;
130 static int get_swap_bios(void)
131 {
132 	int latch = READ_ONCE(swap_bios);
133 
134 	if (unlikely(latch <= 0))
135 		latch = DEFAULT_SWAP_BIOS;
136 	return latch;
137 }
138 
139 struct table_device {
140 	struct list_head list;
141 	refcount_t count;
142 	struct dm_dev dm_dev;
143 };
144 
145 /*
146  * Bio-based DM's mempools' reserved IOs set by the user.
147  */
148 #define RESERVED_BIO_BASED_IOS		16
149 static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
150 
151 static int __dm_get_module_param_int(int *module_param, int min, int max)
152 {
153 	int param = READ_ONCE(*module_param);
154 	int modified_param = 0;
155 	bool modified = true;
156 
157 	if (param < min)
158 		modified_param = min;
159 	else if (param > max)
160 		modified_param = max;
161 	else
162 		modified = false;
163 
164 	if (modified) {
165 		(void)cmpxchg(module_param, param, modified_param);
166 		param = modified_param;
167 	}
168 
169 	return param;
170 }
171 
172 unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
173 {
174 	unsigned int param = READ_ONCE(*module_param);
175 	unsigned int modified_param = 0;
176 
177 	if (!param)
178 		modified_param = def;
179 	else if (param > max)
180 		modified_param = max;
181 
182 	if (modified_param) {
183 		(void)cmpxchg(module_param, param, modified_param);
184 		param = modified_param;
185 	}
186 
187 	return param;
188 }
189 
190 unsigned int dm_get_reserved_bio_based_ios(void)
191 {
192 	return __dm_get_module_param(&reserved_bio_based_ios,
193 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
194 }
195 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
196 
197 static unsigned int dm_get_numa_node(void)
198 {
199 	return __dm_get_module_param_int(&dm_numa_node,
200 					 DM_NUMA_NODE, num_online_nodes() - 1);
201 }
202 
203 static int __init local_init(void)
204 {
205 	int r;
206 
207 	r = dm_uevent_init();
208 	if (r)
209 		return r;
210 
211 	deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
212 	if (!deferred_remove_workqueue) {
213 		r = -ENOMEM;
214 		goto out_uevent_exit;
215 	}
216 
217 	_major = major;
218 	r = register_blkdev(_major, _name);
219 	if (r < 0)
220 		goto out_free_workqueue;
221 
222 	if (!_major)
223 		_major = r;
224 
225 	return 0;
226 
227 out_free_workqueue:
228 	destroy_workqueue(deferred_remove_workqueue);
229 out_uevent_exit:
230 	dm_uevent_exit();
231 
232 	return r;
233 }
234 
235 static void local_exit(void)
236 {
237 	destroy_workqueue(deferred_remove_workqueue);
238 
239 	unregister_blkdev(_major, _name);
240 	dm_uevent_exit();
241 
242 	_major = 0;
243 
244 	DMINFO("cleaned up");
245 }
246 
247 static int (*_inits[])(void) __initdata = {
248 	local_init,
249 	dm_target_init,
250 	dm_linear_init,
251 	dm_stripe_init,
252 	dm_io_init,
253 	dm_kcopyd_init,
254 	dm_interface_init,
255 	dm_statistics_init,
256 };
257 
258 static void (*_exits[])(void) = {
259 	local_exit,
260 	dm_target_exit,
261 	dm_linear_exit,
262 	dm_stripe_exit,
263 	dm_io_exit,
264 	dm_kcopyd_exit,
265 	dm_interface_exit,
266 	dm_statistics_exit,
267 };
268 
269 static int __init dm_init(void)
270 {
271 	const int count = ARRAY_SIZE(_inits);
272 	int r, i;
273 
274 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
275 	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
276 	       " Duplicate IMA measurements will not be recorded in the IMA log.");
277 #endif
278 
279 	for (i = 0; i < count; i++) {
280 		r = _inits[i]();
281 		if (r)
282 			goto bad;
283 	}
284 
285 	return 0;
286 bad:
287 	while (i--)
288 		_exits[i]();
289 
290 	return r;
291 }
292 
293 static void __exit dm_exit(void)
294 {
295 	int i = ARRAY_SIZE(_exits);
296 
297 	while (i--)
298 		_exits[i]();
299 
300 	/*
301 	 * Should be empty by this point.
302 	 */
303 	idr_destroy(&_minor_idr);
304 }
305 
306 /*
307  * Block device functions
308  */
309 int dm_deleting_md(struct mapped_device *md)
310 {
311 	return test_bit(DMF_DELETING, &md->flags);
312 }
313 
314 static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
315 {
316 	struct mapped_device *md;
317 
318 	spin_lock(&_minor_lock);
319 
320 	md = disk->private_data;
321 	if (!md)
322 		goto out;
323 
324 	if (test_bit(DMF_FREEING, &md->flags) ||
325 	    dm_deleting_md(md)) {
326 		md = NULL;
327 		goto out;
328 	}
329 
330 	dm_get(md);
331 	atomic_inc(&md->open_count);
332 out:
333 	spin_unlock(&_minor_lock);
334 
335 	return md ? 0 : -ENXIO;
336 }
337 
338 static void dm_blk_close(struct gendisk *disk)
339 {
340 	struct mapped_device *md;
341 
342 	spin_lock(&_minor_lock);
343 
344 	md = disk->private_data;
345 	if (WARN_ON(!md))
346 		goto out;
347 
348 	if (atomic_dec_and_test(&md->open_count) &&
349 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
350 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
351 
352 	dm_put(md);
353 out:
354 	spin_unlock(&_minor_lock);
355 }
356 
357 int dm_open_count(struct mapped_device *md)
358 {
359 	return atomic_read(&md->open_count);
360 }
361 
362 /*
363  * Guarantees nothing is using the device before it's deleted.
364  */
365 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
366 {
367 	int r = 0;
368 
369 	spin_lock(&_minor_lock);
370 
371 	if (dm_open_count(md)) {
372 		r = -EBUSY;
373 		if (mark_deferred)
374 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
375 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
376 		r = -EEXIST;
377 	else
378 		set_bit(DMF_DELETING, &md->flags);
379 
380 	spin_unlock(&_minor_lock);
381 
382 	return r;
383 }
384 
385 int dm_cancel_deferred_remove(struct mapped_device *md)
386 {
387 	int r = 0;
388 
389 	spin_lock(&_minor_lock);
390 
391 	if (test_bit(DMF_DELETING, &md->flags))
392 		r = -EBUSY;
393 	else
394 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
395 
396 	spin_unlock(&_minor_lock);
397 
398 	return r;
399 }
400 
401 static void do_deferred_remove(struct work_struct *w)
402 {
403 	dm_deferred_remove();
404 }
405 
406 static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
407 {
408 	struct mapped_device *md = disk->private_data;
409 
410 	return dm_get_geometry(md, geo);
411 }
412 
413 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
414 			    struct block_device **bdev, unsigned int cmd,
415 			    unsigned long arg, bool *forward)
416 {
417 	struct dm_target *ti;
418 	struct dm_table *map;
419 	int r;
420 
421 retry:
422 	r = -ENOTTY;
423 	map = dm_get_live_table(md, srcu_idx);
424 	if (!map || !dm_table_get_size(map))
425 		return r;
426 
427 	/* We only support devices that have a single target */
428 	if (map->num_targets != 1)
429 		return r;
430 
431 	ti = dm_table_get_target(map, 0);
432 	if (!ti->type->prepare_ioctl)
433 		return r;
434 
435 	if (dm_suspended_md(md))
436 		return -EAGAIN;
437 
438 	r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
439 	if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
440 		dm_put_live_table(md, *srcu_idx);
441 		fsleep(10000);
442 		goto retry;
443 	}
444 
445 	return r;
446 }
447 
448 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
449 {
450 	dm_put_live_table(md, srcu_idx);
451 }
452 
453 static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
454 			unsigned int cmd, unsigned long arg)
455 {
456 	struct mapped_device *md = bdev->bd_disk->private_data;
457 	int r, srcu_idx;
458 	bool forward = true;
459 
460 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
461 	if (!forward || r < 0)
462 		goto out;
463 
464 	if (r > 0) {
465 		/*
466 		 * Target determined this ioctl is being issued against a
467 		 * subset of the parent bdev; require extra privileges.
468 		 */
469 		if (!capable(CAP_SYS_RAWIO)) {
470 			DMDEBUG_LIMIT(
471 	"%s: sending ioctl %x to DM device without required privilege.",
472 				current->comm, cmd);
473 			r = -ENOIOCTLCMD;
474 			goto out;
475 		}
476 	}
477 
478 	if (!bdev->bd_disk->fops->ioctl)
479 		r = -ENOTTY;
480 	else
481 		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
482 out:
483 	dm_unprepare_ioctl(md, srcu_idx);
484 	return r;
485 }
486 
487 u64 dm_start_time_ns_from_clone(struct bio *bio)
488 {
489 	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
490 }
491 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
492 
493 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
494 {
495 	/*
496 	 * If REQ_PREFLUSH set, don't account payload, it will be
497 	 * submitted (and accounted) after this flush completes.
498 	 */
499 	if (io->requeue_flush_with_data)
500 		return 0;
501 	if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
502 		return io->sectors;
503 	return bio_sectors(bio);
504 }
505 
506 static void dm_io_acct(struct dm_io *io, bool end)
507 {
508 	struct bio *bio = io->orig_bio;
509 
510 	if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
511 		if (!end)
512 			bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
513 					   io->start_time);
514 		else
515 			bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
516 					 dm_io_sectors(io, bio),
517 					 io->start_time);
518 	}
519 
520 	if (static_branch_unlikely(&stats_enabled) &&
521 	    unlikely(dm_stats_used(&io->md->stats))) {
522 		sector_t sector;
523 
524 		if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
525 			sector = bio_end_sector(bio) - io->sector_offset;
526 		else
527 			sector = bio->bi_iter.bi_sector;
528 
529 		dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
530 				    sector, dm_io_sectors(io, bio),
531 				    end, io->start_time, &io->stats_aux);
532 	}
533 }
534 
535 static void __dm_start_io_acct(struct dm_io *io)
536 {
537 	dm_io_acct(io, false);
538 }
539 
540 static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
541 {
542 	/*
543 	 * Ensure IO accounting is only ever started once.
544 	 */
545 	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
546 		return;
547 
548 	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
549 	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
550 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
551 	} else {
552 		unsigned long flags;
553 		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
554 		spin_lock_irqsave(&io->lock, flags);
555 		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
556 			spin_unlock_irqrestore(&io->lock, flags);
557 			return;
558 		}
559 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
560 		spin_unlock_irqrestore(&io->lock, flags);
561 	}
562 
563 	__dm_start_io_acct(io);
564 }
565 
566 static void dm_end_io_acct(struct dm_io *io)
567 {
568 	dm_io_acct(io, true);
569 }
570 
571 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
572 {
573 	struct dm_io *io;
574 	struct dm_target_io *tio;
575 	struct bio *clone;
576 
577 	clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
578 	if (unlikely(!clone))
579 		return NULL;
580 	tio = clone_to_tio(clone);
581 	tio->flags = 0;
582 	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
583 	tio->io = NULL;
584 
585 	io = container_of(tio, struct dm_io, tio);
586 	io->magic = DM_IO_MAGIC;
587 	io->status = BLK_STS_OK;
588 	io->requeue_flush_with_data = false;
589 
590 	/* one ref is for submission, the other is for completion */
591 	atomic_set(&io->io_count, 2);
592 	this_cpu_inc(*md->pending_io);
593 	io->orig_bio = bio;
594 	io->md = md;
595 	spin_lock_init(&io->lock);
596 	io->start_time = jiffies;
597 	io->flags = 0;
598 	if (blk_queue_io_stat(md->queue))
599 		dm_io_set_flag(io, DM_IO_BLK_STAT);
600 
601 	if (static_branch_unlikely(&stats_enabled) &&
602 	    unlikely(dm_stats_used(&md->stats)))
603 		dm_stats_record_start(&md->stats, &io->stats_aux);
604 
605 	return io;
606 }
607 
608 static void free_io(struct dm_io *io)
609 {
610 	bio_put(&io->tio.clone);
611 }
612 
613 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
614 			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
615 {
616 	struct mapped_device *md = ci->io->md;
617 	struct dm_target_io *tio;
618 	struct bio *clone;
619 
620 	if (!ci->io->tio.io) {
621 		/* the dm_target_io embedded in ci->io is available */
622 		tio = &ci->io->tio;
623 		/* alloc_io() already initialized embedded clone */
624 		clone = &tio->clone;
625 	} else {
626 		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
627 					&md->mempools->bs);
628 		if (!clone)
629 			return NULL;
630 
631 		/* REQ_DM_POLL_LIST shouldn't be inherited */
632 		clone->bi_opf &= ~REQ_DM_POLL_LIST;
633 
634 		tio = clone_to_tio(clone);
635 		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
636 	}
637 
638 	tio->magic = DM_TIO_MAGIC;
639 	tio->io = ci->io;
640 	tio->ti = ti;
641 	tio->target_bio_nr = target_bio_nr;
642 	tio->len_ptr = len;
643 	tio->old_sector = 0;
644 
645 	/* Set default bdev, but target must bio_set_dev() before issuing IO */
646 	clone->bi_bdev = md->disk->part0;
647 	if (likely(ti != NULL) && unlikely(ti->needs_bio_set_dev))
648 		bio_set_dev(clone, md->disk->part0);
649 
650 	if (len) {
651 		clone->bi_iter.bi_size = to_bytes(*len);
652 		if (bio_integrity(clone))
653 			bio_integrity_trim(clone);
654 	}
655 
656 	return clone;
657 }
658 
659 static void free_tio(struct bio *clone)
660 {
661 	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
662 		return;
663 	bio_put(clone);
664 }
665 
666 /*
667  * Add the bio to the list of deferred io.
668  */
669 static void queue_io(struct mapped_device *md, struct bio *bio)
670 {
671 	unsigned long flags;
672 
673 	spin_lock_irqsave(&md->deferred_lock, flags);
674 	bio_list_add(&md->deferred, bio);
675 	spin_unlock_irqrestore(&md->deferred_lock, flags);
676 	queue_work(md->wq, &md->work);
677 }
678 
679 /*
680  * Everyone (including functions in this file), should use this
681  * function to access the md->map field, and make sure they call
682  * dm_put_live_table() when finished.
683  */
684 struct dm_table *dm_get_live_table(struct mapped_device *md,
685 				   int *srcu_idx) __acquires(md->io_barrier)
686 {
687 	*srcu_idx = srcu_read_lock(&md->io_barrier);
688 
689 	return srcu_dereference(md->map, &md->io_barrier);
690 }
691 
692 void dm_put_live_table(struct mapped_device *md,
693 		       int srcu_idx) __releases(md->io_barrier)
694 {
695 	srcu_read_unlock(&md->io_barrier, srcu_idx);
696 }
697 
698 void dm_sync_table(struct mapped_device *md)
699 {
700 	synchronize_srcu(&md->io_barrier);
701 	synchronize_rcu_expedited();
702 }
703 
704 /*
705  * A fast alternative to dm_get_live_table/dm_put_live_table.
706  * The caller must not block between these two functions.
707  */
708 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
709 {
710 	rcu_read_lock();
711 	return rcu_dereference(md->map);
712 }
713 
714 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
715 {
716 	rcu_read_unlock();
717 }
718 
719 static char *_dm_claim_ptr = "I belong to device-mapper";
720 
721 /*
722  * Open a table device so we can use it as a map destination.
723  */
724 static struct table_device *open_table_device(struct mapped_device *md,
725 		dev_t dev, blk_mode_t mode)
726 {
727 	struct table_device *td;
728 	struct file *bdev_file;
729 	struct block_device *bdev;
730 	u64 part_off;
731 	int r;
732 
733 	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
734 	if (!td)
735 		return ERR_PTR(-ENOMEM);
736 	refcount_set(&td->count, 1);
737 
738 	bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
739 	if (IS_ERR(bdev_file)) {
740 		r = PTR_ERR(bdev_file);
741 		goto out_free_td;
742 	}
743 
744 	bdev = file_bdev(bdev_file);
745 
746 	/*
747 	 * We can be called before the dm disk is added.  In that case we can't
748 	 * register the holder relation here.  It will be done once add_disk was
749 	 * called.
750 	 */
751 	if (md->disk->slave_dir) {
752 		r = bd_link_disk_holder(bdev, md->disk);
753 		if (r)
754 			goto out_blkdev_put;
755 	}
756 
757 	td->dm_dev.mode = mode;
758 	td->dm_dev.bdev = bdev;
759 	td->dm_dev.bdev_file = bdev_file;
760 	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
761 						NULL, NULL);
762 	format_dev_t(td->dm_dev.name, dev);
763 	list_add(&td->list, &md->table_devices);
764 	return td;
765 
766 out_blkdev_put:
767 	__fput_sync(bdev_file);
768 out_free_td:
769 	kfree(td);
770 	return ERR_PTR(r);
771 }
772 
773 /*
774  * Close a table device that we've been using.
775  */
776 static void close_table_device(struct table_device *td, struct mapped_device *md)
777 {
778 	if (md->disk->slave_dir)
779 		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
780 
781 	/* Leverage async fput() if DMF_DEFERRED_REMOVE set */
782 	if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
783 		fput(td->dm_dev.bdev_file);
784 	else
785 		__fput_sync(td->dm_dev.bdev_file);
786 
787 	put_dax(td->dm_dev.dax_dev);
788 	list_del(&td->list);
789 	kfree(td);
790 }
791 
792 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
793 					      blk_mode_t mode)
794 {
795 	struct table_device *td;
796 
797 	list_for_each_entry(td, l, list)
798 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
799 			return td;
800 
801 	return NULL;
802 }
803 
804 int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
805 			struct dm_dev **result)
806 {
807 	struct table_device *td;
808 
809 	mutex_lock(&md->table_devices_lock);
810 	td = find_table_device(&md->table_devices, dev, mode);
811 	if (!td) {
812 		td = open_table_device(md, dev, mode);
813 		if (IS_ERR(td)) {
814 			mutex_unlock(&md->table_devices_lock);
815 			return PTR_ERR(td);
816 		}
817 	} else {
818 		refcount_inc(&td->count);
819 	}
820 	mutex_unlock(&md->table_devices_lock);
821 
822 	*result = &td->dm_dev;
823 	return 0;
824 }
825 
826 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
827 {
828 	struct table_device *td = container_of(d, struct table_device, dm_dev);
829 
830 	mutex_lock(&md->table_devices_lock);
831 	if (refcount_dec_and_test(&td->count))
832 		close_table_device(td, md);
833 	mutex_unlock(&md->table_devices_lock);
834 }
835 
836 /*
837  * Get the geometry associated with a dm device
838  */
839 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
840 {
841 	*geo = md->geometry;
842 
843 	return 0;
844 }
845 
846 /*
847  * Set the geometry of a device.
848  */
849 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
850 {
851 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
852 
853 	if (geo->start > sz) {
854 		DMERR("Start sector is beyond the geometry limits.");
855 		return -EINVAL;
856 	}
857 
858 	md->geometry = *geo;
859 
860 	return 0;
861 }
862 
863 static int __noflush_suspending(struct mapped_device *md)
864 {
865 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
866 }
867 
868 static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
869 {
870 	struct mapped_device *md = io->md;
871 
872 	if (first_stage) {
873 		struct dm_io *next = md->requeue_list;
874 
875 		md->requeue_list = io;
876 		io->next = next;
877 	} else {
878 		bio_list_add_head(&md->deferred, io->orig_bio);
879 	}
880 }
881 
882 static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
883 {
884 	if (first_stage)
885 		queue_work(md->wq, &md->requeue_work);
886 	else
887 		queue_work(md->wq, &md->work);
888 }
889 
890 /*
891  * Return true if the dm_io's original bio is requeued.
892  * io->status is updated with error if requeue disallowed.
893  */
894 static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
895 {
896 	struct bio *bio = io->orig_bio;
897 	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
898 	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
899 				     (bio->bi_opf & REQ_POLLED));
900 	struct mapped_device *md = io->md;
901 	bool requeued = false;
902 
903 	if (handle_requeue || handle_polled_eagain) {
904 		unsigned long flags;
905 
906 		if (bio->bi_opf & REQ_POLLED) {
907 			/*
908 			 * Upper layer won't help us poll split bio
909 			 * (io->orig_bio may only reflect a subset of the
910 			 * pre-split original) so clear REQ_POLLED.
911 			 */
912 			bio_clear_polled(bio);
913 		}
914 
915 		/*
916 		 * Target requested pushing back the I/O or
917 		 * polled IO hit BLK_STS_AGAIN.
918 		 */
919 		spin_lock_irqsave(&md->deferred_lock, flags);
920 		if ((__noflush_suspending(md) &&
921 		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
922 		    handle_polled_eagain || first_stage) {
923 			dm_requeue_add_io(io, first_stage);
924 			requeued = true;
925 		} else {
926 			/*
927 			 * noflush suspend was interrupted or this is
928 			 * a write to a zoned target.
929 			 */
930 			io->status = BLK_STS_IOERR;
931 		}
932 		spin_unlock_irqrestore(&md->deferred_lock, flags);
933 	}
934 
935 	if (requeued)
936 		dm_kick_requeue(md, first_stage);
937 
938 	return requeued;
939 }
940 
941 static void __dm_io_complete(struct dm_io *io, bool first_stage)
942 {
943 	struct bio *bio = io->orig_bio;
944 	struct mapped_device *md = io->md;
945 	blk_status_t io_error;
946 	bool requeued;
947 	bool requeue_flush_with_data;
948 
949 	requeued = dm_handle_requeue(io, first_stage);
950 	if (requeued && first_stage)
951 		return;
952 
953 	io_error = io->status;
954 	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
955 		dm_end_io_acct(io);
956 	else if (!io_error) {
957 		/*
958 		 * Must handle target that DM_MAPIO_SUBMITTED only to
959 		 * then bio_endio() rather than dm_submit_bio_remap()
960 		 */
961 		__dm_start_io_acct(io);
962 		dm_end_io_acct(io);
963 	}
964 	requeue_flush_with_data = io->requeue_flush_with_data;
965 	free_io(io);
966 	smp_wmb();
967 	this_cpu_dec(*md->pending_io);
968 
969 	/* nudge anyone waiting on suspend queue */
970 	if (unlikely(wq_has_sleeper(&md->wait)))
971 		wake_up(&md->wait);
972 
973 	/* Return early if the original bio was requeued */
974 	if (requeued)
975 		return;
976 
977 	if (unlikely(requeue_flush_with_data)) {
978 		/*
979 		 * Preflush done for flush with data, reissue
980 		 * without REQ_PREFLUSH.
981 		 */
982 		bio->bi_opf &= ~REQ_PREFLUSH;
983 		queue_io(md, bio);
984 	} else {
985 		/* done with normal IO or empty flush */
986 		if (io_error)
987 			bio->bi_status = io_error;
988 		bio_endio(bio);
989 	}
990 }
991 
992 static void dm_wq_requeue_work(struct work_struct *work)
993 {
994 	struct mapped_device *md = container_of(work, struct mapped_device,
995 						requeue_work);
996 	unsigned long flags;
997 	struct dm_io *io;
998 
999 	/* reuse deferred lock to simplify dm_handle_requeue */
1000 	spin_lock_irqsave(&md->deferred_lock, flags);
1001 	io = md->requeue_list;
1002 	md->requeue_list = NULL;
1003 	spin_unlock_irqrestore(&md->deferred_lock, flags);
1004 
1005 	while (io) {
1006 		struct dm_io *next = io->next;
1007 
1008 		dm_io_rewind(io, &md->disk->bio_split);
1009 
1010 		io->next = NULL;
1011 		__dm_io_complete(io, false);
1012 		io = next;
1013 		cond_resched();
1014 	}
1015 }
1016 
1017 /*
1018  * Two staged requeue:
1019  *
1020  * 1) io->orig_bio points to the real original bio, and the part mapped to
1021  *    this io must be requeued, instead of other parts of the original bio.
1022  *
1023  * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
1024  */
1025 static inline void dm_io_complete(struct dm_io *io)
1026 {
1027 	/*
1028 	 * Only dm_io that has been split needs two stage requeue, otherwise
1029 	 * we may run into long bio clone chain during suspend and OOM could
1030 	 * be triggered.
1031 	 *
1032 	 * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
1033 	 * also aren't handled via the first stage requeue.
1034 	 */
1035 	__dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
1036 }
1037 
1038 /*
1039  * Decrements the number of outstanding ios that a bio has been
1040  * cloned into, completing the original io if necc.
1041  */
1042 static inline void __dm_io_dec_pending(struct dm_io *io)
1043 {
1044 	if (atomic_dec_and_test(&io->io_count))
1045 		dm_io_complete(io);
1046 }
1047 
1048 static void dm_io_set_error(struct dm_io *io, blk_status_t error)
1049 {
1050 	unsigned long flags;
1051 
1052 	/* Push-back supersedes any I/O errors */
1053 	spin_lock_irqsave(&io->lock, flags);
1054 	if (!(io->status == BLK_STS_DM_REQUEUE &&
1055 	      __noflush_suspending(io->md))) {
1056 		io->status = error;
1057 	}
1058 	spin_unlock_irqrestore(&io->lock, flags);
1059 }
1060 
1061 static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
1062 {
1063 	if (unlikely(error))
1064 		dm_io_set_error(io, error);
1065 
1066 	__dm_io_dec_pending(io);
1067 }
1068 
1069 /*
1070  * The queue_limits are only valid as long as you have a reference
1071  * count on 'md'. But _not_ imposing verification to avoid atomic_read(),
1072  */
1073 static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1074 {
1075 	return &md->queue->limits;
1076 }
1077 
1078 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
1079 {
1080 	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
1081 }
1082 
1083 static void clone_endio(struct bio *bio)
1084 {
1085 	blk_status_t error = bio->bi_status;
1086 	struct dm_target_io *tio = clone_to_tio(bio);
1087 	struct dm_target *ti = tio->ti;
1088 	dm_endio_fn endio = likely(ti != NULL) ? ti->type->end_io : NULL;
1089 	struct dm_io *io = tio->io;
1090 	struct mapped_device *md = io->md;
1091 
1092 	if (unlikely(error == BLK_STS_TARGET)) {
1093 		if (bio_op(bio) == REQ_OP_DISCARD &&
1094 		    !bdev_max_discard_sectors(bio->bi_bdev))
1095 			blk_queue_disable_discard(md->queue);
1096 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1097 			 !bdev_write_zeroes_sectors(bio->bi_bdev))
1098 			blk_queue_disable_write_zeroes(md->queue);
1099 	}
1100 
1101 	if (static_branch_unlikely(&zoned_enabled) &&
1102 	    unlikely(bdev_is_zoned(bio->bi_bdev)))
1103 		dm_zone_endio(io, bio);
1104 
1105 	if (endio) {
1106 		int r = endio(ti, bio, &error);
1107 
1108 		switch (r) {
1109 		case DM_ENDIO_REQUEUE:
1110 			if (static_branch_unlikely(&zoned_enabled)) {
1111 				/*
1112 				 * Requeuing writes to a sequential zone of a zoned
1113 				 * target will break the sequential write pattern:
1114 				 * fail such IO.
1115 				 */
1116 				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
1117 					error = BLK_STS_IOERR;
1118 				else
1119 					error = BLK_STS_DM_REQUEUE;
1120 			} else
1121 				error = BLK_STS_DM_REQUEUE;
1122 			fallthrough;
1123 		case DM_ENDIO_DONE:
1124 			break;
1125 		case DM_ENDIO_INCOMPLETE:
1126 			/* The target will handle the io */
1127 			return;
1128 		default:
1129 			DMCRIT("unimplemented target endio return value: %d", r);
1130 			BUG();
1131 		}
1132 	}
1133 
1134 	if (static_branch_unlikely(&swap_bios_enabled) &&
1135 	    likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio)))
1136 		up(&md->swap_bios_semaphore);
1137 
1138 	free_tio(bio);
1139 	dm_io_dec_pending(io, error);
1140 }
1141 
1142 /*
1143  * Return maximum size of I/O possible at the supplied sector up to the current
1144  * target boundary.
1145  */
1146 static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
1147 						  sector_t target_offset)
1148 {
1149 	return ti->len - target_offset;
1150 }
1151 
1152 static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
1153 			     unsigned int max_granularity,
1154 			     unsigned int max_sectors)
1155 {
1156 	sector_t target_offset = dm_target_offset(ti, sector);
1157 	sector_t len = max_io_len_target_boundary(ti, target_offset);
1158 
1159 	/*
1160 	 * Does the target need to split IO even further?
1161 	 * - varied (per target) IO splitting is a tenet of DM; this
1162 	 *   explains why stacked chunk_sectors based splitting via
1163 	 *   bio_split_to_limits() isn't possible here.
1164 	 */
1165 	if (!max_granularity)
1166 		return len;
1167 	return min_t(sector_t, len,
1168 		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
1169 		    blk_boundary_sectors_left(target_offset, max_granularity)));
1170 }
1171 
1172 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
1173 {
1174 	return __max_io_len(ti, sector, ti->max_io_len, 0);
1175 }
1176 
1177 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1178 {
1179 	if (len > UINT_MAX) {
1180 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1181 		      (unsigned long long)len, UINT_MAX);
1182 		ti->error = "Maximum size of target IO is too large";
1183 		return -EINVAL;
1184 	}
1185 
1186 	ti->max_io_len = (uint32_t) len;
1187 
1188 	return 0;
1189 }
1190 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1191 
1192 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1193 						sector_t sector, int *srcu_idx)
1194 	__acquires(md->io_barrier)
1195 {
1196 	struct dm_table *map;
1197 	struct dm_target *ti;
1198 
1199 	map = dm_get_live_table(md, srcu_idx);
1200 	if (!map)
1201 		return NULL;
1202 
1203 	ti = dm_table_find_target(map, sector);
1204 	if (!ti)
1205 		return NULL;
1206 
1207 	return ti;
1208 }
1209 
1210 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1211 		long nr_pages, enum dax_access_mode mode, void **kaddr,
1212 		unsigned long *pfn)
1213 {
1214 	struct mapped_device *md = dax_get_private(dax_dev);
1215 	sector_t sector = pgoff * PAGE_SECTORS;
1216 	struct dm_target *ti;
1217 	long len, ret = -EIO;
1218 	int srcu_idx;
1219 
1220 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1221 
1222 	if (!ti)
1223 		goto out;
1224 	if (!ti->type->direct_access)
1225 		goto out;
1226 	len = max_io_len(ti, sector) / PAGE_SECTORS;
1227 	if (len < 1)
1228 		goto out;
1229 	nr_pages = min(len, nr_pages);
1230 	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1231 
1232  out:
1233 	dm_put_live_table(md, srcu_idx);
1234 
1235 	return ret;
1236 }
1237 
1238 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1239 				  size_t nr_pages)
1240 {
1241 	struct mapped_device *md = dax_get_private(dax_dev);
1242 	sector_t sector = pgoff * PAGE_SECTORS;
1243 	struct dm_target *ti;
1244 	int ret = -EIO;
1245 	int srcu_idx;
1246 
1247 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1248 
1249 	if (!ti)
1250 		goto out;
1251 	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1252 		/*
1253 		 * ->zero_page_range() is mandatory dax operation. If we are
1254 		 *  here, something is wrong.
1255 		 */
1256 		goto out;
1257 	}
1258 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1259  out:
1260 	dm_put_live_table(md, srcu_idx);
1261 
1262 	return ret;
1263 }
1264 
1265 static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
1266 		void *addr, size_t bytes, struct iov_iter *i)
1267 {
1268 	struct mapped_device *md = dax_get_private(dax_dev);
1269 	sector_t sector = pgoff * PAGE_SECTORS;
1270 	struct dm_target *ti;
1271 	int srcu_idx;
1272 	long ret = 0;
1273 
1274 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1275 	if (!ti || !ti->type->dax_recovery_write)
1276 		goto out;
1277 
1278 	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1279 out:
1280 	dm_put_live_table(md, srcu_idx);
1281 	return ret;
1282 }
1283 
1284 /*
1285  * A target may call dm_accept_partial_bio only from the map routine.  It is
1286  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1287  * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
1288  * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
1289  * by __send_duplicate_bios().
1290  *
1291  * dm_accept_partial_bio informs the dm that the target only wants to process
1292  * additional n_sectors sectors of the bio and the rest of the data should be
1293  * sent in a next bio.
1294  *
1295  * A diagram that explains the arithmetics:
1296  * +--------------------+---------------+-------+
1297  * |         1          |       2       |   3   |
1298  * +--------------------+---------------+-------+
1299  *
1300  * <-------------- *tio->len_ptr --------------->
1301  *                      <----- bio_sectors ----->
1302  *                      <-- n_sectors -->
1303  *
1304  * Region 1 was already iterated over with bio_advance or similar function.
1305  *	(it may be empty if the target doesn't use bio_advance)
1306  * Region 2 is the remaining bio size that the target wants to process.
1307  *	(it may be empty if region 1 is non-empty, although there is no reason
1308  *	 to make it empty)
1309  * The target requires that region 3 is to be sent in the next bio.
1310  *
1311  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1312  * the partially processed part (the sum of regions 1+2) must be the same for all
1313  * copies of the bio.
1314  */
1315 void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
1316 {
1317 	struct dm_target_io *tio = clone_to_tio(bio);
1318 	struct dm_io *io = tio->io;
1319 	unsigned int bio_sectors = bio_sectors(bio);
1320 
1321 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
1322 	BUG_ON(bio_sectors > *tio->len_ptr);
1323 	BUG_ON(n_sectors > bio_sectors);
1324 
1325 	if (static_branch_unlikely(&zoned_enabled) &&
1326 	    unlikely(bdev_is_zoned(bio->bi_bdev))) {
1327 		enum req_op op = bio_op(bio);
1328 
1329 		BUG_ON(op_is_zone_mgmt(op));
1330 		BUG_ON(op == REQ_OP_WRITE);
1331 		BUG_ON(op == REQ_OP_WRITE_ZEROES);
1332 		BUG_ON(op == REQ_OP_ZONE_APPEND);
1333 	}
1334 
1335 	*tio->len_ptr -= bio_sectors - n_sectors;
1336 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1337 
1338 	/*
1339 	 * __split_and_process_bio() may have already saved mapped part
1340 	 * for accounting but it is being reduced so update accordingly.
1341 	 */
1342 	dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1343 	io->sectors = n_sectors;
1344 	io->sector_offset = bio_sectors(io->orig_bio);
1345 }
1346 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1347 
1348 /*
1349  * @clone: clone bio that DM core passed to target's .map function
1350  * @tgt_clone: clone of @clone bio that target needs submitted
1351  *
1352  * Targets should use this interface to submit bios they take
1353  * ownership of when returning DM_MAPIO_SUBMITTED.
1354  *
1355  * Target should also enable ti->accounts_remapped_io
1356  */
1357 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
1358 {
1359 	struct dm_target_io *tio = clone_to_tio(clone);
1360 	struct dm_io *io = tio->io;
1361 
1362 	/* establish bio that will get submitted */
1363 	if (!tgt_clone)
1364 		tgt_clone = clone;
1365 
1366 	/*
1367 	 * Account io->origin_bio to DM dev on behalf of target
1368 	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
1369 	 */
1370 	dm_start_io_acct(io, clone);
1371 
1372 	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
1373 			      tio->old_sector);
1374 	submit_bio_noacct(tgt_clone);
1375 }
1376 EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
1377 
1378 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1379 {
1380 	mutex_lock(&md->swap_bios_lock);
1381 	while (latch < md->swap_bios) {
1382 		cond_resched();
1383 		down(&md->swap_bios_semaphore);
1384 		md->swap_bios--;
1385 	}
1386 	while (latch > md->swap_bios) {
1387 		cond_resched();
1388 		up(&md->swap_bios_semaphore);
1389 		md->swap_bios++;
1390 	}
1391 	mutex_unlock(&md->swap_bios_lock);
1392 }
1393 
1394 static void __map_bio(struct bio *clone)
1395 {
1396 	struct dm_target_io *tio = clone_to_tio(clone);
1397 	struct dm_target *ti = tio->ti;
1398 	struct dm_io *io = tio->io;
1399 	struct mapped_device *md = io->md;
1400 	int r;
1401 
1402 	clone->bi_end_io = clone_endio;
1403 
1404 	/*
1405 	 * Map the clone.
1406 	 */
1407 	tio->old_sector = clone->bi_iter.bi_sector;
1408 
1409 	if (static_branch_unlikely(&swap_bios_enabled) &&
1410 	    unlikely(swap_bios_limit(ti, clone))) {
1411 		int latch = get_swap_bios();
1412 
1413 		if (unlikely(latch != md->swap_bios))
1414 			__set_swap_bios_limit(md, latch);
1415 		down(&md->swap_bios_semaphore);
1416 	}
1417 
1418 	if (likely(ti->type->map == linear_map))
1419 		r = linear_map(ti, clone);
1420 	else if (ti->type->map == stripe_map)
1421 		r = stripe_map(ti, clone);
1422 	else
1423 		r = ti->type->map(ti, clone);
1424 
1425 	switch (r) {
1426 	case DM_MAPIO_SUBMITTED:
1427 		/* target has assumed ownership of this io */
1428 		if (!ti->accounts_remapped_io)
1429 			dm_start_io_acct(io, clone);
1430 		break;
1431 	case DM_MAPIO_REMAPPED:
1432 		dm_submit_bio_remap(clone, NULL);
1433 		break;
1434 	case DM_MAPIO_KILL:
1435 	case DM_MAPIO_REQUEUE:
1436 		if (static_branch_unlikely(&swap_bios_enabled) &&
1437 		    unlikely(swap_bios_limit(ti, clone)))
1438 			up(&md->swap_bios_semaphore);
1439 		free_tio(clone);
1440 		if (r == DM_MAPIO_KILL)
1441 			dm_io_dec_pending(io, BLK_STS_IOERR);
1442 		else
1443 			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1444 		break;
1445 	default:
1446 		DMCRIT("unimplemented target map return value: %d", r);
1447 		BUG();
1448 	}
1449 }
1450 
1451 static void setup_split_accounting(struct clone_info *ci, unsigned int len)
1452 {
1453 	struct dm_io *io = ci->io;
1454 
1455 	if (ci->sector_count > len) {
1456 		/*
1457 		 * Split needed, save the mapped part for accounting.
1458 		 * NOTE: dm_accept_partial_bio() will update accordingly.
1459 		 */
1460 		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1461 		io->sectors = len;
1462 		io->sector_offset = bio_sectors(ci->bio);
1463 	}
1464 }
1465 
1466 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1467 				struct dm_target *ti, unsigned int num_bios,
1468 				unsigned *len)
1469 {
1470 	struct bio *bio;
1471 	int try;
1472 
1473 	for (try = 0; try < 2; try++) {
1474 		int bio_nr;
1475 
1476 		if (try && num_bios > 1)
1477 			mutex_lock(&ci->io->md->table_devices_lock);
1478 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1479 			bio = alloc_tio(ci, ti, bio_nr, len,
1480 					try ? GFP_NOIO : GFP_NOWAIT);
1481 			if (!bio)
1482 				break;
1483 
1484 			bio_list_add(blist, bio);
1485 		}
1486 		if (try && num_bios > 1)
1487 			mutex_unlock(&ci->io->md->table_devices_lock);
1488 		if (bio_nr == num_bios)
1489 			return;
1490 
1491 		while ((bio = bio_list_pop(blist)))
1492 			free_tio(bio);
1493 	}
1494 }
1495 
1496 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1497 					  unsigned int num_bios, unsigned int *len)
1498 {
1499 	struct bio_list blist = BIO_EMPTY_LIST;
1500 	struct bio *clone;
1501 	unsigned int ret = 0;
1502 
1503 	if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
1504 		return 0;
1505 
1506 	/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
1507 	if (len)
1508 		setup_split_accounting(ci, *len);
1509 
1510 	/*
1511 	 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
1512 	 * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
1513 	 */
1514 	alloc_multiple_bios(&blist, ci, ti, num_bios, len);
1515 	while ((clone = bio_list_pop(&blist))) {
1516 		if (num_bios > 1)
1517 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
1518 		__map_bio(clone);
1519 		ret += 1;
1520 	}
1521 
1522 	return ret;
1523 }
1524 
1525 static void __send_empty_flush(struct clone_info *ci)
1526 {
1527 	struct dm_table *t = ci->map;
1528 	struct bio flush_bio;
1529 	blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1530 
1531 	if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
1532 	    (REQ_IDLE | REQ_SYNC))
1533 		opf |= REQ_IDLE;
1534 
1535 	/*
1536 	 * Use an on-stack bio for this, it's safe since we don't
1537 	 * need to reference it after submit. It's just used as
1538 	 * the basis for the clone(s).
1539 	 */
1540 	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
1541 
1542 	ci->bio = &flush_bio;
1543 	ci->sector_count = 0;
1544 	ci->io->tio.clone.bi_iter.bi_size = 0;
1545 
1546 	if (!t->flush_bypasses_map) {
1547 		for (unsigned int i = 0; i < t->num_targets; i++) {
1548 			unsigned int bios;
1549 			struct dm_target *ti = dm_table_get_target(t, i);
1550 
1551 			if (unlikely(ti->num_flush_bios == 0))
1552 				continue;
1553 
1554 			atomic_add(ti->num_flush_bios, &ci->io->io_count);
1555 			bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
1556 						     NULL);
1557 			atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
1558 		}
1559 	} else {
1560 		/*
1561 		 * Note that there's no need to grab t->devices_lock here
1562 		 * because the targets that support flush optimization don't
1563 		 * modify the list of devices.
1564 		 */
1565 		struct list_head *devices = dm_table_get_devices(t);
1566 		unsigned int len = 0;
1567 		struct dm_dev_internal *dd;
1568 		list_for_each_entry(dd, devices, list) {
1569 			struct bio *clone;
1570 			/*
1571 			 * Note that the structure dm_target_io is not
1572 			 * associated with any target (because the device may be
1573 			 * used by multiple targets), so we set tio->ti = NULL.
1574 			 * We must check for NULL in the I/O processing path, to
1575 			 * avoid NULL pointer dereference.
1576 			 */
1577 			clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO);
1578 			atomic_add(1, &ci->io->io_count);
1579 			bio_set_dev(clone, dd->dm_dev->bdev);
1580 			clone->bi_end_io = clone_endio;
1581 			dm_submit_bio_remap(clone, NULL);
1582 		}
1583 	}
1584 
1585 	/*
1586 	 * alloc_io() takes one extra reference for submission, so the
1587 	 * reference won't reach 0 without the following subtraction
1588 	 */
1589 	atomic_sub(1, &ci->io->io_count);
1590 
1591 	bio_uninit(ci->bio);
1592 }
1593 
1594 static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1595 			       unsigned int num_bios, unsigned int max_granularity,
1596 			       unsigned int max_sectors)
1597 {
1598 	unsigned int len, bios;
1599 
1600 	len = min_t(sector_t, ci->sector_count,
1601 		    __max_io_len(ti, ci->sector, max_granularity, max_sectors));
1602 
1603 	atomic_add(num_bios, &ci->io->io_count);
1604 	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
1605 	/*
1606 	 * alloc_io() takes one extra reference for submission, so the
1607 	 * reference won't reach 0 without the following (+1) subtraction
1608 	 */
1609 	atomic_sub(num_bios - bios + 1, &ci->io->io_count);
1610 
1611 	ci->sector += len;
1612 	ci->sector_count -= len;
1613 }
1614 
1615 static bool is_abnormal_io(struct bio *bio)
1616 {
1617 	switch (bio_op(bio)) {
1618 	case REQ_OP_READ:
1619 	case REQ_OP_WRITE:
1620 	case REQ_OP_FLUSH:
1621 		return false;
1622 	case REQ_OP_DISCARD:
1623 	case REQ_OP_SECURE_ERASE:
1624 	case REQ_OP_WRITE_ZEROES:
1625 	case REQ_OP_ZONE_RESET_ALL:
1626 		return true;
1627 	default:
1628 		return false;
1629 	}
1630 }
1631 
1632 static blk_status_t __process_abnormal_io(struct clone_info *ci,
1633 					  struct dm_target *ti)
1634 {
1635 	unsigned int num_bios = 0;
1636 	unsigned int max_granularity = 0;
1637 	unsigned int max_sectors = 0;
1638 	struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
1639 
1640 	switch (bio_op(ci->bio)) {
1641 	case REQ_OP_DISCARD:
1642 		num_bios = ti->num_discard_bios;
1643 		max_sectors = limits->max_discard_sectors;
1644 		if (ti->max_discard_granularity)
1645 			max_granularity = max_sectors;
1646 		break;
1647 	case REQ_OP_SECURE_ERASE:
1648 		num_bios = ti->num_secure_erase_bios;
1649 		max_sectors = limits->max_secure_erase_sectors;
1650 		break;
1651 	case REQ_OP_WRITE_ZEROES:
1652 		num_bios = ti->num_write_zeroes_bios;
1653 		max_sectors = limits->max_write_zeroes_sectors;
1654 		break;
1655 	default:
1656 		break;
1657 	}
1658 
1659 	/*
1660 	 * Even though the device advertised support for this type of
1661 	 * request, that does not mean every target supports it, and
1662 	 * reconfiguration might also have changed that since the
1663 	 * check was performed.
1664 	 */
1665 	if (unlikely(!num_bios))
1666 		return BLK_STS_NOTSUPP;
1667 
1668 	__send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
1669 
1670 	return BLK_STS_OK;
1671 }
1672 
1673 /*
1674  * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1675  * associated with this bio, and this bio's bi_private needs to be
1676  * stored in dm_io->data before the reuse.
1677  *
1678  * bio->bi_private is owned by fs or upper layer, so block layer won't
1679  * touch it after splitting. Meantime it won't be changed by anyone after
1680  * bio is submitted. So this reuse is safe.
1681  */
1682 static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1683 {
1684 	return (struct dm_io **)&bio->bi_private;
1685 }
1686 
1687 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1688 {
1689 	struct dm_io **head = dm_poll_list_head(bio);
1690 
1691 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1692 		bio->bi_opf |= REQ_DM_POLL_LIST;
1693 		/*
1694 		 * Save .bi_private into dm_io, so that we can reuse
1695 		 * .bi_private as dm_io list head for storing dm_io list
1696 		 */
1697 		io->data = bio->bi_private;
1698 
1699 		/* tell block layer to poll for completion */
1700 		bio->bi_cookie = ~BLK_QC_T_NONE;
1701 
1702 		io->next = NULL;
1703 	} else {
1704 		/*
1705 		 * bio recursed due to split, reuse original poll list,
1706 		 * and save bio->bi_private too.
1707 		 */
1708 		io->data = (*head)->data;
1709 		io->next = *head;
1710 	}
1711 
1712 	*head = io;
1713 }
1714 
1715 /*
1716  * Select the correct strategy for processing a non-flush bio.
1717  */
1718 static blk_status_t __split_and_process_bio(struct clone_info *ci)
1719 {
1720 	struct bio *clone;
1721 	struct dm_target *ti;
1722 	unsigned int len;
1723 
1724 	ti = dm_table_find_target(ci->map, ci->sector);
1725 	if (unlikely(!ti))
1726 		return BLK_STS_IOERR;
1727 
1728 	if (unlikely(ci->is_abnormal_io))
1729 		return __process_abnormal_io(ci, ti);
1730 
1731 	/*
1732 	 * Only support bio polling for normal IO, and the target io is
1733 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
1734 	 */
1735 	ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
1736 
1737 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1738 	if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count)
1739 		return BLK_STS_IOERR;
1740 
1741 	setup_split_accounting(ci, len);
1742 
1743 	if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
1744 		if (unlikely(!dm_target_supports_nowait(ti->type)))
1745 			return BLK_STS_NOTSUPP;
1746 
1747 		clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
1748 		if (unlikely(!clone))
1749 			return BLK_STS_AGAIN;
1750 	} else {
1751 		clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
1752 	}
1753 	__map_bio(clone);
1754 
1755 	ci->sector += len;
1756 	ci->sector_count -= len;
1757 
1758 	return BLK_STS_OK;
1759 }
1760 
1761 static void init_clone_info(struct clone_info *ci, struct dm_io *io,
1762 			    struct dm_table *map, struct bio *bio, bool is_abnormal)
1763 {
1764 	ci->map = map;
1765 	ci->io = io;
1766 	ci->bio = bio;
1767 	ci->is_abnormal_io = is_abnormal;
1768 	ci->submit_as_polled = false;
1769 	ci->sector = bio->bi_iter.bi_sector;
1770 	ci->sector_count = bio_sectors(bio);
1771 
1772 	/* Shouldn't happen but sector_count was being set to 0 so... */
1773 	if (static_branch_unlikely(&zoned_enabled) &&
1774 	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1775 		ci->sector_count = 0;
1776 }
1777 
1778 #ifdef CONFIG_BLK_DEV_ZONED
1779 static inline bool dm_zone_bio_needs_split(struct bio *bio)
1780 {
1781 	/*
1782 	 * Special case the zone operations that cannot or should not be split.
1783 	 */
1784 	switch (bio_op(bio)) {
1785 	case REQ_OP_ZONE_APPEND:
1786 	case REQ_OP_ZONE_FINISH:
1787 	case REQ_OP_ZONE_RESET:
1788 	case REQ_OP_ZONE_RESET_ALL:
1789 		return false;
1790 	default:
1791 		break;
1792 	}
1793 
1794 	/*
1795 	 * When mapped devices use the block layer zone write plugging, we must
1796 	 * split any large BIO to the mapped device limits to not submit BIOs
1797 	 * that span zone boundaries and to avoid potential deadlocks with
1798 	 * queue freeze operations.
1799 	 */
1800 	return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
1801 }
1802 
1803 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1804 {
1805 	if (!bio_needs_zone_write_plugging(bio))
1806 		return false;
1807 	return blk_zone_plug_bio(bio, 0);
1808 }
1809 
1810 static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
1811 						   struct dm_target *ti)
1812 {
1813 	struct bio_list blist = BIO_EMPTY_LIST;
1814 	struct mapped_device *md = ci->io->md;
1815 	unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors;
1816 	unsigned long *need_reset;
1817 	unsigned int i, nr_zones, nr_reset;
1818 	unsigned int num_bios = 0;
1819 	blk_status_t sts = BLK_STS_OK;
1820 	sector_t sector = ti->begin;
1821 	struct bio *clone;
1822 	int ret;
1823 
1824 	nr_zones = ti->len >> ilog2(zone_sectors);
1825 	need_reset = bitmap_zalloc(nr_zones, GFP_NOIO);
1826 	if (!need_reset)
1827 		return BLK_STS_RESOURCE;
1828 
1829 	ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin,
1830 				       nr_zones, need_reset);
1831 	if (ret) {
1832 		sts = BLK_STS_IOERR;
1833 		goto free_bitmap;
1834 	}
1835 
1836 	/* If we have no zone to reset, we are done. */
1837 	nr_reset = bitmap_weight(need_reset, nr_zones);
1838 	if (!nr_reset)
1839 		goto free_bitmap;
1840 
1841 	atomic_add(nr_zones, &ci->io->io_count);
1842 
1843 	for (i = 0; i < nr_zones; i++) {
1844 
1845 		if (!test_bit(i, need_reset)) {
1846 			sector += zone_sectors;
1847 			continue;
1848 		}
1849 
1850 		if (bio_list_empty(&blist)) {
1851 			/* This may take a while, so be nice to others */
1852 			if (num_bios)
1853 				cond_resched();
1854 
1855 			/*
1856 			 * We may need to reset thousands of zones, so let's
1857 			 * not go crazy with the clone allocation.
1858 			 */
1859 			alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
1860 					    NULL);
1861 		}
1862 
1863 		/* Get a clone and change it to a regular reset operation. */
1864 		clone = bio_list_pop(&blist);
1865 		clone->bi_opf &= ~REQ_OP_MASK;
1866 		clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;
1867 		clone->bi_iter.bi_sector = sector;
1868 		clone->bi_iter.bi_size = 0;
1869 		__map_bio(clone);
1870 
1871 		sector += zone_sectors;
1872 		num_bios++;
1873 		nr_reset--;
1874 	}
1875 
1876 	WARN_ON_ONCE(!bio_list_empty(&blist));
1877 	atomic_sub(nr_zones - num_bios, &ci->io->io_count);
1878 	ci->sector_count = 0;
1879 
1880 free_bitmap:
1881 	bitmap_free(need_reset);
1882 
1883 	return sts;
1884 }
1885 
1886 static void __send_zone_reset_all_native(struct clone_info *ci,
1887 					 struct dm_target *ti)
1888 {
1889 	unsigned int bios;
1890 
1891 	atomic_add(1, &ci->io->io_count);
1892 	bios = __send_duplicate_bios(ci, ti, 1, NULL);
1893 	atomic_sub(1 - bios, &ci->io->io_count);
1894 
1895 	ci->sector_count = 0;
1896 }
1897 
1898 static blk_status_t __send_zone_reset_all(struct clone_info *ci)
1899 {
1900 	struct dm_table *t = ci->map;
1901 	blk_status_t sts = BLK_STS_OK;
1902 
1903 	for (unsigned int i = 0; i < t->num_targets; i++) {
1904 		struct dm_target *ti = dm_table_get_target(t, i);
1905 
1906 		if (ti->zone_reset_all_supported) {
1907 			__send_zone_reset_all_native(ci, ti);
1908 			continue;
1909 		}
1910 
1911 		sts = __send_zone_reset_all_emulated(ci, ti);
1912 		if (sts != BLK_STS_OK)
1913 			break;
1914 	}
1915 
1916 	/* Release the reference that alloc_io() took for submission. */
1917 	atomic_sub(1, &ci->io->io_count);
1918 
1919 	return sts;
1920 }
1921 
1922 #else
1923 static inline bool dm_zone_bio_needs_split(struct bio *bio)
1924 {
1925 	return false;
1926 }
1927 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1928 {
1929 	return false;
1930 }
1931 static blk_status_t __send_zone_reset_all(struct clone_info *ci)
1932 {
1933 	return BLK_STS_NOTSUPP;
1934 }
1935 #endif
1936 
1937 /*
1938  * Entry point to split a bio into clones and submit them to the targets.
1939  */
1940 static void dm_split_and_process_bio(struct mapped_device *md,
1941 				     struct dm_table *map, struct bio *bio)
1942 {
1943 	struct clone_info ci;
1944 	struct dm_io *io;
1945 	blk_status_t error = BLK_STS_OK;
1946 	bool is_abnormal, need_split;
1947 
1948 	is_abnormal = is_abnormal_io(bio);
1949 	if (static_branch_unlikely(&zoned_enabled)) {
1950 		need_split = is_abnormal || dm_zone_bio_needs_split(bio);
1951 	} else {
1952 		need_split = is_abnormal;
1953 	}
1954 
1955 	if (unlikely(need_split)) {
1956 		/*
1957 		 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
1958 		 * otherwise associated queue_limits won't be imposed.
1959 		 * Also split the BIO for mapped devices needing zone append
1960 		 * emulation to ensure that the BIO does not cross zone
1961 		 * boundaries.
1962 		 */
1963 		bio = bio_split_to_limits(bio);
1964 		if (!bio)
1965 			return;
1966 	}
1967 
1968 	/*
1969 	 * Use the block layer zone write plugging for mapped devices that
1970 	 * need zone append emulation (e.g. dm-crypt).
1971 	 */
1972 	if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
1973 		return;
1974 
1975 	/* Only support nowait for normal IO */
1976 	if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
1977 		/*
1978 		 * Don't support NOWAIT for FLUSH because it may allocate
1979 		 * multiple bios and there's no easy way how to undo the
1980 		 * allocations.
1981 		 */
1982 		if (bio->bi_opf & REQ_PREFLUSH) {
1983 			bio_wouldblock_error(bio);
1984 			return;
1985 		}
1986 		io = alloc_io(md, bio, GFP_NOWAIT);
1987 		if (unlikely(!io)) {
1988 			/* Unable to do anything without dm_io. */
1989 			bio_wouldblock_error(bio);
1990 			return;
1991 		}
1992 	} else {
1993 		io = alloc_io(md, bio, GFP_NOIO);
1994 	}
1995 	init_clone_info(&ci, io, map, bio, is_abnormal);
1996 
1997 	if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
1998 		/*
1999 		 * The "flush_bypasses_map" is set on targets where it is safe
2000 		 * to skip the map function and submit bios directly to the
2001 		 * underlying block devices - currently, it is set for dm-linear
2002 		 * and dm-stripe.
2003 		 *
2004 		 * If we have just one underlying device (i.e. there is one
2005 		 * linear target or multiple linear targets pointing to the same
2006 		 * device), we can send the flush with data directly to it.
2007 		 */
2008 		if (map->flush_bypasses_map) {
2009 			struct list_head *devices = dm_table_get_devices(map);
2010 			if (devices->next == devices->prev)
2011 				goto send_preflush_with_data;
2012 		}
2013 		if (bio->bi_iter.bi_size)
2014 			io->requeue_flush_with_data = true;
2015 		__send_empty_flush(&ci);
2016 		/* dm_io_complete submits any data associated with flush */
2017 		goto out;
2018 	}
2019 
2020 send_preflush_with_data:
2021 	if (static_branch_unlikely(&zoned_enabled) &&
2022 	    (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
2023 		error = __send_zone_reset_all(&ci);
2024 		goto out;
2025 	}
2026 
2027 	error = __split_and_process_bio(&ci);
2028 	if (error || !ci.sector_count)
2029 		goto out;
2030 	/*
2031 	 * Remainder must be passed to submit_bio_noacct() so it gets handled
2032 	 * *after* bios already submitted have been completely processed.
2033 	 */
2034 	bio_trim(bio, io->sectors, ci.sector_count);
2035 	trace_block_split(bio, bio->bi_iter.bi_sector);
2036 	bio_inc_remaining(bio);
2037 	submit_bio_noacct(bio);
2038 out:
2039 	/*
2040 	 * Drop the extra reference count for non-POLLED bio, and hold one
2041 	 * reference for POLLED bio, which will be released in dm_poll_bio
2042 	 *
2043 	 * Add every dm_io instance into the dm_io list head which is stored
2044 	 * in bio->bi_private, so that dm_poll_bio can poll them all.
2045 	 */
2046 	if (error || !ci.submit_as_polled) {
2047 		/*
2048 		 * In case of submission failure, the extra reference for
2049 		 * submitting io isn't consumed yet
2050 		 */
2051 		if (error)
2052 			atomic_dec(&io->io_count);
2053 		dm_io_dec_pending(io, error);
2054 	} else
2055 		dm_queue_poll_io(bio, io);
2056 }
2057 
2058 static void dm_submit_bio(struct bio *bio)
2059 {
2060 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
2061 	int srcu_idx;
2062 	struct dm_table *map;
2063 
2064 	map = dm_get_live_table(md, &srcu_idx);
2065 	if (unlikely(!map)) {
2066 		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
2067 			    dm_device_name(md));
2068 		bio_io_error(bio);
2069 		goto out;
2070 	}
2071 
2072 	/* If suspended, queue this IO for later */
2073 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
2074 		if (bio->bi_opf & REQ_NOWAIT)
2075 			bio_wouldblock_error(bio);
2076 		else if (bio->bi_opf & REQ_RAHEAD)
2077 			bio_io_error(bio);
2078 		else
2079 			queue_io(md, bio);
2080 		goto out;
2081 	}
2082 
2083 	dm_split_and_process_bio(md, map, bio);
2084 out:
2085 	dm_put_live_table(md, srcu_idx);
2086 }
2087 
2088 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
2089 			  unsigned int flags)
2090 {
2091 	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
2092 
2093 	/* don't poll if the mapped io is done */
2094 	if (atomic_read(&io->io_count) > 1)
2095 		bio_poll(&io->tio.clone, iob, flags);
2096 
2097 	/* bio_poll holds the last reference */
2098 	return atomic_read(&io->io_count) == 1;
2099 }
2100 
2101 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
2102 		       unsigned int flags)
2103 {
2104 	struct dm_io **head = dm_poll_list_head(bio);
2105 	struct dm_io *list = *head;
2106 	struct dm_io *tmp = NULL;
2107 	struct dm_io *curr, *next;
2108 
2109 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
2110 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
2111 		return 0;
2112 
2113 	WARN_ON_ONCE(!list);
2114 
2115 	/*
2116 	 * Restore .bi_private before possibly completing dm_io.
2117 	 *
2118 	 * bio_poll() is only possible once @bio has been completely
2119 	 * submitted via submit_bio_noacct()'s depth-first submission.
2120 	 * So there is no dm_queue_poll_io() race associated with
2121 	 * clearing REQ_DM_POLL_LIST here.
2122 	 */
2123 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
2124 	bio->bi_private = list->data;
2125 
2126 	for (curr = list, next = curr->next; curr; curr = next, next =
2127 			curr ? curr->next : NULL) {
2128 		if (dm_poll_dm_io(curr, iob, flags)) {
2129 			/*
2130 			 * clone_endio() has already occurred, so no
2131 			 * error handling is needed here.
2132 			 */
2133 			__dm_io_dec_pending(curr);
2134 		} else {
2135 			curr->next = tmp;
2136 			tmp = curr;
2137 		}
2138 	}
2139 
2140 	/* Not done? */
2141 	if (tmp) {
2142 		bio->bi_opf |= REQ_DM_POLL_LIST;
2143 		/* Reset bio->bi_private to dm_io list head */
2144 		*head = tmp;
2145 		return 0;
2146 	}
2147 	return 1;
2148 }
2149 
2150 /*
2151  *---------------------------------------------------------------
2152  * An IDR is used to keep track of allocated minor numbers.
2153  *---------------------------------------------------------------
2154  */
2155 static void free_minor(int minor)
2156 {
2157 	spin_lock(&_minor_lock);
2158 	idr_remove(&_minor_idr, minor);
2159 	spin_unlock(&_minor_lock);
2160 }
2161 
2162 /*
2163  * See if the device with a specific minor # is free.
2164  */
2165 static int specific_minor(int minor)
2166 {
2167 	int r;
2168 
2169 	if (minor >= (1 << MINORBITS))
2170 		return -EINVAL;
2171 
2172 	idr_preload(GFP_KERNEL);
2173 	spin_lock(&_minor_lock);
2174 
2175 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
2176 
2177 	spin_unlock(&_minor_lock);
2178 	idr_preload_end();
2179 	if (r < 0)
2180 		return r == -ENOSPC ? -EBUSY : r;
2181 	return 0;
2182 }
2183 
2184 static int next_free_minor(int *minor)
2185 {
2186 	int r;
2187 
2188 	idr_preload(GFP_KERNEL);
2189 	spin_lock(&_minor_lock);
2190 
2191 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2192 
2193 	spin_unlock(&_minor_lock);
2194 	idr_preload_end();
2195 	if (r < 0)
2196 		return r;
2197 	*minor = r;
2198 	return 0;
2199 }
2200 
2201 static const struct block_device_operations dm_blk_dops;
2202 static const struct block_device_operations dm_rq_blk_dops;
2203 static const struct dax_operations dm_dax_ops;
2204 
2205 static void dm_wq_work(struct work_struct *work);
2206 
2207 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
2208 static void dm_queue_destroy_crypto_profile(struct request_queue *q)
2209 {
2210 	dm_destroy_crypto_profile(q->crypto_profile);
2211 }
2212 
2213 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
2214 
2215 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
2216 {
2217 }
2218 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
2219 
2220 static void cleanup_mapped_device(struct mapped_device *md)
2221 {
2222 	if (md->wq)
2223 		destroy_workqueue(md->wq);
2224 	dm_free_md_mempools(md->mempools);
2225 
2226 	if (md->dax_dev) {
2227 		dax_remove_host(md->disk);
2228 		kill_dax(md->dax_dev);
2229 		put_dax(md->dax_dev);
2230 		md->dax_dev = NULL;
2231 	}
2232 
2233 	if (md->disk) {
2234 		spin_lock(&_minor_lock);
2235 		md->disk->private_data = NULL;
2236 		spin_unlock(&_minor_lock);
2237 		if (dm_get_md_type(md) != DM_TYPE_NONE) {
2238 			struct table_device *td;
2239 
2240 			dm_sysfs_exit(md);
2241 			list_for_each_entry(td, &md->table_devices, list) {
2242 				bd_unlink_disk_holder(td->dm_dev.bdev,
2243 						      md->disk);
2244 			}
2245 
2246 			/*
2247 			 * Hold lock to make sure del_gendisk() won't concurrent
2248 			 * with open/close_table_device().
2249 			 */
2250 			mutex_lock(&md->table_devices_lock);
2251 			del_gendisk(md->disk);
2252 			mutex_unlock(&md->table_devices_lock);
2253 		}
2254 		dm_queue_destroy_crypto_profile(md->queue);
2255 		put_disk(md->disk);
2256 	}
2257 
2258 	if (md->pending_io) {
2259 		free_percpu(md->pending_io);
2260 		md->pending_io = NULL;
2261 	}
2262 
2263 	cleanup_srcu_struct(&md->io_barrier);
2264 
2265 	mutex_destroy(&md->suspend_lock);
2266 	mutex_destroy(&md->type_lock);
2267 	mutex_destroy(&md->table_devices_lock);
2268 	mutex_destroy(&md->swap_bios_lock);
2269 
2270 	dm_mq_cleanup_mapped_device(md);
2271 }
2272 
2273 /*
2274  * Allocate and initialise a blank device with a given minor.
2275  */
2276 static struct mapped_device *alloc_dev(int minor)
2277 {
2278 	int r, numa_node_id = dm_get_numa_node();
2279 	struct dax_device *dax_dev;
2280 	struct mapped_device *md;
2281 	void *old_md;
2282 
2283 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2284 	if (!md) {
2285 		DMERR("unable to allocate device, out of memory.");
2286 		return NULL;
2287 	}
2288 
2289 	if (!try_module_get(THIS_MODULE))
2290 		goto bad_module_get;
2291 
2292 	/* get a minor number for the dev */
2293 	if (minor == DM_ANY_MINOR)
2294 		r = next_free_minor(&minor);
2295 	else
2296 		r = specific_minor(minor);
2297 	if (r < 0)
2298 		goto bad_minor;
2299 
2300 	r = init_srcu_struct(&md->io_barrier);
2301 	if (r < 0)
2302 		goto bad_io_barrier;
2303 
2304 	md->numa_node_id = numa_node_id;
2305 	md->init_tio_pdu = false;
2306 	md->type = DM_TYPE_NONE;
2307 	mutex_init(&md->suspend_lock);
2308 	mutex_init(&md->type_lock);
2309 	mutex_init(&md->table_devices_lock);
2310 	spin_lock_init(&md->deferred_lock);
2311 	atomic_set(&md->holders, 1);
2312 	atomic_set(&md->open_count, 0);
2313 	atomic_set(&md->event_nr, 0);
2314 	atomic_set(&md->uevent_seq, 0);
2315 	INIT_LIST_HEAD(&md->uevent_list);
2316 	INIT_LIST_HEAD(&md->table_devices);
2317 	spin_lock_init(&md->uevent_lock);
2318 
2319 	/*
2320 	 * default to bio-based until DM table is loaded and md->type
2321 	 * established. If request-based table is loaded: blk-mq will
2322 	 * override accordingly.
2323 	 */
2324 	md->disk = blk_alloc_disk(NULL, md->numa_node_id);
2325 	if (IS_ERR(md->disk)) {
2326 		md->disk = NULL;
2327 		goto bad;
2328 	}
2329 	md->queue = md->disk->queue;
2330 
2331 	init_waitqueue_head(&md->wait);
2332 	INIT_WORK(&md->work, dm_wq_work);
2333 	INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
2334 	init_waitqueue_head(&md->eventq);
2335 	init_completion(&md->kobj_holder.completion);
2336 
2337 	md->requeue_list = NULL;
2338 	md->swap_bios = get_swap_bios();
2339 	sema_init(&md->swap_bios_semaphore, md->swap_bios);
2340 	mutex_init(&md->swap_bios_lock);
2341 
2342 	md->disk->major = _major;
2343 	md->disk->first_minor = minor;
2344 	md->disk->minors = 1;
2345 	md->disk->flags |= GENHD_FL_NO_PART;
2346 	md->disk->fops = &dm_blk_dops;
2347 	md->disk->private_data = md;
2348 	sprintf(md->disk->disk_name, "dm-%d", minor);
2349 
2350 	dax_dev = alloc_dax(md, &dm_dax_ops);
2351 	if (IS_ERR(dax_dev)) {
2352 		if (PTR_ERR(dax_dev) != -EOPNOTSUPP)
2353 			goto bad;
2354 	} else {
2355 		set_dax_nocache(dax_dev);
2356 		set_dax_nomc(dax_dev);
2357 		md->dax_dev = dax_dev;
2358 		if (dax_add_host(dax_dev, md->disk))
2359 			goto bad;
2360 	}
2361 
2362 	format_dev_t(md->name, MKDEV(_major, minor));
2363 
2364 	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2365 	if (!md->wq)
2366 		goto bad;
2367 
2368 	md->pending_io = alloc_percpu(unsigned long);
2369 	if (!md->pending_io)
2370 		goto bad;
2371 
2372 	r = dm_stats_init(&md->stats);
2373 	if (r < 0)
2374 		goto bad;
2375 
2376 	/* Populate the mapping, nobody knows we exist yet */
2377 	spin_lock(&_minor_lock);
2378 	old_md = idr_replace(&_minor_idr, md, minor);
2379 	spin_unlock(&_minor_lock);
2380 
2381 	BUG_ON(old_md != MINOR_ALLOCED);
2382 
2383 	return md;
2384 
2385 bad:
2386 	cleanup_mapped_device(md);
2387 bad_io_barrier:
2388 	free_minor(minor);
2389 bad_minor:
2390 	module_put(THIS_MODULE);
2391 bad_module_get:
2392 	kvfree(md);
2393 	return NULL;
2394 }
2395 
2396 static void unlock_fs(struct mapped_device *md);
2397 
2398 static void free_dev(struct mapped_device *md)
2399 {
2400 	int minor = MINOR(disk_devt(md->disk));
2401 
2402 	unlock_fs(md);
2403 
2404 	cleanup_mapped_device(md);
2405 
2406 	WARN_ON_ONCE(!list_empty(&md->table_devices));
2407 	dm_stats_cleanup(&md->stats);
2408 	free_minor(minor);
2409 
2410 	module_put(THIS_MODULE);
2411 	kvfree(md);
2412 }
2413 
2414 /*
2415  * Bind a table to the device.
2416  */
2417 static void event_callback(void *context)
2418 {
2419 	unsigned long flags;
2420 	LIST_HEAD(uevents);
2421 	struct mapped_device *md = context;
2422 
2423 	spin_lock_irqsave(&md->uevent_lock, flags);
2424 	list_splice_init(&md->uevent_list, &uevents);
2425 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2426 
2427 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2428 
2429 	atomic_inc(&md->event_nr);
2430 	wake_up(&md->eventq);
2431 	dm_issue_global_event();
2432 }
2433 
2434 /*
2435  * Returns old map, which caller must destroy.
2436  */
2437 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2438 			       struct queue_limits *limits)
2439 {
2440 	struct dm_table *old_map;
2441 	sector_t size, old_size;
2442 	int ret;
2443 
2444 	lockdep_assert_held(&md->suspend_lock);
2445 
2446 	size = dm_table_get_size(t);
2447 
2448 	old_size = dm_get_size(md);
2449 
2450 	if (!dm_table_supports_size_change(t, old_size, size)) {
2451 		old_map = ERR_PTR(-EINVAL);
2452 		goto out;
2453 	}
2454 
2455 	set_capacity(md->disk, size);
2456 
2457 	ret = dm_table_set_restrictions(t, md->queue, limits);
2458 	if (ret) {
2459 		set_capacity(md->disk, old_size);
2460 		old_map = ERR_PTR(ret);
2461 		goto out;
2462 	}
2463 
2464 	/*
2465 	 * Wipe any geometry if the size of the table changed.
2466 	 */
2467 	if (size != old_size)
2468 		memset(&md->geometry, 0, sizeof(md->geometry));
2469 
2470 	dm_table_event_callback(t, event_callback, md);
2471 
2472 	if (dm_table_request_based(t)) {
2473 		/*
2474 		 * Leverage the fact that request-based DM targets are
2475 		 * immutable singletons - used to optimize dm_mq_queue_rq.
2476 		 */
2477 		md->immutable_target = dm_table_get_immutable_target(t);
2478 
2479 		/*
2480 		 * There is no need to reload with request-based dm because the
2481 		 * size of front_pad doesn't change.
2482 		 *
2483 		 * Note for future: If you are to reload bioset, prep-ed
2484 		 * requests in the queue may refer to bio from the old bioset,
2485 		 * so you must walk through the queue to unprep.
2486 		 */
2487 		if (!md->mempools)
2488 			md->mempools = t->mempools;
2489 		else
2490 			dm_free_md_mempools(t->mempools);
2491 	} else {
2492 		/*
2493 		 * The md may already have mempools that need changing.
2494 		 * If so, reload bioset because front_pad may have changed
2495 		 * because a different table was loaded.
2496 		 */
2497 		dm_free_md_mempools(md->mempools);
2498 		md->mempools = t->mempools;
2499 	}
2500 	t->mempools = NULL;
2501 
2502 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2503 	rcu_assign_pointer(md->map, (void *)t);
2504 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2505 
2506 	if (old_map)
2507 		dm_sync_table(md);
2508 out:
2509 	return old_map;
2510 }
2511 
2512 /*
2513  * Returns unbound table for the caller to free.
2514  */
2515 static struct dm_table *__unbind(struct mapped_device *md)
2516 {
2517 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2518 
2519 	if (!map)
2520 		return NULL;
2521 
2522 	dm_table_event_callback(map, NULL, NULL);
2523 	RCU_INIT_POINTER(md->map, NULL);
2524 	dm_sync_table(md);
2525 
2526 	return map;
2527 }
2528 
2529 /*
2530  * Constructor for a new device.
2531  */
2532 int dm_create(int minor, struct mapped_device **result)
2533 {
2534 	struct mapped_device *md;
2535 
2536 	md = alloc_dev(minor);
2537 	if (!md)
2538 		return -ENXIO;
2539 
2540 	dm_ima_reset_data(md);
2541 
2542 	*result = md;
2543 	return 0;
2544 }
2545 
2546 /*
2547  * Functions to manage md->type.
2548  * All are required to hold md->type_lock.
2549  */
2550 void dm_lock_md_type(struct mapped_device *md)
2551 {
2552 	mutex_lock(&md->type_lock);
2553 }
2554 
2555 void dm_unlock_md_type(struct mapped_device *md)
2556 {
2557 	mutex_unlock(&md->type_lock);
2558 }
2559 
2560 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2561 {
2562 	return md->type;
2563 }
2564 
2565 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2566 {
2567 	return md->immutable_target_type;
2568 }
2569 
2570 /*
2571  * Setup the DM device's queue based on md's type
2572  */
2573 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2574 {
2575 	enum dm_queue_mode type = dm_table_get_type(t);
2576 	struct queue_limits limits;
2577 	struct table_device *td;
2578 	int r;
2579 
2580 	WARN_ON_ONCE(type == DM_TYPE_NONE);
2581 
2582 	if (type == DM_TYPE_REQUEST_BASED) {
2583 		md->disk->fops = &dm_rq_blk_dops;
2584 		r = dm_mq_init_request_queue(md, t);
2585 		if (r) {
2586 			DMERR("Cannot initialize queue for request-based dm mapped device");
2587 			return r;
2588 		}
2589 	}
2590 
2591 	r = dm_calculate_queue_limits(t, &limits);
2592 	if (r) {
2593 		DMERR("Cannot calculate initial queue limits");
2594 		return r;
2595 	}
2596 	r = dm_table_set_restrictions(t, md->queue, &limits);
2597 	if (r)
2598 		return r;
2599 
2600 	/*
2601 	 * Hold lock to make sure add_disk() and del_gendisk() won't concurrent
2602 	 * with open_table_device() and close_table_device().
2603 	 */
2604 	mutex_lock(&md->table_devices_lock);
2605 	r = add_disk(md->disk);
2606 	mutex_unlock(&md->table_devices_lock);
2607 	if (r)
2608 		return r;
2609 
2610 	/*
2611 	 * Register the holder relationship for devices added before the disk
2612 	 * was live.
2613 	 */
2614 	list_for_each_entry(td, &md->table_devices, list) {
2615 		r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2616 		if (r)
2617 			goto out_undo_holders;
2618 	}
2619 
2620 	r = dm_sysfs_init(md);
2621 	if (r)
2622 		goto out_undo_holders;
2623 
2624 	md->type = type;
2625 	return 0;
2626 
2627 out_undo_holders:
2628 	list_for_each_entry_continue_reverse(td, &md->table_devices, list)
2629 		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
2630 	mutex_lock(&md->table_devices_lock);
2631 	del_gendisk(md->disk);
2632 	mutex_unlock(&md->table_devices_lock);
2633 	return r;
2634 }
2635 
2636 struct mapped_device *dm_get_md(dev_t dev)
2637 {
2638 	struct mapped_device *md;
2639 	unsigned int minor = MINOR(dev);
2640 
2641 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2642 		return NULL;
2643 
2644 	spin_lock(&_minor_lock);
2645 
2646 	md = idr_find(&_minor_idr, minor);
2647 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2648 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2649 		md = NULL;
2650 		goto out;
2651 	}
2652 	dm_get(md);
2653 out:
2654 	spin_unlock(&_minor_lock);
2655 
2656 	return md;
2657 }
2658 EXPORT_SYMBOL_GPL(dm_get_md);
2659 
2660 void *dm_get_mdptr(struct mapped_device *md)
2661 {
2662 	return md->interface_ptr;
2663 }
2664 
2665 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2666 {
2667 	md->interface_ptr = ptr;
2668 }
2669 
2670 void dm_get(struct mapped_device *md)
2671 {
2672 	atomic_inc(&md->holders);
2673 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2674 }
2675 
2676 int dm_hold(struct mapped_device *md)
2677 {
2678 	spin_lock(&_minor_lock);
2679 	if (test_bit(DMF_FREEING, &md->flags)) {
2680 		spin_unlock(&_minor_lock);
2681 		return -EBUSY;
2682 	}
2683 	dm_get(md);
2684 	spin_unlock(&_minor_lock);
2685 	return 0;
2686 }
2687 EXPORT_SYMBOL_GPL(dm_hold);
2688 
2689 const char *dm_device_name(struct mapped_device *md)
2690 {
2691 	return md->name;
2692 }
2693 EXPORT_SYMBOL_GPL(dm_device_name);
2694 
2695 static void __dm_destroy(struct mapped_device *md, bool wait)
2696 {
2697 	struct dm_table *map;
2698 	int srcu_idx;
2699 
2700 	might_sleep();
2701 
2702 	spin_lock(&_minor_lock);
2703 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2704 	set_bit(DMF_FREEING, &md->flags);
2705 	spin_unlock(&_minor_lock);
2706 
2707 	blk_mark_disk_dead(md->disk);
2708 
2709 	/*
2710 	 * Take suspend_lock so that presuspend and postsuspend methods
2711 	 * do not race with internal suspend.
2712 	 */
2713 	mutex_lock(&md->suspend_lock);
2714 	map = dm_get_live_table(md, &srcu_idx);
2715 	if (!dm_suspended_md(md)) {
2716 		dm_table_presuspend_targets(map);
2717 		set_bit(DMF_SUSPENDED, &md->flags);
2718 		set_bit(DMF_POST_SUSPENDING, &md->flags);
2719 		dm_table_postsuspend_targets(map);
2720 	}
2721 	/* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
2722 	dm_put_live_table(md, srcu_idx);
2723 	mutex_unlock(&md->suspend_lock);
2724 
2725 	/*
2726 	 * Rare, but there may be I/O requests still going to complete,
2727 	 * for example.  Wait for all references to disappear.
2728 	 * No one should increment the reference count of the mapped_device,
2729 	 * after the mapped_device state becomes DMF_FREEING.
2730 	 */
2731 	if (wait)
2732 		while (atomic_read(&md->holders))
2733 			fsleep(1000);
2734 	else if (atomic_read(&md->holders))
2735 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2736 		       dm_device_name(md), atomic_read(&md->holders));
2737 
2738 	dm_table_destroy(__unbind(md));
2739 	free_dev(md);
2740 }
2741 
2742 void dm_destroy(struct mapped_device *md)
2743 {
2744 	__dm_destroy(md, true);
2745 }
2746 
2747 void dm_destroy_immediate(struct mapped_device *md)
2748 {
2749 	__dm_destroy(md, false);
2750 }
2751 
2752 void dm_put(struct mapped_device *md)
2753 {
2754 	atomic_dec(&md->holders);
2755 }
2756 EXPORT_SYMBOL_GPL(dm_put);
2757 
2758 static bool dm_in_flight_bios(struct mapped_device *md)
2759 {
2760 	int cpu;
2761 	unsigned long sum = 0;
2762 
2763 	for_each_possible_cpu(cpu)
2764 		sum += *per_cpu_ptr(md->pending_io, cpu);
2765 
2766 	return sum != 0;
2767 }
2768 
2769 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2770 {
2771 	int r = 0;
2772 	DEFINE_WAIT(wait);
2773 
2774 	while (true) {
2775 		prepare_to_wait(&md->wait, &wait, task_state);
2776 
2777 		if (!dm_in_flight_bios(md))
2778 			break;
2779 
2780 		if (signal_pending_state(task_state, current)) {
2781 			r = -ERESTARTSYS;
2782 			break;
2783 		}
2784 
2785 		io_schedule();
2786 	}
2787 	finish_wait(&md->wait, &wait);
2788 
2789 	smp_rmb();
2790 
2791 	return r;
2792 }
2793 
2794 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2795 {
2796 	int r = 0;
2797 
2798 	if (!queue_is_mq(md->queue))
2799 		return dm_wait_for_bios_completion(md, task_state);
2800 
2801 	while (true) {
2802 		if (!blk_mq_queue_inflight(md->queue))
2803 			break;
2804 
2805 		if (signal_pending_state(task_state, current)) {
2806 			r = -ERESTARTSYS;
2807 			break;
2808 		}
2809 
2810 		fsleep(5000);
2811 	}
2812 
2813 	return r;
2814 }
2815 
2816 /*
2817  * Process the deferred bios
2818  */
2819 static void dm_wq_work(struct work_struct *work)
2820 {
2821 	struct mapped_device *md = container_of(work, struct mapped_device, work);
2822 	struct bio *bio;
2823 
2824 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2825 		spin_lock_irq(&md->deferred_lock);
2826 		bio = bio_list_pop(&md->deferred);
2827 		spin_unlock_irq(&md->deferred_lock);
2828 
2829 		if (!bio)
2830 			break;
2831 
2832 		submit_bio_noacct(bio);
2833 		cond_resched();
2834 	}
2835 }
2836 
2837 static void dm_queue_flush(struct mapped_device *md)
2838 {
2839 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2840 	smp_mb__after_atomic();
2841 	queue_work(md->wq, &md->work);
2842 }
2843 
2844 /*
2845  * Swap in a new table, returning the old one for the caller to destroy.
2846  */
2847 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2848 {
2849 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2850 	struct queue_limits limits;
2851 	int r;
2852 
2853 	mutex_lock(&md->suspend_lock);
2854 
2855 	/* device must be suspended */
2856 	if (!dm_suspended_md(md))
2857 		goto out;
2858 
2859 	/*
2860 	 * If the new table has no data devices, retain the existing limits.
2861 	 * This helps multipath with queue_if_no_path if all paths disappear,
2862 	 * then new I/O is queued based on these limits, and then some paths
2863 	 * reappear.
2864 	 */
2865 	if (dm_table_has_no_data_devices(table)) {
2866 		live_map = dm_get_live_table_fast(md);
2867 		if (live_map)
2868 			limits = md->queue->limits;
2869 		dm_put_live_table_fast(md);
2870 	}
2871 
2872 	if (!live_map) {
2873 		r = dm_calculate_queue_limits(table, &limits);
2874 		if (r) {
2875 			map = ERR_PTR(r);
2876 			goto out;
2877 		}
2878 	}
2879 
2880 	map = __bind(md, table, &limits);
2881 	dm_issue_global_event();
2882 
2883 out:
2884 	mutex_unlock(&md->suspend_lock);
2885 	return map;
2886 }
2887 
2888 /*
2889  * Functions to lock and unlock any filesystem running on the
2890  * device.
2891  */
2892 static int lock_fs(struct mapped_device *md)
2893 {
2894 	int r;
2895 
2896 	WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2897 
2898 	r = bdev_freeze(md->disk->part0);
2899 	if (!r)
2900 		set_bit(DMF_FROZEN, &md->flags);
2901 	return r;
2902 }
2903 
2904 static void unlock_fs(struct mapped_device *md)
2905 {
2906 	if (!test_bit(DMF_FROZEN, &md->flags))
2907 		return;
2908 	bdev_thaw(md->disk->part0);
2909 	clear_bit(DMF_FROZEN, &md->flags);
2910 }
2911 
2912 /*
2913  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2914  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2915  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2916  *
2917  * If __dm_suspend returns 0, the device is completely quiescent
2918  * now. There is no request-processing activity. All new requests
2919  * are being added to md->deferred list.
2920  */
2921 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2922 			unsigned int suspend_flags, unsigned int task_state,
2923 			int dmf_suspended_flag)
2924 {
2925 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2926 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2927 	int r = 0;
2928 
2929 	lockdep_assert_held(&md->suspend_lock);
2930 
2931 	/*
2932 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2933 	 * This flag is cleared before dm_suspend returns.
2934 	 */
2935 	if (noflush)
2936 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2937 	else
2938 		DMDEBUG("%s: suspending with flush", dm_device_name(md));
2939 
2940 	/*
2941 	 * This gets reverted if there's an error later and the targets
2942 	 * provide the .presuspend_undo hook.
2943 	 */
2944 	dm_table_presuspend_targets(map);
2945 
2946 	/*
2947 	 * Flush I/O to the device.
2948 	 * Any I/O submitted after lock_fs() may not be flushed.
2949 	 * noflush takes precedence over do_lockfs.
2950 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2951 	 */
2952 	if (!noflush && do_lockfs) {
2953 		r = lock_fs(md);
2954 		if (r) {
2955 			dm_table_presuspend_undo_targets(map);
2956 			return r;
2957 		}
2958 	}
2959 
2960 	/*
2961 	 * Here we must make sure that no processes are submitting requests
2962 	 * to target drivers i.e. no one may be executing
2963 	 * dm_split_and_process_bio from dm_submit_bio.
2964 	 *
2965 	 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2966 	 * we take the write lock. To prevent any process from reentering
2967 	 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2968 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2969 	 * flush_workqueue(md->wq).
2970 	 */
2971 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2972 	if (map)
2973 		synchronize_srcu(&md->io_barrier);
2974 
2975 	/*
2976 	 * Stop md->queue before flushing md->wq in case request-based
2977 	 * dm defers requests to md->wq from md->queue.
2978 	 */
2979 	if (map && dm_request_based(md)) {
2980 		dm_stop_queue(md->queue);
2981 		set_bit(DMF_QUEUE_STOPPED, &md->flags);
2982 	}
2983 
2984 	flush_workqueue(md->wq);
2985 
2986 	/*
2987 	 * At this point no more requests are entering target request routines.
2988 	 * We call dm_wait_for_completion to wait for all existing requests
2989 	 * to finish.
2990 	 */
2991 	if (map)
2992 		r = dm_wait_for_completion(md, task_state);
2993 	if (!r)
2994 		set_bit(dmf_suspended_flag, &md->flags);
2995 
2996 	if (noflush)
2997 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2998 	if (map)
2999 		synchronize_srcu(&md->io_barrier);
3000 
3001 	/* were we interrupted ? */
3002 	if (r < 0) {
3003 		dm_queue_flush(md);
3004 
3005 		if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
3006 			dm_start_queue(md->queue);
3007 
3008 		unlock_fs(md);
3009 		dm_table_presuspend_undo_targets(map);
3010 		/* pushback list is already flushed, so skip flush */
3011 	}
3012 
3013 	return r;
3014 }
3015 
3016 /*
3017  * We need to be able to change a mapping table under a mounted
3018  * filesystem.  For example we might want to move some data in
3019  * the background.  Before the table can be swapped with
3020  * dm_bind_table, dm_suspend must be called to flush any in
3021  * flight bios and ensure that any further io gets deferred.
3022  */
3023 /*
3024  * Suspend mechanism in request-based dm.
3025  *
3026  * 1. Flush all I/Os by lock_fs() if needed.
3027  * 2. Stop dispatching any I/O by stopping the request_queue.
3028  * 3. Wait for all in-flight I/Os to be completed or requeued.
3029  *
3030  * To abort suspend, start the request_queue.
3031  */
3032 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
3033 {
3034 	struct dm_table *map = NULL;
3035 	int r = 0;
3036 
3037 retry:
3038 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3039 
3040 	if (dm_suspended_md(md)) {
3041 		r = -EINVAL;
3042 		goto out_unlock;
3043 	}
3044 
3045 	if (dm_suspended_internally_md(md)) {
3046 		/* already internally suspended, wait for internal resume */
3047 		mutex_unlock(&md->suspend_lock);
3048 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3049 		if (r)
3050 			return r;
3051 		goto retry;
3052 	}
3053 
3054 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3055 	if (!map) {
3056 		/* avoid deadlock with fs/namespace.c:do_mount() */
3057 		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
3058 	}
3059 
3060 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
3061 	if (r)
3062 		goto out_unlock;
3063 
3064 	set_bit(DMF_POST_SUSPENDING, &md->flags);
3065 	dm_table_postsuspend_targets(map);
3066 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
3067 
3068 out_unlock:
3069 	mutex_unlock(&md->suspend_lock);
3070 	return r;
3071 }
3072 
3073 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
3074 {
3075 	if (map) {
3076 		int r = dm_table_resume_targets(map);
3077 
3078 		if (r)
3079 			return r;
3080 	}
3081 
3082 	dm_queue_flush(md);
3083 
3084 	/*
3085 	 * Flushing deferred I/Os must be done after targets are resumed
3086 	 * so that mapping of targets can work correctly.
3087 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
3088 	 */
3089 	if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
3090 		dm_start_queue(md->queue);
3091 
3092 	unlock_fs(md);
3093 
3094 	return 0;
3095 }
3096 
3097 int dm_resume(struct mapped_device *md)
3098 {
3099 	int r;
3100 	struct dm_table *map = NULL;
3101 
3102 retry:
3103 	r = -EINVAL;
3104 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3105 
3106 	if (!dm_suspended_md(md))
3107 		goto out;
3108 
3109 	if (dm_suspended_internally_md(md)) {
3110 		/* already internally suspended, wait for internal resume */
3111 		mutex_unlock(&md->suspend_lock);
3112 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3113 		if (r)
3114 			return r;
3115 		goto retry;
3116 	}
3117 
3118 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3119 	if (!map || !dm_table_get_size(map))
3120 		goto out;
3121 
3122 	r = __dm_resume(md, map);
3123 	if (r)
3124 		goto out;
3125 
3126 	clear_bit(DMF_SUSPENDED, &md->flags);
3127 out:
3128 	mutex_unlock(&md->suspend_lock);
3129 
3130 	return r;
3131 }
3132 
3133 /*
3134  * Internal suspend/resume works like userspace-driven suspend. It waits
3135  * until all bios finish and prevents issuing new bios to the target drivers.
3136  * It may be used only from the kernel.
3137  */
3138 
3139 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
3140 {
3141 	struct dm_table *map = NULL;
3142 
3143 	lockdep_assert_held(&md->suspend_lock);
3144 
3145 	if (md->internal_suspend_count++)
3146 		return; /* nested internal suspend */
3147 
3148 	if (dm_suspended_md(md)) {
3149 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3150 		return; /* nest suspend */
3151 	}
3152 
3153 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3154 
3155 	/*
3156 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
3157 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
3158 	 * would require changing .presuspend to return an error -- avoid this
3159 	 * until there is a need for more elaborate variants of internal suspend.
3160 	 */
3161 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
3162 			    DMF_SUSPENDED_INTERNALLY);
3163 
3164 	set_bit(DMF_POST_SUSPENDING, &md->flags);
3165 	dm_table_postsuspend_targets(map);
3166 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
3167 }
3168 
3169 static void __dm_internal_resume(struct mapped_device *md)
3170 {
3171 	int r;
3172 	struct dm_table *map;
3173 
3174 	BUG_ON(!md->internal_suspend_count);
3175 
3176 	if (--md->internal_suspend_count)
3177 		return; /* resume from nested internal suspend */
3178 
3179 	if (dm_suspended_md(md))
3180 		goto done; /* resume from nested suspend */
3181 
3182 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3183 	r = __dm_resume(md, map);
3184 	if (r) {
3185 		/*
3186 		 * If a preresume method of some target failed, we are in a
3187 		 * tricky situation. We can't return an error to the caller. We
3188 		 * can't fake success because then the "resume" and
3189 		 * "postsuspend" methods would not be paired correctly, and it
3190 		 * would break various targets, for example it would cause list
3191 		 * corruption in the "origin" target.
3192 		 *
3193 		 * So, we fake normal suspend here, to make sure that the
3194 		 * "resume" and "postsuspend" methods will be paired correctly.
3195 		 */
3196 		DMERR("Preresume method failed: %d", r);
3197 		set_bit(DMF_SUSPENDED, &md->flags);
3198 	}
3199 done:
3200 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3201 	smp_mb__after_atomic();
3202 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3203 }
3204 
3205 void dm_internal_suspend_noflush(struct mapped_device *md)
3206 {
3207 	mutex_lock(&md->suspend_lock);
3208 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3209 	mutex_unlock(&md->suspend_lock);
3210 }
3211 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3212 
3213 void dm_internal_resume(struct mapped_device *md)
3214 {
3215 	mutex_lock(&md->suspend_lock);
3216 	__dm_internal_resume(md);
3217 	mutex_unlock(&md->suspend_lock);
3218 }
3219 EXPORT_SYMBOL_GPL(dm_internal_resume);
3220 
3221 /*
3222  * Fast variants of internal suspend/resume hold md->suspend_lock,
3223  * which prevents interaction with userspace-driven suspend.
3224  */
3225 
3226 void dm_internal_suspend_fast(struct mapped_device *md)
3227 {
3228 	mutex_lock(&md->suspend_lock);
3229 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3230 		return;
3231 
3232 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3233 	synchronize_srcu(&md->io_barrier);
3234 	flush_workqueue(md->wq);
3235 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3236 }
3237 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3238 
3239 void dm_internal_resume_fast(struct mapped_device *md)
3240 {
3241 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3242 		goto done;
3243 
3244 	dm_queue_flush(md);
3245 
3246 done:
3247 	mutex_unlock(&md->suspend_lock);
3248 }
3249 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3250 
3251 /*
3252  *---------------------------------------------------------------
3253  * Event notification.
3254  *---------------------------------------------------------------
3255  */
3256 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3257 		      unsigned int cookie, bool need_resize_uevent)
3258 {
3259 	int r;
3260 	unsigned int noio_flag;
3261 	char udev_cookie[DM_COOKIE_LENGTH];
3262 	char *envp[3] = { NULL, NULL, NULL };
3263 	char **envpp = envp;
3264 	if (cookie) {
3265 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3266 			 DM_COOKIE_ENV_VAR_NAME, cookie);
3267 		*envpp++ = udev_cookie;
3268 	}
3269 	if (need_resize_uevent) {
3270 		*envpp++ = "RESIZE=1";
3271 	}
3272 
3273 	noio_flag = memalloc_noio_save();
3274 
3275 	r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
3276 
3277 	memalloc_noio_restore(noio_flag);
3278 
3279 	return r;
3280 }
3281 
3282 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3283 {
3284 	return atomic_add_return(1, &md->uevent_seq);
3285 }
3286 
3287 uint32_t dm_get_event_nr(struct mapped_device *md)
3288 {
3289 	return atomic_read(&md->event_nr);
3290 }
3291 
3292 int dm_wait_event(struct mapped_device *md, int event_nr)
3293 {
3294 	return wait_event_interruptible(md->eventq,
3295 			(event_nr != atomic_read(&md->event_nr)));
3296 }
3297 
3298 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3299 {
3300 	unsigned long flags;
3301 
3302 	spin_lock_irqsave(&md->uevent_lock, flags);
3303 	list_add(elist, &md->uevent_list);
3304 	spin_unlock_irqrestore(&md->uevent_lock, flags);
3305 }
3306 
3307 /*
3308  * The gendisk is only valid as long as you have a reference
3309  * count on 'md'.
3310  */
3311 struct gendisk *dm_disk(struct mapped_device *md)
3312 {
3313 	return md->disk;
3314 }
3315 EXPORT_SYMBOL_GPL(dm_disk);
3316 
3317 struct kobject *dm_kobject(struct mapped_device *md)
3318 {
3319 	return &md->kobj_holder.kobj;
3320 }
3321 
3322 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3323 {
3324 	struct mapped_device *md;
3325 
3326 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3327 
3328 	spin_lock(&_minor_lock);
3329 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
3330 		md = NULL;
3331 		goto out;
3332 	}
3333 	dm_get(md);
3334 out:
3335 	spin_unlock(&_minor_lock);
3336 
3337 	return md;
3338 }
3339 
3340 int dm_suspended_md(struct mapped_device *md)
3341 {
3342 	return test_bit(DMF_SUSPENDED, &md->flags);
3343 }
3344 
3345 static int dm_post_suspending_md(struct mapped_device *md)
3346 {
3347 	return test_bit(DMF_POST_SUSPENDING, &md->flags);
3348 }
3349 
3350 int dm_suspended_internally_md(struct mapped_device *md)
3351 {
3352 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3353 }
3354 
3355 int dm_test_deferred_remove_flag(struct mapped_device *md)
3356 {
3357 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3358 }
3359 
3360 int dm_suspended(struct dm_target *ti)
3361 {
3362 	return dm_suspended_md(ti->table->md);
3363 }
3364 EXPORT_SYMBOL_GPL(dm_suspended);
3365 
3366 int dm_post_suspending(struct dm_target *ti)
3367 {
3368 	return dm_post_suspending_md(ti->table->md);
3369 }
3370 EXPORT_SYMBOL_GPL(dm_post_suspending);
3371 
3372 int dm_noflush_suspending(struct dm_target *ti)
3373 {
3374 	return __noflush_suspending(ti->table->md);
3375 }
3376 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3377 
3378 void dm_free_md_mempools(struct dm_md_mempools *pools)
3379 {
3380 	if (!pools)
3381 		return;
3382 
3383 	bioset_exit(&pools->bs);
3384 	bioset_exit(&pools->io_bs);
3385 
3386 	kfree(pools);
3387 }
3388 
3389 struct dm_blkdev_id {
3390 	u8 *id;
3391 	enum blk_unique_id type;
3392 };
3393 
3394 static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev,
3395 				sector_t start, sector_t len, void *data)
3396 {
3397 	struct dm_blkdev_id *dm_id = data;
3398 	const struct block_device_operations *fops = dev->bdev->bd_disk->fops;
3399 
3400 	if (!fops->get_unique_id)
3401 		return 0;
3402 
3403 	return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type);
3404 }
3405 
3406 /*
3407  * Allow access to get_unique_id() for the first device returning a
3408  * non-zero result.  Reasonable use expects all devices to have the
3409  * same unique id.
3410  */
3411 static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id,
3412 		enum blk_unique_id type)
3413 {
3414 	struct mapped_device *md = disk->private_data;
3415 	struct dm_table *table;
3416 	struct dm_target *ti;
3417 	int ret = 0, srcu_idx;
3418 
3419 	struct dm_blkdev_id dm_id = {
3420 		.id = id,
3421 		.type = type,
3422 	};
3423 
3424 	table = dm_get_live_table(md, &srcu_idx);
3425 	if (!table || !dm_table_get_size(table))
3426 		goto out;
3427 
3428 	/* We only support devices that have a single target */
3429 	if (table->num_targets != 1)
3430 		goto out;
3431 	ti = dm_table_get_target(table, 0);
3432 
3433 	if (!ti->type->iterate_devices)
3434 		goto out;
3435 
3436 	ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id);
3437 out:
3438 	dm_put_live_table(md, srcu_idx);
3439 	return ret;
3440 }
3441 
3442 struct dm_pr {
3443 	u64	old_key;
3444 	u64	new_key;
3445 	u32	flags;
3446 	bool	abort;
3447 	bool	fail_early;
3448 	int	ret;
3449 	enum pr_type type;
3450 	struct pr_keys *read_keys;
3451 	struct pr_held_reservation *rsv;
3452 };
3453 
3454 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3455 		      struct dm_pr *pr)
3456 {
3457 	struct mapped_device *md = bdev->bd_disk->private_data;
3458 	struct dm_table *table;
3459 	struct dm_target *ti;
3460 	int ret = -ENOTTY, srcu_idx;
3461 
3462 	table = dm_get_live_table(md, &srcu_idx);
3463 	if (!table || !dm_table_get_size(table))
3464 		goto out;
3465 
3466 	/* We only support devices that have a single target */
3467 	if (table->num_targets != 1)
3468 		goto out;
3469 	ti = dm_table_get_target(table, 0);
3470 
3471 	if (dm_suspended_md(md)) {
3472 		ret = -EAGAIN;
3473 		goto out;
3474 	}
3475 
3476 	ret = -EINVAL;
3477 	if (!ti->type->iterate_devices)
3478 		goto out;
3479 
3480 	ti->type->iterate_devices(ti, fn, pr);
3481 	ret = 0;
3482 out:
3483 	dm_put_live_table(md, srcu_idx);
3484 	return ret;
3485 }
3486 
3487 /*
3488  * For register / unregister we need to manually call out to every path.
3489  */
3490 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3491 			    sector_t start, sector_t len, void *data)
3492 {
3493 	struct dm_pr *pr = data;
3494 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3495 	int ret;
3496 
3497 	if (!ops || !ops->pr_register) {
3498 		pr->ret = -EOPNOTSUPP;
3499 		return -1;
3500 	}
3501 
3502 	ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3503 	if (!ret)
3504 		return 0;
3505 
3506 	if (!pr->ret)
3507 		pr->ret = ret;
3508 
3509 	if (pr->fail_early)
3510 		return -1;
3511 
3512 	return 0;
3513 }
3514 
3515 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3516 			  u32 flags)
3517 {
3518 	struct dm_pr pr = {
3519 		.old_key	= old_key,
3520 		.new_key	= new_key,
3521 		.flags		= flags,
3522 		.fail_early	= true,
3523 		.ret		= 0,
3524 	};
3525 	int ret;
3526 
3527 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3528 	if (ret) {
3529 		/* Didn't even get to register a path */
3530 		return ret;
3531 	}
3532 
3533 	if (!pr.ret)
3534 		return 0;
3535 	ret = pr.ret;
3536 
3537 	if (!new_key)
3538 		return ret;
3539 
3540 	/* unregister all paths if we failed to register any path */
3541 	pr.old_key = new_key;
3542 	pr.new_key = 0;
3543 	pr.flags = 0;
3544 	pr.fail_early = false;
3545 	(void) dm_call_pr(bdev, __dm_pr_register, &pr);
3546 	return ret;
3547 }
3548 
3549 
3550 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
3551 			   sector_t start, sector_t len, void *data)
3552 {
3553 	struct dm_pr *pr = data;
3554 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3555 
3556 	if (!ops || !ops->pr_reserve) {
3557 		pr->ret = -EOPNOTSUPP;
3558 		return -1;
3559 	}
3560 
3561 	pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
3562 	if (!pr->ret)
3563 		return -1;
3564 
3565 	return 0;
3566 }
3567 
3568 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3569 			 u32 flags)
3570 {
3571 	struct dm_pr pr = {
3572 		.old_key	= key,
3573 		.flags		= flags,
3574 		.type		= type,
3575 		.fail_early	= false,
3576 		.ret		= 0,
3577 	};
3578 	int ret;
3579 
3580 	ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3581 	if (ret)
3582 		return ret;
3583 
3584 	return pr.ret;
3585 }
3586 
3587 /*
3588  * If there is a non-All Registrants type of reservation, the release must be
3589  * sent down the holding path. For the cases where there is no reservation or
3590  * the path is not the holder the device will also return success, so we must
3591  * try each path to make sure we got the correct path.
3592  */
3593 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
3594 			   sector_t start, sector_t len, void *data)
3595 {
3596 	struct dm_pr *pr = data;
3597 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3598 
3599 	if (!ops || !ops->pr_release) {
3600 		pr->ret = -EOPNOTSUPP;
3601 		return -1;
3602 	}
3603 
3604 	pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
3605 	if (pr->ret)
3606 		return -1;
3607 
3608 	return 0;
3609 }
3610 
3611 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3612 {
3613 	struct dm_pr pr = {
3614 		.old_key	= key,
3615 		.type		= type,
3616 		.fail_early	= false,
3617 	};
3618 	int ret;
3619 
3620 	ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3621 	if (ret)
3622 		return ret;
3623 
3624 	return pr.ret;
3625 }
3626 
3627 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
3628 			   sector_t start, sector_t len, void *data)
3629 {
3630 	struct dm_pr *pr = data;
3631 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3632 
3633 	if (!ops || !ops->pr_preempt) {
3634 		pr->ret = -EOPNOTSUPP;
3635 		return -1;
3636 	}
3637 
3638 	pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
3639 				  pr->abort);
3640 	if (!pr->ret)
3641 		return -1;
3642 
3643 	return 0;
3644 }
3645 
3646 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3647 			 enum pr_type type, bool abort)
3648 {
3649 	struct dm_pr pr = {
3650 		.new_key	= new_key,
3651 		.old_key	= old_key,
3652 		.type		= type,
3653 		.fail_early	= false,
3654 	};
3655 	int ret;
3656 
3657 	ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3658 	if (ret)
3659 		return ret;
3660 
3661 	return pr.ret;
3662 }
3663 
3664 static int dm_pr_clear(struct block_device *bdev, u64 key)
3665 {
3666 	struct mapped_device *md = bdev->bd_disk->private_data;
3667 	const struct pr_ops *ops;
3668 	int r, srcu_idx;
3669 	bool forward = true;
3670 
3671 	/* Not a real ioctl, but targets must not interpret non-DM ioctls */
3672 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
3673 	if (r < 0)
3674 		goto out;
3675 	WARN_ON_ONCE(!forward);
3676 
3677 	ops = bdev->bd_disk->fops->pr_ops;
3678 	if (ops && ops->pr_clear)
3679 		r = ops->pr_clear(bdev, key);
3680 	else
3681 		r = -EOPNOTSUPP;
3682 out:
3683 	dm_unprepare_ioctl(md, srcu_idx);
3684 	return r;
3685 }
3686 
3687 static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
3688 			     sector_t start, sector_t len, void *data)
3689 {
3690 	struct dm_pr *pr = data;
3691 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3692 
3693 	if (!ops || !ops->pr_read_keys) {
3694 		pr->ret = -EOPNOTSUPP;
3695 		return -1;
3696 	}
3697 
3698 	pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
3699 	if (!pr->ret)
3700 		return -1;
3701 
3702 	return 0;
3703 }
3704 
3705 static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
3706 {
3707 	struct dm_pr pr = {
3708 		.read_keys = keys,
3709 	};
3710 	int ret;
3711 
3712 	ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
3713 	if (ret)
3714 		return ret;
3715 
3716 	return pr.ret;
3717 }
3718 
3719 static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
3720 				    sector_t start, sector_t len, void *data)
3721 {
3722 	struct dm_pr *pr = data;
3723 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3724 
3725 	if (!ops || !ops->pr_read_reservation) {
3726 		pr->ret = -EOPNOTSUPP;
3727 		return -1;
3728 	}
3729 
3730 	pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
3731 	if (!pr->ret)
3732 		return -1;
3733 
3734 	return 0;
3735 }
3736 
3737 static int dm_pr_read_reservation(struct block_device *bdev,
3738 				  struct pr_held_reservation *rsv)
3739 {
3740 	struct dm_pr pr = {
3741 		.rsv = rsv,
3742 	};
3743 	int ret;
3744 
3745 	ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
3746 	if (ret)
3747 		return ret;
3748 
3749 	return pr.ret;
3750 }
3751 
3752 static const struct pr_ops dm_pr_ops = {
3753 	.pr_register	= dm_pr_register,
3754 	.pr_reserve	= dm_pr_reserve,
3755 	.pr_release	= dm_pr_release,
3756 	.pr_preempt	= dm_pr_preempt,
3757 	.pr_clear	= dm_pr_clear,
3758 	.pr_read_keys	= dm_pr_read_keys,
3759 	.pr_read_reservation = dm_pr_read_reservation,
3760 };
3761 
3762 static const struct block_device_operations dm_blk_dops = {
3763 	.submit_bio = dm_submit_bio,
3764 	.poll_bio = dm_poll_bio,
3765 	.open = dm_blk_open,
3766 	.release = dm_blk_close,
3767 	.ioctl = dm_blk_ioctl,
3768 	.getgeo = dm_blk_getgeo,
3769 	.report_zones = dm_blk_report_zones,
3770 	.get_unique_id = dm_blk_get_unique_id,
3771 	.pr_ops = &dm_pr_ops,
3772 	.owner = THIS_MODULE
3773 };
3774 
3775 static const struct block_device_operations dm_rq_blk_dops = {
3776 	.open = dm_blk_open,
3777 	.release = dm_blk_close,
3778 	.ioctl = dm_blk_ioctl,
3779 	.getgeo = dm_blk_getgeo,
3780 	.get_unique_id = dm_blk_get_unique_id,
3781 	.pr_ops = &dm_pr_ops,
3782 	.owner = THIS_MODULE
3783 };
3784 
3785 static const struct dax_operations dm_dax_ops = {
3786 	.direct_access = dm_dax_direct_access,
3787 	.zero_page_range = dm_dax_zero_page_range,
3788 	.recovery_write = dm_dax_recovery_write,
3789 };
3790 
3791 /*
3792  * module hooks
3793  */
3794 module_init(dm_init);
3795 module_exit(dm_exit);
3796 
3797 module_param(major, uint, 0);
3798 MODULE_PARM_DESC(major, "The major number of the device mapper");
3799 
3800 module_param(reserved_bio_based_ios, uint, 0644);
3801 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3802 
3803 module_param(dm_numa_node, int, 0644);
3804 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3805 
3806 module_param(swap_bios, int, 0644);
3807 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3808 
3809 MODULE_DESCRIPTION(DM_NAME " driver");
3810 MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
3811 MODULE_LICENSE("GPL");
3812