xref: /linux/drivers/md/dm.c (revision 6e11664f148454a127dd89e8698c3e3e80e5f62f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
4  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include "dm-core.h"
10 #include "dm-rq.h"
11 #include "dm-uevent.h"
12 #include "dm-ima.h"
13 
14 #include <linux/bio-integrity.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/signal.h>
20 #include <linux/blkpg.h>
21 #include <linux/bio.h>
22 #include <linux/mempool.h>
23 #include <linux/dax.h>
24 #include <linux/slab.h>
25 #include <linux/idr.h>
26 #include <linux/uio.h>
27 #include <linux/hdreg.h>
28 #include <linux/delay.h>
29 #include <linux/wait.h>
30 #include <linux/pr.h>
31 #include <linux/refcount.h>
32 #include <linux/part_stat.h>
33 #include <linux/blk-crypto.h>
34 #include <linux/blk-crypto-profile.h>
35 
36 #define DM_MSG_PREFIX "core"
37 
38 /*
39  * Cookies are numeric values sent with CHANGE and REMOVE
40  * uevents while resuming, removing or renaming the device.
41  */
42 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
43 #define DM_COOKIE_LENGTH 24
44 
45 /*
46  * For a REQ_POLLED fs bio, this flag is set if we link the mapped underlying
47  * dm_io instances into one list and reuse bio->bi_private as the list head.
48  * Before ending this fs bio, we recover its ->bi_private.
49  */
50 #define REQ_DM_POLL_LIST	REQ_DRV
51 
52 static const char *_name = DM_NAME;
53 
54 static unsigned int major;
55 static unsigned int _major;
56 
57 static DEFINE_IDR(_minor_idr);
58 
59 static DEFINE_SPINLOCK(_minor_lock);
60 
61 static void do_deferred_remove(struct work_struct *w);
62 
63 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
64 
65 static struct workqueue_struct *deferred_remove_workqueue;
66 
67 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
68 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
69 
70 void dm_issue_global_event(void)
71 {
72 	atomic_inc(&dm_global_event_nr);
73 	wake_up(&dm_global_eventq);
74 }
75 
76 DEFINE_STATIC_KEY_FALSE(stats_enabled);
77 DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
78 DEFINE_STATIC_KEY_FALSE(zoned_enabled);
79 
80 /*
81  * One of these is allocated (on-stack) per original bio.
82  */
83 struct clone_info {
84 	struct dm_table *map;
85 	struct bio *bio;
86 	struct dm_io *io;
87 	sector_t sector;
88 	unsigned int sector_count;
89 	bool is_abnormal_io:1;
90 	bool submit_as_polled:1;
91 };
92 
93 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
94 {
95 	return container_of(clone, struct dm_target_io, clone);
96 }
97 
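/*
 * Return the per-bio data area that precedes a target's clone bio.  Its size
 * is the data_size the target requested via ti->per_io_data_size.
 */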
98 void *dm_per_bio_data(struct bio *bio, size_t data_size)
99 {
100 	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
101 		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
102 	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
103 }
104 EXPORT_SYMBOL_GPL(dm_per_bio_data);
105 
106 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
107 {
108 	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
109 
110 	if (io->magic == DM_IO_MAGIC)
111 		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
112 	BUG_ON(io->magic != DM_TIO_MAGIC);
113 	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
114 }
115 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
116 
117 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
118 {
119 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
120 }
121 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
122 
123 #define MINOR_ALLOCED ((void *)-1)
124 
125 #define DM_NUMA_NODE NUMA_NO_NODE
126 static int dm_numa_node = DM_NUMA_NODE;
127 
128 #define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
129 static int swap_bios = DEFAULT_SWAP_BIOS;
130 static int get_swap_bios(void)
131 {
132 	int latch = READ_ONCE(swap_bios);
133 
134 	if (unlikely(latch <= 0))
135 		latch = DEFAULT_SWAP_BIOS;
136 	return latch;
137 }
138 
139 struct table_device {
140 	struct list_head list;
141 	refcount_t count;
142 	struct dm_dev dm_dev;
143 };
144 
145 /*
146  * Bio-based DM's mempools' reserved IOs set by the user.
147  */
148 #define RESERVED_BIO_BASED_IOS		16
149 static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
150 
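/*
 * Clamp a signed module parameter to [min, max].  If the current value is
 * out of range, the clamped value is written back (best effort, via cmpxchg)
 * and returned.
 */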
151 static int __dm_get_module_param_int(int *module_param, int min, int max)
152 {
153 	int param = READ_ONCE(*module_param);
154 	int modified_param = 0;
155 	bool modified = true;
156 
157 	if (param < min)
158 		modified_param = min;
159 	else if (param > max)
160 		modified_param = max;
161 	else
162 		modified = false;
163 
164 	if (modified) {
165 		(void)cmpxchg(module_param, param, modified_param);
166 		param = modified_param;
167 	}
168 
169 	return param;
170 }
171 
172 unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
173 {
174 	unsigned int param = READ_ONCE(*module_param);
175 	unsigned int modified_param = 0;
176 
177 	if (!param)
178 		modified_param = def;
179 	else if (param > max)
180 		modified_param = max;
181 
182 	if (modified_param) {
183 		(void)cmpxchg(module_param, param, modified_param);
184 		param = modified_param;
185 	}
186 
187 	return param;
188 }
189 
190 unsigned int dm_get_reserved_bio_based_ios(void)
191 {
192 	return __dm_get_module_param(&reserved_bio_based_ios,
193 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
194 }
195 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
196 
197 static unsigned int dm_get_numa_node(void)
198 {
199 	return __dm_get_module_param_int(&dm_numa_node,
200 					 DM_NUMA_NODE, num_online_nodes() - 1);
201 }
202 
203 static int __init local_init(void)
204 {
205 	int r;
206 
207 	r = dm_uevent_init();
208 	if (r)
209 		return r;
210 
211 	deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
212 	if (!deferred_remove_workqueue) {
213 		r = -ENOMEM;
214 		goto out_uevent_exit;
215 	}
216 
217 	_major = major;
218 	r = register_blkdev(_major, _name);
219 	if (r < 0)
220 		goto out_free_workqueue;
221 
222 	if (!_major)
223 		_major = r;
224 
225 	return 0;
226 
227 out_free_workqueue:
228 	destroy_workqueue(deferred_remove_workqueue);
229 out_uevent_exit:
230 	dm_uevent_exit();
231 
232 	return r;
233 }
234 
235 static void local_exit(void)
236 {
237 	destroy_workqueue(deferred_remove_workqueue);
238 
239 	unregister_blkdev(_major, _name);
240 	dm_uevent_exit();
241 
242 	_major = 0;
243 
244 	DMINFO("cleaned up");
245 }
246 
247 static int (*_inits[])(void) __initdata = {
248 	local_init,
249 	dm_target_init,
250 	dm_linear_init,
251 	dm_stripe_init,
252 	dm_io_init,
253 	dm_kcopyd_init,
254 	dm_interface_init,
255 	dm_statistics_init,
256 };
257 
258 static void (*_exits[])(void) = {
259 	local_exit,
260 	dm_target_exit,
261 	dm_linear_exit,
262 	dm_stripe_exit,
263 	dm_io_exit,
264 	dm_kcopyd_exit,
265 	dm_interface_exit,
266 	dm_statistics_exit,
267 };
268 
269 static int __init dm_init(void)
270 {
271 	const int count = ARRAY_SIZE(_inits);
272 	int r, i;
273 
274 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
275 	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
276 	       " Duplicate IMA measurements will not be recorded in the IMA log.");
277 #endif
278 
279 	for (i = 0; i < count; i++) {
280 		r = _inits[i]();
281 		if (r)
282 			goto bad;
283 	}
284 
285 	return 0;
286 bad:
287 	while (i--)
288 		_exits[i]();
289 
290 	return r;
291 }
292 
293 static void __exit dm_exit(void)
294 {
295 	int i = ARRAY_SIZE(_exits);
296 
297 	while (i--)
298 		_exits[i]();
299 
300 	/*
301 	 * Should be empty by this point.
302 	 */
303 	idr_destroy(&_minor_idr);
304 }
305 
306 /*
307  * Block device functions
308  */
309 int dm_deleting_md(struct mapped_device *md)
310 {
311 	return test_bit(DMF_DELETING, &md->flags);
312 }
313 
314 static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
315 {
316 	struct mapped_device *md;
317 
318 	spin_lock(&_minor_lock);
319 
320 	md = disk->private_data;
321 	if (!md)
322 		goto out;
323 
324 	if (test_bit(DMF_FREEING, &md->flags) ||
325 	    dm_deleting_md(md)) {
326 		md = NULL;
327 		goto out;
328 	}
329 
330 	dm_get(md);
331 	atomic_inc(&md->open_count);
332 out:
333 	spin_unlock(&_minor_lock);
334 
335 	return md ? 0 : -ENXIO;
336 }
337 
338 static void dm_blk_close(struct gendisk *disk)
339 {
340 	struct mapped_device *md;
341 
342 	spin_lock(&_minor_lock);
343 
344 	md = disk->private_data;
345 	if (WARN_ON(!md))
346 		goto out;
347 
348 	if (atomic_dec_and_test(&md->open_count) &&
349 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
350 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
351 
352 	dm_put(md);
353 out:
354 	spin_unlock(&_minor_lock);
355 }
356 
357 int dm_open_count(struct mapped_device *md)
358 {
359 	return atomic_read(&md->open_count);
360 }
361 
362 /*
363  * Guarantees nothing is using the device before it's deleted.
364  */
365 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
366 {
367 	int r = 0;
368 
369 	spin_lock(&_minor_lock);
370 
371 	if (dm_open_count(md)) {
372 		r = -EBUSY;
373 		if (mark_deferred)
374 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
375 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
376 		r = -EEXIST;
377 	else
378 		set_bit(DMF_DELETING, &md->flags);
379 
380 	spin_unlock(&_minor_lock);
381 
382 	return r;
383 }
384 
385 int dm_cancel_deferred_remove(struct mapped_device *md)
386 {
387 	int r = 0;
388 
389 	spin_lock(&_minor_lock);
390 
391 	if (test_bit(DMF_DELETING, &md->flags))
392 		r = -EBUSY;
393 	else
394 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
395 
396 	spin_unlock(&_minor_lock);
397 
398 	return r;
399 }
400 
401 static void do_deferred_remove(struct work_struct *w)
402 {
403 	dm_deferred_remove();
404 }
405 
406 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
407 {
408 	struct mapped_device *md = bdev->bd_disk->private_data;
409 
410 	return dm_get_geometry(md, geo);
411 }
412 
413 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
414 			    struct block_device **bdev, unsigned int cmd,
415 			    unsigned long arg, bool *forward)
416 {
417 	struct dm_target *ti;
418 	struct dm_table *map;
419 	int r;
420 
421 retry:
422 	r = -ENOTTY;
423 	map = dm_get_live_table(md, srcu_idx);
424 	if (!map || !dm_table_get_size(map))
425 		return r;
426 
427 	/* We only support devices that have a single target */
428 	if (map->num_targets != 1)
429 		return r;
430 
431 	ti = dm_table_get_target(map, 0);
432 	if (!ti->type->prepare_ioctl)
433 		return r;
434 
435 	if (dm_suspended_md(md))
436 		return -EAGAIN;
437 
438 	r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
439 	if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
440 		dm_put_live_table(md, *srcu_idx);
441 		fsleep(10000);
442 		goto retry;
443 	}
444 
445 	return r;
446 }
447 
448 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
449 {
450 	dm_put_live_table(md, srcu_idx);
451 }
452 
453 static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
454 			unsigned int cmd, unsigned long arg)
455 {
456 	struct mapped_device *md = bdev->bd_disk->private_data;
457 	int r, srcu_idx;
458 	bool forward = true;
459 
460 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
461 	if (!forward || r < 0)
462 		goto out;
463 
464 	if (r > 0) {
465 		/*
466 		 * Target determined this ioctl is being issued against a
467 		 * subset of the parent bdev; require extra privileges.
468 		 */
469 		if (!capable(CAP_SYS_RAWIO)) {
470 			DMDEBUG_LIMIT(
471 	"%s: sending ioctl %x to DM device without required privilege.",
472 				current->comm, cmd);
473 			r = -ENOIOCTLCMD;
474 			goto out;
475 		}
476 	}
477 
478 	if (!bdev->bd_disk->fops->ioctl)
479 		r = -ENOTTY;
480 	else
481 		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
482 out:
483 	dm_unprepare_ioctl(md, srcu_idx);
484 	return r;
485 }
486 
487 u64 dm_start_time_ns_from_clone(struct bio *bio)
488 {
489 	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
490 }
491 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
492 
493 static inline bool bio_is_flush_with_data(struct bio *bio)
494 {
495 	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
496 }
497 
498 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
499 {
500 	/*
501 	 * If REQ_PREFLUSH set, don't account payload, it will be
502 	 * submitted (and accounted) after this flush completes.
503 	 */
504 	if (bio_is_flush_with_data(bio))
505 		return 0;
506 	if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
507 		return io->sectors;
508 	return bio_sectors(bio);
509 }
510 
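/*
 * Perform block layer start/end accounting and, if dm-stats is in use,
 * per-device statistics accounting for a dm_io.  Split bios are accounted
 * using the sector count and offset recorded when the split happened.
 */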
511 static void dm_io_acct(struct dm_io *io, bool end)
512 {
513 	struct bio *bio = io->orig_bio;
514 
515 	if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
516 		if (!end)
517 			bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
518 					   io->start_time);
519 		else
520 			bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
521 					 dm_io_sectors(io, bio),
522 					 io->start_time);
523 	}
524 
525 	if (static_branch_unlikely(&stats_enabled) &&
526 	    unlikely(dm_stats_used(&io->md->stats))) {
527 		sector_t sector;
528 
529 		if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
530 			sector = bio_end_sector(bio) - io->sector_offset;
531 		else
532 			sector = bio->bi_iter.bi_sector;
533 
534 		dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
535 				    sector, dm_io_sectors(io, bio),
536 				    end, io->start_time, &io->stats_aux);
537 	}
538 }
539 
540 static void __dm_start_io_acct(struct dm_io *io)
541 {
542 	dm_io_acct(io, false);
543 }
544 
545 static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
546 {
547 	/*
548 	 * Ensure IO accounting is only ever started once.
549 	 */
550 	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
551 		return;
552 
553 	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
554 	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
555 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
556 	} else {
557 		unsigned long flags;
558 		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
559 		spin_lock_irqsave(&io->lock, flags);
560 		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
561 			spin_unlock_irqrestore(&io->lock, flags);
562 			return;
563 		}
564 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
565 		spin_unlock_irqrestore(&io->lock, flags);
566 	}
567 
568 	__dm_start_io_acct(io);
569 }
570 
571 static void dm_end_io_acct(struct dm_io *io)
572 {
573 	dm_io_acct(io, true);
574 }
575 
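/*
 * Allocate a dm_io for @bio.  The dm_io and its embedded dm_target_io live
 * in the front-pad of a clone bio allocated from md->mempools->io_bs.
 * io_count starts at 2: one reference for submission, one for completion.
 * Returns NULL if the clone allocation fails (possible with GFP_NOWAIT).
 */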
576 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
577 {
578 	struct dm_io *io;
579 	struct dm_target_io *tio;
580 	struct bio *clone;
581 
582 	clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
583 	if (unlikely(!clone))
584 		return NULL;
585 	tio = clone_to_tio(clone);
586 	tio->flags = 0;
587 	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
588 	tio->io = NULL;
589 
590 	io = container_of(tio, struct dm_io, tio);
591 	io->magic = DM_IO_MAGIC;
592 	io->status = BLK_STS_OK;
593 
594 	/* one ref is for submission, the other is for completion */
595 	atomic_set(&io->io_count, 2);
596 	this_cpu_inc(*md->pending_io);
597 	io->orig_bio = bio;
598 	io->md = md;
599 	spin_lock_init(&io->lock);
600 	io->start_time = jiffies;
601 	io->flags = 0;
602 	if (blk_queue_io_stat(md->queue))
603 		dm_io_set_flag(io, DM_IO_BLK_STAT);
604 
605 	if (static_branch_unlikely(&stats_enabled) &&
606 	    unlikely(dm_stats_used(&md->stats)))
607 		dm_stats_record_start(&md->stats, &io->stats_aux);
608 
609 	return io;
610 }
611 
612 static void free_io(struct dm_io *io)
613 {
614 	bio_put(&io->tio.clone);
615 }
616 
617 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
618 			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
619 {
620 	struct mapped_device *md = ci->io->md;
621 	struct dm_target_io *tio;
622 	struct bio *clone;
623 
624 	if (!ci->io->tio.io) {
625 		/* the dm_target_io embedded in ci->io is available */
626 		tio = &ci->io->tio;
627 		/* alloc_io() already initialized embedded clone */
628 		clone = &tio->clone;
629 	} else {
630 		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
631 					&md->mempools->bs);
632 		if (!clone)
633 			return NULL;
634 
635 		/* REQ_DM_POLL_LIST shouldn't be inherited */
636 		clone->bi_opf &= ~REQ_DM_POLL_LIST;
637 
638 		tio = clone_to_tio(clone);
639 		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
640 	}
641 
642 	tio->magic = DM_TIO_MAGIC;
643 	tio->io = ci->io;
644 	tio->ti = ti;
645 	tio->target_bio_nr = target_bio_nr;
646 	tio->len_ptr = len;
647 	tio->old_sector = 0;
648 
649 	/* Set default bdev, but target must bio_set_dev() before issuing IO */
650 	clone->bi_bdev = md->disk->part0;
651 	if (likely(ti != NULL) && unlikely(ti->needs_bio_set_dev))
652 		bio_set_dev(clone, md->disk->part0);
653 
654 	if (len) {
655 		clone->bi_iter.bi_size = to_bytes(*len);
656 		if (bio_integrity(clone))
657 			bio_integrity_trim(clone);
658 	}
659 
660 	return clone;
661 }
662 
663 static void free_tio(struct bio *clone)
664 {
665 	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
666 		return;
667 	bio_put(clone);
668 }
669 
670 /*
671  * Add the bio to the list of deferred io.
672  */
673 static void queue_io(struct mapped_device *md, struct bio *bio)
674 {
675 	unsigned long flags;
676 
677 	spin_lock_irqsave(&md->deferred_lock, flags);
678 	bio_list_add(&md->deferred, bio);
679 	spin_unlock_irqrestore(&md->deferred_lock, flags);
680 	queue_work(md->wq, &md->work);
681 }
682 
683 /*
684  * Everyone (including functions in this file) should use this
685  * function to access the md->map field, and make sure they call
686  * dm_put_live_table() when finished.
687  */
688 struct dm_table *dm_get_live_table(struct mapped_device *md,
689 				   int *srcu_idx) __acquires(md->io_barrier)
690 {
691 	*srcu_idx = srcu_read_lock(&md->io_barrier);
692 
693 	return srcu_dereference(md->map, &md->io_barrier);
694 }
695 
696 void dm_put_live_table(struct mapped_device *md,
697 		       int srcu_idx) __releases(md->io_barrier)
698 {
699 	srcu_read_unlock(&md->io_barrier, srcu_idx);
700 }
701 
702 void dm_sync_table(struct mapped_device *md)
703 {
704 	synchronize_srcu(&md->io_barrier);
705 	synchronize_rcu_expedited();
706 }
707 
708 /*
709  * A fast alternative to dm_get_live_table/dm_put_live_table.
710  * The caller must not block between these two functions.
711  */
712 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
713 {
714 	rcu_read_lock();
715 	return rcu_dereference(md->map);
716 }
717 
718 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
719 {
720 	rcu_read_unlock();
721 }
722 
723 static char *_dm_claim_ptr = "I belong to device-mapper";
724 
725 /*
726  * Open a table device so we can use it as a map destination.
727  */
728 static struct table_device *open_table_device(struct mapped_device *md,
729 		dev_t dev, blk_mode_t mode)
730 {
731 	struct table_device *td;
732 	struct file *bdev_file;
733 	struct block_device *bdev;
734 	u64 part_off;
735 	int r;
736 
737 	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
738 	if (!td)
739 		return ERR_PTR(-ENOMEM);
740 	refcount_set(&td->count, 1);
741 
742 	bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
743 	if (IS_ERR(bdev_file)) {
744 		r = PTR_ERR(bdev_file);
745 		goto out_free_td;
746 	}
747 
748 	bdev = file_bdev(bdev_file);
749 
750 	/*
751 	 * We can be called before the dm disk is added.  In that case we can't
752 	 * register the holder relation here.  It will be done once add_disk has
753 	 * been called.
754 	 */
755 	if (md->disk->slave_dir) {
756 		r = bd_link_disk_holder(bdev, md->disk);
757 		if (r)
758 			goto out_blkdev_put;
759 	}
760 
761 	td->dm_dev.mode = mode;
762 	td->dm_dev.bdev = bdev;
763 	td->dm_dev.bdev_file = bdev_file;
764 	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
765 						NULL, NULL);
766 	format_dev_t(td->dm_dev.name, dev);
767 	list_add(&td->list, &md->table_devices);
768 	return td;
769 
770 out_blkdev_put:
771 	__fput_sync(bdev_file);
772 out_free_td:
773 	kfree(td);
774 	return ERR_PTR(r);
775 }
776 
777 /*
778  * Close a table device that we've been using.
779  */
780 static void close_table_device(struct table_device *td, struct mapped_device *md)
781 {
782 	if (md->disk->slave_dir)
783 		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
784 
785 	/* Leverage async fput() if DMF_DEFERRED_REMOVE set */
786 	if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
787 		fput(td->dm_dev.bdev_file);
788 	else
789 		__fput_sync(td->dm_dev.bdev_file);
790 
791 	put_dax(td->dm_dev.dax_dev);
792 	list_del(&td->list);
793 	kfree(td);
794 }
795 
796 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
797 					      blk_mode_t mode)
798 {
799 	struct table_device *td;
800 
801 	list_for_each_entry(td, l, list)
802 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
803 			return td;
804 
805 	return NULL;
806 }
807 
808 int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
809 			struct dm_dev **result)
810 {
811 	struct table_device *td;
812 
813 	mutex_lock(&md->table_devices_lock);
814 	td = find_table_device(&md->table_devices, dev, mode);
815 	if (!td) {
816 		td = open_table_device(md, dev, mode);
817 		if (IS_ERR(td)) {
818 			mutex_unlock(&md->table_devices_lock);
819 			return PTR_ERR(td);
820 		}
821 	} else {
822 		refcount_inc(&td->count);
823 	}
824 	mutex_unlock(&md->table_devices_lock);
825 
826 	*result = &td->dm_dev;
827 	return 0;
828 }
829 
830 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
831 {
832 	struct table_device *td = container_of(d, struct table_device, dm_dev);
833 
834 	mutex_lock(&md->table_devices_lock);
835 	if (refcount_dec_and_test(&td->count))
836 		close_table_device(td, md);
837 	mutex_unlock(&md->table_devices_lock);
838 }
839 
840 /*
841  * Get the geometry associated with a dm device
842  */
843 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
844 {
845 	*geo = md->geometry;
846 
847 	return 0;
848 }
849 
850 /*
851  * Set the geometry of a device.
852  */
853 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
854 {
855 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
856 
857 	if (geo->start > sz) {
858 		DMERR("Start sector is beyond the geometry limits.");
859 		return -EINVAL;
860 	}
861 
862 	md->geometry = *geo;
863 
864 	return 0;
865 }
866 
867 static int __noflush_suspending(struct mapped_device *md)
868 {
869 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
870 }
871 
872 static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
873 {
874 	struct mapped_device *md = io->md;
875 
876 	if (first_stage) {
877 		struct dm_io *next = md->requeue_list;
878 
879 		md->requeue_list = io;
880 		io->next = next;
881 	} else {
882 		bio_list_add_head(&md->deferred, io->orig_bio);
883 	}
884 }
885 
886 static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
887 {
888 	if (first_stage)
889 		queue_work(md->wq, &md->requeue_work);
890 	else
891 		queue_work(md->wq, &md->work);
892 }
893 
894 /*
895  * Return true if the dm_io's original bio is requeued.
896  * io->status is updated with error if requeue disallowed.
897  */
898 static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
899 {
900 	struct bio *bio = io->orig_bio;
901 	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
902 	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
903 				     (bio->bi_opf & REQ_POLLED));
904 	struct mapped_device *md = io->md;
905 	bool requeued = false;
906 
907 	if (handle_requeue || handle_polled_eagain) {
908 		unsigned long flags;
909 
910 		if (bio->bi_opf & REQ_POLLED) {
911 			/*
912 			 * The upper layer won't help us poll a split bio
913 			 * (io->orig_bio may only reflect a subset of the
914 			 * pre-split original) so clear REQ_POLLED.
915 			 */
916 			bio_clear_polled(bio);
917 		}
918 
919 		/*
920 		 * Target requested pushing back the I/O or
921 		 * polled IO hit BLK_STS_AGAIN.
922 		 */
923 		spin_lock_irqsave(&md->deferred_lock, flags);
924 		if ((__noflush_suspending(md) &&
925 		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
926 		    handle_polled_eagain || first_stage) {
927 			dm_requeue_add_io(io, first_stage);
928 			requeued = true;
929 		} else {
930 			/*
931 			 * noflush suspend was interrupted or this is
932 			 * a write to a zoned target.
933 			 */
934 			io->status = BLK_STS_IOERR;
935 		}
936 		spin_unlock_irqrestore(&md->deferred_lock, flags);
937 	}
938 
939 	if (requeued)
940 		dm_kick_requeue(md, first_stage);
941 
942 	return requeued;
943 }
944 
945 static void __dm_io_complete(struct dm_io *io, bool first_stage)
946 {
947 	struct bio *bio = io->orig_bio;
948 	struct mapped_device *md = io->md;
949 	blk_status_t io_error;
950 	bool requeued;
951 
952 	requeued = dm_handle_requeue(io, first_stage);
953 	if (requeued && first_stage)
954 		return;
955 
956 	io_error = io->status;
957 	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
958 		dm_end_io_acct(io);
959 	else if (!io_error) {
960 		/*
961 		 * Must handle a target that returned DM_MAPIO_SUBMITTED only
962 		 * to then call bio_endio() rather than dm_submit_bio_remap().
963 		 */
964 		__dm_start_io_acct(io);
965 		dm_end_io_acct(io);
966 	}
967 	free_io(io);
968 	smp_wmb();
969 	this_cpu_dec(*md->pending_io);
970 
971 	/* nudge anyone waiting on suspend queue */
972 	if (unlikely(wq_has_sleeper(&md->wait)))
973 		wake_up(&md->wait);
974 
975 	/* Return early if the original bio was requeued */
976 	if (requeued)
977 		return;
978 
979 	if (bio_is_flush_with_data(bio)) {
980 		/*
981 		 * Preflush done for flush with data, reissue
982 		 * without REQ_PREFLUSH.
983 		 */
984 		bio->bi_opf &= ~REQ_PREFLUSH;
985 		queue_io(md, bio);
986 	} else {
987 		/* done with normal IO or empty flush */
988 		if (io_error)
989 			bio->bi_status = io_error;
990 		bio_endio(bio);
991 	}
992 }
993 
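/*
 * Work function for the first stage of the two staged requeue described
 * below: drain md->requeue_list, rewind each dm_io so its bio matches the
 * part that was mapped to it, then complete it via the second stage (which
 * requeues the bio through md->deferred).
 */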
994 static void dm_wq_requeue_work(struct work_struct *work)
995 {
996 	struct mapped_device *md = container_of(work, struct mapped_device,
997 						requeue_work);
998 	unsigned long flags;
999 	struct dm_io *io;
1000 
1001 	/* reuse deferred lock to simplify dm_handle_requeue */
1002 	spin_lock_irqsave(&md->deferred_lock, flags);
1003 	io = md->requeue_list;
1004 	md->requeue_list = NULL;
1005 	spin_unlock_irqrestore(&md->deferred_lock, flags);
1006 
1007 	while (io) {
1008 		struct dm_io *next = io->next;
1009 
1010 		dm_io_rewind(io, &md->disk->bio_split);
1011 
1012 		io->next = NULL;
1013 		__dm_io_complete(io, false);
1014 		io = next;
1015 		cond_resched();
1016 	}
1017 }
1018 
1019 /*
1020  * Two staged requeue:
1021  *
1022  * 1) io->orig_bio points to the real original bio, and the part mapped to
1023  *    this io must be requeued, instead of other parts of the original bio.
1024  *
1025  * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
1026  */
1027 static void dm_io_complete(struct dm_io *io)
1028 {
1029 	bool first_requeue;
1030 
1031 	/*
1032 	 * Only a dm_io that has been split needs the two stage requeue;
1033 	 * otherwise we may run into a long bio clone chain during suspend and
1034 	 * OOM could be triggered.
1035 	 *
1036 	 * Also, a flush-with-data dm_io won't be marked as DM_IO_WAS_SPLIT, so
1037 	 * it isn't handled via the first stage requeue either.
1038 	 */
1039 	if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
1040 		first_requeue = true;
1041 	else
1042 		first_requeue = false;
1043 
1044 	__dm_io_complete(io, first_requeue);
1045 }
1046 
1047 /*
1048  * Decrements the number of outstanding ios that a bio has been
1049  * cloned into, completing the original io if necessary.
1050  */
1051 static inline void __dm_io_dec_pending(struct dm_io *io)
1052 {
1053 	if (atomic_dec_and_test(&io->io_count))
1054 		dm_io_complete(io);
1055 }
1056 
1057 static void dm_io_set_error(struct dm_io *io, blk_status_t error)
1058 {
1059 	unsigned long flags;
1060 
1061 	/* Push-back supersedes any I/O errors */
1062 	spin_lock_irqsave(&io->lock, flags);
1063 	if (!(io->status == BLK_STS_DM_REQUEUE &&
1064 	      __noflush_suspending(io->md))) {
1065 		io->status = error;
1066 	}
1067 	spin_unlock_irqrestore(&io->lock, flags);
1068 }
1069 
1070 static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
1071 {
1072 	if (unlikely(error))
1073 		dm_io_set_error(io, error);
1074 
1075 	__dm_io_dec_pending(io);
1076 }
1077 
1078 /*
1079  * The queue_limits are only valid as long as you have a reference
1080  * count on 'md', but verification is _not_ imposed, to avoid atomic_read().
1081  */
1082 static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1083 {
1084 	return &md->queue->limits;
1085 }
1086 
1087 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
1088 {
1089 	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
1090 }
1091 
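/*
 * Completion handler for every clone bio issued by bio-based DM.  Disables
 * discard/write-zeroes on the DM queue if the underlying device rejected
 * such a request, performs zoned-device endio processing, lets the target's
 * ->end_io() adjust the status, releases the swap-bios throttle if held,
 * and drops the clone's reference on the dm_io.
 */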
1092 static void clone_endio(struct bio *bio)
1093 {
1094 	blk_status_t error = bio->bi_status;
1095 	struct dm_target_io *tio = clone_to_tio(bio);
1096 	struct dm_target *ti = tio->ti;
1097 	dm_endio_fn endio = likely(ti != NULL) ? ti->type->end_io : NULL;
1098 	struct dm_io *io = tio->io;
1099 	struct mapped_device *md = io->md;
1100 
1101 	if (unlikely(error == BLK_STS_TARGET)) {
1102 		if (bio_op(bio) == REQ_OP_DISCARD &&
1103 		    !bdev_max_discard_sectors(bio->bi_bdev))
1104 			blk_queue_disable_discard(md->queue);
1105 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1106 			 !bdev_write_zeroes_sectors(bio->bi_bdev))
1107 			blk_queue_disable_write_zeroes(md->queue);
1108 	}
1109 
1110 	if (static_branch_unlikely(&zoned_enabled) &&
1111 	    unlikely(bdev_is_zoned(bio->bi_bdev)))
1112 		dm_zone_endio(io, bio);
1113 
1114 	if (endio) {
1115 		int r = endio(ti, bio, &error);
1116 
1117 		switch (r) {
1118 		case DM_ENDIO_REQUEUE:
1119 			if (static_branch_unlikely(&zoned_enabled)) {
1120 				/*
1121 				 * Requeuing writes to a sequential zone of a zoned
1122 				 * target will break the sequential write pattern:
1123 				 * fail such IO.
1124 				 */
1125 				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
1126 					error = BLK_STS_IOERR;
1127 				else
1128 					error = BLK_STS_DM_REQUEUE;
1129 			} else
1130 				error = BLK_STS_DM_REQUEUE;
1131 			fallthrough;
1132 		case DM_ENDIO_DONE:
1133 			break;
1134 		case DM_ENDIO_INCOMPLETE:
1135 			/* The target will handle the io */
1136 			return;
1137 		default:
1138 			DMCRIT("unimplemented target endio return value: %d", r);
1139 			BUG();
1140 		}
1141 	}
1142 
1143 	if (static_branch_unlikely(&swap_bios_enabled) &&
1144 	    likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio)))
1145 		up(&md->swap_bios_semaphore);
1146 
1147 	free_tio(bio);
1148 	dm_io_dec_pending(io, error);
1149 }
1150 
1151 /*
1152  * Return maximum size of I/O possible at the supplied sector up to the current
1153  * target boundary.
1154  */
1155 static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
1156 						  sector_t target_offset)
1157 {
1158 	return ti->len - target_offset;
1159 }
1160 
1161 static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
1162 			     unsigned int max_granularity,
1163 			     unsigned int max_sectors)
1164 {
1165 	sector_t target_offset = dm_target_offset(ti, sector);
1166 	sector_t len = max_io_len_target_boundary(ti, target_offset);
1167 
1168 	/*
1169 	 * Does the target need to split IO even further?
1170 	 * - varied (per target) IO splitting is a tenet of DM; this
1171 	 *   explains why stacked chunk_sectors based splitting via
1172 	 *   bio_split_to_limits() isn't possible here.
1173 	 */
1174 	if (!max_granularity)
1175 		return len;
1176 	return min_t(sector_t, len,
1177 		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
1178 		    blk_boundary_sectors_left(target_offset, max_granularity)));
1179 }
1180 
1181 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
1182 {
1183 	return __max_io_len(ti, sector, ti->max_io_len, 0);
1184 }
1185 
1186 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1187 {
1188 	if (len > UINT_MAX) {
1189 		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1190 		      (unsigned long long)len, UINT_MAX);
1191 		ti->error = "Maximum size of target IO is too large";
1192 		return -EINVAL;
1193 	}
1194 
1195 	ti->max_io_len = (uint32_t) len;
1196 
1197 	return 0;
1198 }
1199 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1200 
1201 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1202 						sector_t sector, int *srcu_idx)
1203 	__acquires(md->io_barrier)
1204 {
1205 	struct dm_table *map;
1206 	struct dm_target *ti;
1207 
1208 	map = dm_get_live_table(md, srcu_idx);
1209 	if (!map)
1210 		return NULL;
1211 
1212 	ti = dm_table_find_target(map, sector);
1213 	if (!ti)
1214 		return NULL;
1215 
1216 	return ti;
1217 }
1218 
1219 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1220 		long nr_pages, enum dax_access_mode mode, void **kaddr,
1221 		pfn_t *pfn)
1222 {
1223 	struct mapped_device *md = dax_get_private(dax_dev);
1224 	sector_t sector = pgoff * PAGE_SECTORS;
1225 	struct dm_target *ti;
1226 	long len, ret = -EIO;
1227 	int srcu_idx;
1228 
1229 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1230 
1231 	if (!ti)
1232 		goto out;
1233 	if (!ti->type->direct_access)
1234 		goto out;
1235 	len = max_io_len(ti, sector) / PAGE_SECTORS;
1236 	if (len < 1)
1237 		goto out;
1238 	nr_pages = min(len, nr_pages);
1239 	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1240 
1241  out:
1242 	dm_put_live_table(md, srcu_idx);
1243 
1244 	return ret;
1245 }
1246 
1247 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1248 				  size_t nr_pages)
1249 {
1250 	struct mapped_device *md = dax_get_private(dax_dev);
1251 	sector_t sector = pgoff * PAGE_SECTORS;
1252 	struct dm_target *ti;
1253 	int ret = -EIO;
1254 	int srcu_idx;
1255 
1256 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1257 
1258 	if (!ti)
1259 		goto out;
1260 	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1261 		/*
1262 		 * ->zero_page_range() is a mandatory dax operation. If we are
1263 		 *  here, something is wrong.
1264 		 */
1265 		goto out;
1266 	}
1267 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1268  out:
1269 	dm_put_live_table(md, srcu_idx);
1270 
1271 	return ret;
1272 }
1273 
1274 static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
1275 		void *addr, size_t bytes, struct iov_iter *i)
1276 {
1277 	struct mapped_device *md = dax_get_private(dax_dev);
1278 	sector_t sector = pgoff * PAGE_SECTORS;
1279 	struct dm_target *ti;
1280 	int srcu_idx;
1281 	long ret = 0;
1282 
1283 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1284 	if (!ti || !ti->type->dax_recovery_write)
1285 		goto out;
1286 
1287 	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1288 out:
1289 	dm_put_live_table(md, srcu_idx);
1290 	return ret;
1291 }
1292 
1293 /*
1294  * A target may call dm_accept_partial_bio only from the map routine.  It is
1295  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1296  * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
1297  * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
1298  * by __send_duplicate_bios().
1299  *
1300  * dm_accept_partial_bio informs the dm that the target only wants to process
1301  * additional n_sectors sectors of the bio and the rest of the data should be
1302  * sent in a next bio.
1303  *
1304  * A diagram that explains the arithmetic:
1305  * +--------------------+---------------+-------+
1306  * |         1          |       2       |   3   |
1307  * +--------------------+---------------+-------+
1308  *
1309  * <-------------- *tio->len_ptr --------------->
1310  *                      <----- bio_sectors ----->
1311  *                      <-- n_sectors -->
1312  *
1313  * Region 1 was already iterated over with bio_advance or similar function.
1314  *	(it may be empty if the target doesn't use bio_advance)
1315  * Region 2 is the remaining bio size that the target wants to process.
1316  *	(it may be empty if region 1 is non-empty, although there is no reason
1317  *	 to make it empty)
1318  * The target requires that region 3 is to be sent in the next bio.
1319  *
1320  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1321  * the partially processed part (the sum of regions 1+2) must be the same for all
1322  * copies of the bio.
1323  */
1324 void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
1325 {
1326 	struct dm_target_io *tio = clone_to_tio(bio);
1327 	struct dm_io *io = tio->io;
1328 	unsigned int bio_sectors = bio_sectors(bio);
1329 
1330 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
1331 	BUG_ON(bio_sectors > *tio->len_ptr);
1332 	BUG_ON(n_sectors > bio_sectors);
1333 
1334 	if (static_branch_unlikely(&zoned_enabled) &&
1335 	    unlikely(bdev_is_zoned(bio->bi_bdev))) {
1336 		enum req_op op = bio_op(bio);
1337 
1338 		BUG_ON(op_is_zone_mgmt(op));
1339 		BUG_ON(op == REQ_OP_WRITE);
1340 		BUG_ON(op == REQ_OP_WRITE_ZEROES);
1341 		BUG_ON(op == REQ_OP_ZONE_APPEND);
1342 	}
1343 
1344 	*tio->len_ptr -= bio_sectors - n_sectors;
1345 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1346 
1347 	/*
1348 	 * __split_and_process_bio() may have already saved mapped part
1349 	 * for accounting but it is being reduced so update accordingly.
1350 	 */
1351 	dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1352 	io->sectors = n_sectors;
1353 	io->sector_offset = bio_sectors(io->orig_bio);
1354 }
1355 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1356 
1357 /*
1358  * @clone: clone bio that DM core passed to target's .map function
1359  * @tgt_clone: clone of @clone bio that target needs submitted
1360  *
1361  * Targets should use this interface to submit bios they take
1362  * ownership of when returning DM_MAPIO_SUBMITTED.
1363  *
1364  * Target should also enable ti->accounts_remapped_io
1365  */
1366 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
1367 {
1368 	struct dm_target_io *tio = clone_to_tio(clone);
1369 	struct dm_io *io = tio->io;
1370 
1371 	/* establish bio that will get submitted */
1372 	if (!tgt_clone)
1373 		tgt_clone = clone;
1374 
1375 	/*
1376 	 * Account io->orig_bio to DM dev on behalf of the target
1377 	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
1378 	 */
1379 	dm_start_io_acct(io, clone);
1380 
1381 	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
1382 			      tio->old_sector);
1383 	submit_bio_noacct(tgt_clone);
1384 }
1385 EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
1386 
1387 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1388 {
1389 	mutex_lock(&md->swap_bios_lock);
1390 	while (latch < md->swap_bios) {
1391 		cond_resched();
1392 		down(&md->swap_bios_semaphore);
1393 		md->swap_bios--;
1394 	}
1395 	while (latch > md->swap_bios) {
1396 		cond_resched();
1397 		up(&md->swap_bios_semaphore);
1398 		md->swap_bios++;
1399 	}
1400 	mutex_unlock(&md->swap_bios_lock);
1401 }
1402 
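/*
 * Pass a clone to its target's ->map() function and act on the result:
 * DM_MAPIO_SUBMITTED (the target now owns the clone), DM_MAPIO_REMAPPED
 * (submit the remapped clone), or DM_MAPIO_KILL/DM_MAPIO_REQUEUE (free the
 * clone and fail or requeue the dm_io).  Also enforces the swap-bios limit.
 */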
1403 static void __map_bio(struct bio *clone)
1404 {
1405 	struct dm_target_io *tio = clone_to_tio(clone);
1406 	struct dm_target *ti = tio->ti;
1407 	struct dm_io *io = tio->io;
1408 	struct mapped_device *md = io->md;
1409 	int r;
1410 
1411 	clone->bi_end_io = clone_endio;
1412 
1413 	/*
1414 	 * Map the clone.
1415 	 */
1416 	tio->old_sector = clone->bi_iter.bi_sector;
1417 
1418 	if (static_branch_unlikely(&swap_bios_enabled) &&
1419 	    unlikely(swap_bios_limit(ti, clone))) {
1420 		int latch = get_swap_bios();
1421 
1422 		if (unlikely(latch != md->swap_bios))
1423 			__set_swap_bios_limit(md, latch);
1424 		down(&md->swap_bios_semaphore);
1425 	}
1426 
1427 	if (likely(ti->type->map == linear_map))
1428 		r = linear_map(ti, clone);
1429 	else if (ti->type->map == stripe_map)
1430 		r = stripe_map(ti, clone);
1431 	else
1432 		r = ti->type->map(ti, clone);
1433 
1434 	switch (r) {
1435 	case DM_MAPIO_SUBMITTED:
1436 		/* target has assumed ownership of this io */
1437 		if (!ti->accounts_remapped_io)
1438 			dm_start_io_acct(io, clone);
1439 		break;
1440 	case DM_MAPIO_REMAPPED:
1441 		dm_submit_bio_remap(clone, NULL);
1442 		break;
1443 	case DM_MAPIO_KILL:
1444 	case DM_MAPIO_REQUEUE:
1445 		if (static_branch_unlikely(&swap_bios_enabled) &&
1446 		    unlikely(swap_bios_limit(ti, clone)))
1447 			up(&md->swap_bios_semaphore);
1448 		free_tio(clone);
1449 		if (r == DM_MAPIO_KILL)
1450 			dm_io_dec_pending(io, BLK_STS_IOERR);
1451 		else
1452 			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1453 		break;
1454 	default:
1455 		DMCRIT("unimplemented target map return value: %d", r);
1456 		BUG();
1457 	}
1458 }
1459 
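/*
 * If only the first @len sectors of the remaining bio will be mapped by this
 * dm_io, record the mapped length and offset so that IO accounting and
 * requeueing can cope with the split.
 */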
1460 static void setup_split_accounting(struct clone_info *ci, unsigned int len)
1461 {
1462 	struct dm_io *io = ci->io;
1463 
1464 	if (ci->sector_count > len) {
1465 		/*
1466 		 * Split needed, save the mapped part for accounting.
1467 		 * NOTE: dm_accept_partial_bio() will update accordingly.
1468 		 */
1469 		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1470 		io->sectors = len;
1471 		io->sector_offset = bio_sectors(ci->bio);
1472 	}
1473 }
1474 
1475 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1476 				struct dm_target *ti, unsigned int num_bios,
1477 				unsigned *len)
1478 {
1479 	struct bio *bio;
1480 	int try;
1481 
1482 	for (try = 0; try < 2; try++) {
1483 		int bio_nr;
1484 
1485 		if (try && num_bios > 1)
1486 			mutex_lock(&ci->io->md->table_devices_lock);
1487 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1488 			bio = alloc_tio(ci, ti, bio_nr, len,
1489 					try ? GFP_NOIO : GFP_NOWAIT);
1490 			if (!bio)
1491 				break;
1492 
1493 			bio_list_add(blist, bio);
1494 		}
1495 		if (try && num_bios > 1)
1496 			mutex_unlock(&ci->io->md->table_devices_lock);
1497 		if (bio_nr == num_bios)
1498 			return;
1499 
1500 		while ((bio = bio_list_pop(blist)))
1501 			free_tio(bio);
1502 	}
1503 }
1504 
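/*
 * Allocate up to @num_bios clones of ci->bio for target @ti and map each of
 * them.  Returns the number of clones actually issued; callers subtract any
 * shortfall from io->io_count.
 */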
1505 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1506 					  unsigned int num_bios, unsigned int *len)
1507 {
1508 	struct bio_list blist = BIO_EMPTY_LIST;
1509 	struct bio *clone;
1510 	unsigned int ret = 0;
1511 
1512 	if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
1513 		return 0;
1514 
1515 	/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
1516 	if (len)
1517 		setup_split_accounting(ci, *len);
1518 
1519 	/*
1520 	 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
1521 	 * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
1522 	 */
1523 	alloc_multiple_bios(&blist, ci, ti, num_bios, len);
1524 	while ((clone = bio_list_pop(&blist))) {
1525 		if (num_bios > 1)
1526 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
1527 		__map_bio(clone);
1528 		ret += 1;
1529 	}
1530 
1531 	return ret;
1532 }
1533 
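/*
 * Send an empty flush bio to every target that declares num_flush_bios, or,
 * if the table sets flush_bypasses_map, directly to each underlying device.
 * An on-stack bio is used as the basis for the clones.
 */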
1534 static void __send_empty_flush(struct clone_info *ci)
1535 {
1536 	struct dm_table *t = ci->map;
1537 	struct bio flush_bio;
1538 	blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1539 
1540 	if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
1541 	    (REQ_IDLE | REQ_SYNC))
1542 		opf |= REQ_IDLE;
1543 
1544 	/*
1545 	 * Use an on-stack bio for this; it's safe since we don't
1546 	 * need to reference it after submit. It's just used as
1547 	 * the basis for the clone(s).
1548 	 */
1549 	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
1550 
1551 	ci->bio = &flush_bio;
1552 	ci->sector_count = 0;
1553 	ci->io->tio.clone.bi_iter.bi_size = 0;
1554 
1555 	if (!t->flush_bypasses_map) {
1556 		for (unsigned int i = 0; i < t->num_targets; i++) {
1557 			unsigned int bios;
1558 			struct dm_target *ti = dm_table_get_target(t, i);
1559 
1560 			if (unlikely(ti->num_flush_bios == 0))
1561 				continue;
1562 
1563 			atomic_add(ti->num_flush_bios, &ci->io->io_count);
1564 			bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
1565 						     NULL);
1566 			atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
1567 		}
1568 	} else {
1569 		/*
1570 		 * Note that there's no need to grab t->devices_lock here
1571 		 * because the targets that support flush optimization don't
1572 		 * modify the list of devices.
1573 		 */
1574 		struct list_head *devices = dm_table_get_devices(t);
1575 		unsigned int len = 0;
1576 		struct dm_dev_internal *dd;
1577 		list_for_each_entry(dd, devices, list) {
1578 			struct bio *clone;
1579 			/*
1580 			 * Note that the structure dm_target_io is not
1581 			 * associated with any target (because the device may be
1582 			 * used by multiple targets), so we set tio->ti = NULL.
1583 			 * We must check for NULL in the I/O processing path, to
1584 			 * avoid NULL pointer dereference.
1585 			 */
1586 			clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO);
1587 			atomic_add(1, &ci->io->io_count);
1588 			bio_set_dev(clone, dd->dm_dev->bdev);
1589 			clone->bi_end_io = clone_endio;
1590 			dm_submit_bio_remap(clone, NULL);
1591 		}
1592 	}
1593 
1594 	/*
1595 	 * alloc_io() takes one extra reference for submission, so the
1596 	 * reference won't reach 0 without the following subtraction
1597 	 */
1598 	atomic_sub(1, &ci->io->io_count);
1599 
1600 	bio_uninit(ci->bio);
1601 }
1602 
1603 static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1604 			       unsigned int num_bios, unsigned int max_granularity,
1605 			       unsigned int max_sectors)
1606 {
1607 	unsigned int len, bios;
1608 
1609 	len = min_t(sector_t, ci->sector_count,
1610 		    __max_io_len(ti, ci->sector, max_granularity, max_sectors));
1611 
1612 	atomic_add(num_bios, &ci->io->io_count);
1613 	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
1614 	/*
1615 	 * alloc_io() takes one extra reference for submission, so the
1616 	 * reference won't reach 0 without the following (+1) subtraction
1617 	 */
1618 	atomic_sub(num_bios - bios + 1, &ci->io->io_count);
1619 
1620 	ci->sector += len;
1621 	ci->sector_count -= len;
1622 }
1623 
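/*
 * Abnormal IO (discard, secure erase, write zeroes, zone reset all) bypasses
 * the regular splitting path and is handled by __process_abnormal_io(),
 * which splits according to the limits of the specific operation.
 */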
1624 static bool is_abnormal_io(struct bio *bio)
1625 {
1626 	switch (bio_op(bio)) {
1627 	case REQ_OP_READ:
1628 	case REQ_OP_WRITE:
1629 	case REQ_OP_FLUSH:
1630 		return false;
1631 	case REQ_OP_DISCARD:
1632 	case REQ_OP_SECURE_ERASE:
1633 	case REQ_OP_WRITE_ZEROES:
1634 	case REQ_OP_ZONE_RESET_ALL:
1635 		return true;
1636 	default:
1637 		return false;
1638 	}
1639 }
1640 
1641 static blk_status_t __process_abnormal_io(struct clone_info *ci,
1642 					  struct dm_target *ti)
1643 {
1644 	unsigned int num_bios = 0;
1645 	unsigned int max_granularity = 0;
1646 	unsigned int max_sectors = 0;
1647 	struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
1648 
1649 	switch (bio_op(ci->bio)) {
1650 	case REQ_OP_DISCARD:
1651 		num_bios = ti->num_discard_bios;
1652 		max_sectors = limits->max_discard_sectors;
1653 		if (ti->max_discard_granularity)
1654 			max_granularity = max_sectors;
1655 		break;
1656 	case REQ_OP_SECURE_ERASE:
1657 		num_bios = ti->num_secure_erase_bios;
1658 		max_sectors = limits->max_secure_erase_sectors;
1659 		break;
1660 	case REQ_OP_WRITE_ZEROES:
1661 		num_bios = ti->num_write_zeroes_bios;
1662 		max_sectors = limits->max_write_zeroes_sectors;
1663 		break;
1664 	default:
1665 		break;
1666 	}
1667 
1668 	/*
1669 	 * Even though the device advertised support for this type of
1670 	 * request, that does not mean every target supports it, and
1671 	 * reconfiguration might also have changed that since the
1672 	 * check was performed.
1673 	 */
1674 	if (unlikely(!num_bios))
1675 		return BLK_STS_NOTSUPP;
1676 
1677 	__send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
1678 
1679 	return BLK_STS_OK;
1680 }
1681 
1682 /*
1683  * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1684  * associated with this bio, and this bio's bi_private needs to be
1685  * stored in dm_io->data before the reuse.
1686  *
1687  * bio->bi_private is owned by fs or upper layer, so block layer won't
1688  * touch it after splitting. Meantime it won't be changed by anyone after
1689  * bio is submitted. So this reuse is safe.
1690  */
1691 static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1692 {
1693 	return (struct dm_io **)&bio->bi_private;
1694 }
1695 
1696 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1697 {
1698 	struct dm_io **head = dm_poll_list_head(bio);
1699 
1700 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1701 		bio->bi_opf |= REQ_DM_POLL_LIST;
1702 		/*
1703 		 * Save .bi_private into dm_io, so that we can reuse
1704 		 * .bi_private as dm_io list head for storing dm_io list
1705 		 */
1706 		io->data = bio->bi_private;
1707 
1708 		/* tell block layer to poll for completion */
1709 		bio->bi_cookie = ~BLK_QC_T_NONE;
1710 
1711 		io->next = NULL;
1712 	} else {
1713 		/*
1714 		 * bio recursed due to split, reuse original poll list,
1715 		 * and save bio->bi_private too.
1716 		 */
1717 		io->data = (*head)->data;
1718 		io->next = *head;
1719 	}
1720 
1721 	*head = io;
1722 }
1723 
1724 /*
1725  * Select the correct strategy for processing a non-flush bio.
1726  */
1727 static blk_status_t __split_and_process_bio(struct clone_info *ci)
1728 {
1729 	struct bio *clone;
1730 	struct dm_target *ti;
1731 	unsigned int len;
1732 
1733 	ti = dm_table_find_target(ci->map, ci->sector);
1734 	if (unlikely(!ti))
1735 		return BLK_STS_IOERR;
1736 
1737 	if (unlikely(ci->is_abnormal_io))
1738 		return __process_abnormal_io(ci, ti);
1739 
1740 	/*
1741 	 * Only support bio polling for normal IO, and the target io is
1742 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
1743 	 */
1744 	ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
1745 
1746 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1747 	if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count)
1748 		return BLK_STS_IOERR;
1749 
1750 	setup_split_accounting(ci, len);
1751 
1752 	if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
1753 		if (unlikely(!dm_target_supports_nowait(ti->type)))
1754 			return BLK_STS_NOTSUPP;
1755 
1756 		clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
1757 		if (unlikely(!clone))
1758 			return BLK_STS_AGAIN;
1759 	} else {
1760 		clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
1761 	}
1762 	__map_bio(clone);
1763 
1764 	ci->sector += len;
1765 	ci->sector_count -= len;
1766 
1767 	return BLK_STS_OK;
1768 }
1769 
1770 static void init_clone_info(struct clone_info *ci, struct dm_io *io,
1771 			    struct dm_table *map, struct bio *bio, bool is_abnormal)
1772 {
1773 	ci->map = map;
1774 	ci->io = io;
1775 	ci->bio = bio;
1776 	ci->is_abnormal_io = is_abnormal;
1777 	ci->submit_as_polled = false;
1778 	ci->sector = bio->bi_iter.bi_sector;
1779 	ci->sector_count = bio_sectors(bio);
1780 
1781 	/* Shouldn't happen but sector_count was being set to 0 so... */
1782 	if (static_branch_unlikely(&zoned_enabled) &&
1783 	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1784 		ci->sector_count = 0;
1785 }
1786 
1787 #ifdef CONFIG_BLK_DEV_ZONED
1788 static inline bool dm_zone_bio_needs_split(struct bio *bio)
1789 {
1790 	/*
1791 	 * Special case the zone operations that cannot or should not be split.
1792 	 */
1793 	switch (bio_op(bio)) {
1794 	case REQ_OP_ZONE_APPEND:
1795 	case REQ_OP_ZONE_FINISH:
1796 	case REQ_OP_ZONE_RESET:
1797 	case REQ_OP_ZONE_RESET_ALL:
1798 		return false;
1799 	default:
1800 		break;
1801 	}
1802 
1803 	/*
1804 	 * When mapped devices use the block layer zone write plugging, we must
1805 	 * split any large BIO to the mapped device limits to not submit BIOs
1806 	 * that span zone boundaries and to avoid potential deadlocks with
1807 	 * queue freeze operations.
1808 	 */
1809 	return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
1810 }
1811 
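/*
 * Hand @bio to the block layer's zone write plugging if it needs it.
 * Returns true if zone write plugging took ownership of the bio, in which
 * case the caller must not process it any further.
 */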
dm_zone_plug_bio(struct mapped_device * md,struct bio * bio)1812 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1813 {
1814 	if (!bio_needs_zone_write_plugging(bio))
1815 		return false;
1816 	return blk_zone_plug_bio(bio, 0);
1817 }
1818 
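/*
 * Emulate REQ_OP_ZONE_RESET_ALL for a target that does not support it
 * natively: look up which of the target's zones need a reset and issue an
 * individual REQ_OP_ZONE_RESET clone for each of them.
 */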
__send_zone_reset_all_emulated(struct clone_info * ci,struct dm_target * ti)1819 static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
1820 						   struct dm_target *ti)
1821 {
1822 	struct bio_list blist = BIO_EMPTY_LIST;
1823 	struct mapped_device *md = ci->io->md;
1824 	unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors;
1825 	unsigned long *need_reset;
1826 	unsigned int i, nr_zones, nr_reset;
1827 	unsigned int num_bios = 0;
1828 	blk_status_t sts = BLK_STS_OK;
1829 	sector_t sector = ti->begin;
1830 	struct bio *clone;
1831 	int ret;
1832 
1833 	nr_zones = ti->len >> ilog2(zone_sectors);
1834 	need_reset = bitmap_zalloc(nr_zones, GFP_NOIO);
1835 	if (!need_reset)
1836 		return BLK_STS_RESOURCE;
1837 
1838 	ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin,
1839 				       nr_zones, need_reset);
1840 	if (ret) {
1841 		sts = BLK_STS_IOERR;
1842 		goto free_bitmap;
1843 	}
1844 
1845 	/* If we have no zone to reset, we are done. */
1846 	nr_reset = bitmap_weight(need_reset, nr_zones);
1847 	if (!nr_reset)
1848 		goto free_bitmap;
1849 
1850 	atomic_add(nr_zones, &ci->io->io_count);
1851 
1852 	for (i = 0; i < nr_zones; i++) {
1853 
1854 		if (!test_bit(i, need_reset)) {
1855 			sector += zone_sectors;
1856 			continue;
1857 		}
1858 
1859 		if (bio_list_empty(&blist)) {
1860 			/* This may take a while, so be nice to others */
1861 			if (num_bios)
1862 				cond_resched();
1863 
1864 			/*
1865 			 * We may need to reset thousands of zones, so let's
1866 			 * not go crazy with the clone allocation.
1867 			 */
1868 			alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
1869 					    NULL);
1870 		}
1871 
1872 		/* Get a clone and change it to a regular reset operation. */
1873 		clone = bio_list_pop(&blist);
1874 		clone->bi_opf &= ~REQ_OP_MASK;
1875 		clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;
1876 		clone->bi_iter.bi_sector = sector;
1877 		clone->bi_iter.bi_size = 0;
1878 		__map_bio(clone);
1879 
1880 		sector += zone_sectors;
1881 		num_bios++;
1882 		nr_reset--;
1883 	}
1884 
1885 	WARN_ON_ONCE(!bio_list_empty(&blist));
1886 	atomic_sub(nr_zones - num_bios, &ci->io->io_count);
1887 	ci->sector_count = 0;
1888 
1889 free_bitmap:
1890 	bitmap_free(need_reset);
1891 
1892 	return sts;
1893 }
1894 
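/*
 * The target handles REQ_OP_ZONE_RESET_ALL natively: send it a single
 * clone of the original bio.
 */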
__send_zone_reset_all_native(struct clone_info * ci,struct dm_target * ti)1895 static void __send_zone_reset_all_native(struct clone_info *ci,
1896 					 struct dm_target *ti)
1897 {
1898 	unsigned int bios;
1899 
1900 	atomic_add(1, &ci->io->io_count);
1901 	bios = __send_duplicate_bios(ci, ti, 1, NULL);
1902 	atomic_sub(1 - bios, &ci->io->io_count);
1903 
1904 	ci->sector_count = 0;
1905 }
1906 
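/*
 * Process a REQ_OP_ZONE_RESET_ALL bio: use the native path for targets
 * that support the operation and emulation for those that do not.
 */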
__send_zone_reset_all(struct clone_info * ci)1907 static blk_status_t __send_zone_reset_all(struct clone_info *ci)
1908 {
1909 	struct dm_table *t = ci->map;
1910 	blk_status_t sts = BLK_STS_OK;
1911 
1912 	for (unsigned int i = 0; i < t->num_targets; i++) {
1913 		struct dm_target *ti = dm_table_get_target(t, i);
1914 
1915 		if (ti->zone_reset_all_supported) {
1916 			__send_zone_reset_all_native(ci, ti);
1917 			continue;
1918 		}
1919 
1920 		sts = __send_zone_reset_all_emulated(ci, ti);
1921 		if (sts != BLK_STS_OK)
1922 			break;
1923 	}
1924 
1925 	/* Release the reference that alloc_io() took for submission. */
1926 	atomic_sub(1, &ci->io->io_count);
1927 
1928 	return sts;
1929 }
1930 
1931 #else
dm_zone_bio_needs_split(struct bio * bio)1932 static inline bool dm_zone_bio_needs_split(struct bio *bio)
1933 {
1934 	return false;
1935 }
dm_zone_plug_bio(struct mapped_device * md,struct bio * bio)1936 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1937 {
1938 	return false;
1939 }
__send_zone_reset_all(struct clone_info * ci)1940 static blk_status_t __send_zone_reset_all(struct clone_info *ci)
1941 {
1942 	return BLK_STS_NOTSUPP;
1943 }
1944 #endif
1945 
1946 /*
1947  * Entry point to split a bio into clones and submit them to the targets.
1948  */
dm_split_and_process_bio(struct mapped_device * md,struct dm_table * map,struct bio * bio)1949 static void dm_split_and_process_bio(struct mapped_device *md,
1950 				     struct dm_table *map, struct bio *bio)
1951 {
1952 	struct clone_info ci;
1953 	struct dm_io *io;
1954 	blk_status_t error = BLK_STS_OK;
1955 	bool is_abnormal, need_split;
1956 
1957 	is_abnormal = is_abnormal_io(bio);
1958 	if (static_branch_unlikely(&zoned_enabled)) {
1959 		need_split = is_abnormal || dm_zone_bio_needs_split(bio);
1960 	} else {
1961 		need_split = is_abnormal;
1962 	}
1963 
1964 	if (unlikely(need_split)) {
1965 		/*
1966 		 * Use bio_split_to_limits() for abnormal IO (e.g. discard);
1967 		 * otherwise the associated queue_limits won't be imposed.
1968 		 * Also split the BIO for mapped devices needing zone append
1969 		 * emulation to ensure that the BIO does not cross zone
1970 		 * boundaries.
1971 		 */
1972 		bio = bio_split_to_limits(bio);
1973 		if (!bio)
1974 			return;
1975 	}
1976 
1977 	/*
1978 	 * Use the block layer zone write plugging for mapped devices that
1979 	 * need zone append emulation (e.g. dm-crypt).
1980 	 */
1981 	if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
1982 		return;
1983 
1984 	/* Only support nowait for normal IO */
1985 	if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
1986 		/*
1987 		 * Don't support NOWAIT for FLUSH because it may allocate
1988 		 * multiple bios and there's no easy way to undo the
1989 		 * allocations.
1990 		 */
1991 		if (bio->bi_opf & REQ_PREFLUSH) {
1992 			bio_wouldblock_error(bio);
1993 			return;
1994 		}
1995 		io = alloc_io(md, bio, GFP_NOWAIT);
1996 		if (unlikely(!io)) {
1997 			/* Unable to do anything without dm_io. */
1998 			bio_wouldblock_error(bio);
1999 			return;
2000 		}
2001 	} else {
2002 		io = alloc_io(md, bio, GFP_NOIO);
2003 	}
2004 	init_clone_info(&ci, io, map, bio, is_abnormal);
2005 
2006 	if (bio->bi_opf & REQ_PREFLUSH) {
2007 		__send_empty_flush(&ci);
2008 		/* dm_io_complete submits any data associated with flush */
2009 		goto out;
2010 	}
2011 
2012 	if (static_branch_unlikely(&zoned_enabled) &&
2013 	    (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
2014 		error = __send_zone_reset_all(&ci);
2015 		goto out;
2016 	}
2017 
2018 	error = __split_and_process_bio(&ci);
2019 	if (error || !ci.sector_count)
2020 		goto out;
2021 	/*
2022 	 * Remainder must be passed to submit_bio_noacct() so it gets handled
2023 	 * *after* bios already submitted have been completely processed.
2024 	 */
2025 	bio_trim(bio, io->sectors, ci.sector_count);
2026 	trace_block_split(bio, bio->bi_iter.bi_sector);
2027 	bio_inc_remaining(bio);
2028 	submit_bio_noacct(bio);
2029 out:
2030 	/*
2031 	 * Drop the extra reference count for non-POLLED bio, and hold one
2032 	 * reference for POLLED bio, which will be released in dm_poll_bio
2033 	 *
2034 	 * Add every dm_io instance onto the dm_io list whose head is stored
2035 	 * in bio->bi_private, so that dm_poll_bio can poll them all.
2036 	 */
2037 	if (error || !ci.submit_as_polled) {
2038 		/*
2039 		 * In case of submission failure, the extra reference taken
2040 		 * for submitting the io hasn't been consumed yet.
2041 		 */
2042 		if (error)
2043 			atomic_dec(&io->io_count);
2044 		dm_io_dec_pending(io, error);
2045 	} else
2046 		dm_queue_poll_io(bio, io);
2047 }
2048 
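/*
 * Entry point for bio-based I/O (block_device_operations.submit_bio):
 * grab the live table and either error the bio, defer it while the device
 * is suspended, or split and process it.
 */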
dm_submit_bio(struct bio * bio)2049 static void dm_submit_bio(struct bio *bio)
2050 {
2051 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
2052 	int srcu_idx;
2053 	struct dm_table *map;
2054 
2055 	map = dm_get_live_table(md, &srcu_idx);
2056 	if (unlikely(!map)) {
2057 		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
2058 			    dm_device_name(md));
2059 		bio_io_error(bio);
2060 		goto out;
2061 	}
2062 
2063 	/* If suspended, queue this IO for later */
2064 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
2065 		if (bio->bi_opf & REQ_NOWAIT)
2066 			bio_wouldblock_error(bio);
2067 		else if (bio->bi_opf & REQ_RAHEAD)
2068 			bio_io_error(bio);
2069 		else
2070 			queue_io(md, bio);
2071 		goto out;
2072 	}
2073 
2074 	dm_split_and_process_bio(md, map, bio);
2075 out:
2076 	dm_put_live_table(md, srcu_idx);
2077 }
2078 
dm_poll_dm_io(struct dm_io * io,struct io_comp_batch * iob,unsigned int flags)2079 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
2080 			  unsigned int flags)
2081 {
2082 	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
2083 
2084 	/* don't poll if the mapped io is done */
2085 	if (atomic_read(&io->io_count) > 1)
2086 		bio_poll(&io->tio.clone, iob, flags);
2087 
2088 	/* bio_poll holds the last reference */
2089 	return atomic_read(&io->io_count) == 1;
2090 }
2091 
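/*
 * block_device_operations.poll_bio: poll every dm_io on the list stored
 * in bio->bi_private, completing those that have finished and requeueing
 * the rest.  Returns 1 once all of them are done.
 */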
dm_poll_bio(struct bio * bio,struct io_comp_batch * iob,unsigned int flags)2092 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
2093 		       unsigned int flags)
2094 {
2095 	struct dm_io **head = dm_poll_list_head(bio);
2096 	struct dm_io *list = *head;
2097 	struct dm_io *tmp = NULL;
2098 	struct dm_io *curr, *next;
2099 
2100 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
2101 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
2102 		return 0;
2103 
2104 	WARN_ON_ONCE(!list);
2105 
2106 	/*
2107 	 * Restore .bi_private before possibly completing dm_io.
2108 	 *
2109 	 * bio_poll() is only possible once @bio has been completely
2110 	 * submitted via submit_bio_noacct()'s depth-first submission.
2111 	 * So there is no dm_queue_poll_io() race associated with
2112 	 * clearing REQ_DM_POLL_LIST here.
2113 	 */
2114 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
2115 	bio->bi_private = list->data;
2116 
2117 	for (curr = list, next = curr->next; curr; curr = next, next =
2118 			curr ? curr->next : NULL) {
2119 		if (dm_poll_dm_io(curr, iob, flags)) {
2120 			/*
2121 			 * clone_endio() has already occurred, so no
2122 			 * error handling is needed here.
2123 			 */
2124 			__dm_io_dec_pending(curr);
2125 		} else {
2126 			curr->next = tmp;
2127 			tmp = curr;
2128 		}
2129 	}
2130 
2131 	/* Not done? */
2132 	if (tmp) {
2133 		bio->bi_opf |= REQ_DM_POLL_LIST;
2134 		/* Reset bio->bi_private to dm_io list head */
2135 		*head = tmp;
2136 		return 0;
2137 	}
2138 	return 1;
2139 }
2140 
2141 /*
2142  *---------------------------------------------------------------
2143  * An IDR is used to keep track of allocated minor numbers.
2144  *---------------------------------------------------------------
2145  */
free_minor(int minor)2146 static void free_minor(int minor)
2147 {
2148 	spin_lock(&_minor_lock);
2149 	idr_remove(&_minor_idr, minor);
2150 	spin_unlock(&_minor_lock);
2151 }
2152 
2153 /*
2154  * See if the device with a specific minor # is free.
2155  */
specific_minor(int minor)2156 static int specific_minor(int minor)
2157 {
2158 	int r;
2159 
2160 	if (minor >= (1 << MINORBITS))
2161 		return -EINVAL;
2162 
2163 	idr_preload(GFP_KERNEL);
2164 	spin_lock(&_minor_lock);
2165 
2166 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
2167 
2168 	spin_unlock(&_minor_lock);
2169 	idr_preload_end();
2170 	if (r < 0)
2171 		return r == -ENOSPC ? -EBUSY : r;
2172 	return 0;
2173 }
2174 
next_free_minor(int * minor)2175 static int next_free_minor(int *minor)
2176 {
2177 	int r;
2178 
2179 	idr_preload(GFP_KERNEL);
2180 	spin_lock(&_minor_lock);
2181 
2182 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2183 
2184 	spin_unlock(&_minor_lock);
2185 	idr_preload_end();
2186 	if (r < 0)
2187 		return r;
2188 	*minor = r;
2189 	return 0;
2190 }
2191 
2192 static const struct block_device_operations dm_blk_dops;
2193 static const struct block_device_operations dm_rq_blk_dops;
2194 static const struct dax_operations dm_dax_ops;
2195 
2196 static void dm_wq_work(struct work_struct *work);
2197 
2198 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
dm_queue_destroy_crypto_profile(struct request_queue * q)2199 static void dm_queue_destroy_crypto_profile(struct request_queue *q)
2200 {
2201 	dm_destroy_crypto_profile(q->crypto_profile);
2202 }
2203 
2204 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
2205 
dm_queue_destroy_crypto_profile(struct request_queue * q)2206 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
2207 {
2208 }
2209 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
2210 
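/*
 * Tear down everything alloc_dev() set up for @md: workqueue, mempools,
 * DAX device, gendisk/queue, per-cpu counters, SRCU state and mutexes.
 */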
cleanup_mapped_device(struct mapped_device * md)2211 static void cleanup_mapped_device(struct mapped_device *md)
2212 {
2213 	if (md->wq)
2214 		destroy_workqueue(md->wq);
2215 	dm_free_md_mempools(md->mempools);
2216 
2217 	if (md->dax_dev) {
2218 		dax_remove_host(md->disk);
2219 		kill_dax(md->dax_dev);
2220 		put_dax(md->dax_dev);
2221 		md->dax_dev = NULL;
2222 	}
2223 
2224 	if (md->disk) {
2225 		spin_lock(&_minor_lock);
2226 		md->disk->private_data = NULL;
2227 		spin_unlock(&_minor_lock);
2228 		if (dm_get_md_type(md) != DM_TYPE_NONE) {
2229 			struct table_device *td;
2230 
2231 			dm_sysfs_exit(md);
2232 			list_for_each_entry(td, &md->table_devices, list) {
2233 				bd_unlink_disk_holder(td->dm_dev.bdev,
2234 						      md->disk);
2235 			}
2236 
2237 			/*
2238 			 * Hold the lock to make sure del_gendisk() can't run
2239 			 * concurrently with open/close_table_device().
2240 			 */
2241 			mutex_lock(&md->table_devices_lock);
2242 			del_gendisk(md->disk);
2243 			mutex_unlock(&md->table_devices_lock);
2244 		}
2245 		dm_queue_destroy_crypto_profile(md->queue);
2246 		put_disk(md->disk);
2247 	}
2248 
2249 	if (md->pending_io) {
2250 		free_percpu(md->pending_io);
2251 		md->pending_io = NULL;
2252 	}
2253 
2254 	cleanup_srcu_struct(&md->io_barrier);
2255 
2256 	mutex_destroy(&md->suspend_lock);
2257 	mutex_destroy(&md->type_lock);
2258 	mutex_destroy(&md->table_devices_lock);
2259 	mutex_destroy(&md->swap_bios_lock);
2260 
2261 	dm_mq_cleanup_mapped_device(md);
2262 }
2263 
2264 /*
2265  * Allocate and initialise a blank device with a given minor.
2266  */
alloc_dev(int minor)2267 static struct mapped_device *alloc_dev(int minor)
2268 {
2269 	int r, numa_node_id = dm_get_numa_node();
2270 	struct dax_device *dax_dev;
2271 	struct mapped_device *md;
2272 	void *old_md;
2273 
2274 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2275 	if (!md) {
2276 		DMERR("unable to allocate device, out of memory.");
2277 		return NULL;
2278 	}
2279 
2280 	if (!try_module_get(THIS_MODULE))
2281 		goto bad_module_get;
2282 
2283 	/* get a minor number for the dev */
2284 	if (minor == DM_ANY_MINOR)
2285 		r = next_free_minor(&minor);
2286 	else
2287 		r = specific_minor(minor);
2288 	if (r < 0)
2289 		goto bad_minor;
2290 
2291 	r = init_srcu_struct(&md->io_barrier);
2292 	if (r < 0)
2293 		goto bad_io_barrier;
2294 
2295 	md->numa_node_id = numa_node_id;
2296 	md->init_tio_pdu = false;
2297 	md->type = DM_TYPE_NONE;
2298 	mutex_init(&md->suspend_lock);
2299 	mutex_init(&md->type_lock);
2300 	mutex_init(&md->table_devices_lock);
2301 	spin_lock_init(&md->deferred_lock);
2302 	atomic_set(&md->holders, 1);
2303 	atomic_set(&md->open_count, 0);
2304 	atomic_set(&md->event_nr, 0);
2305 	atomic_set(&md->uevent_seq, 0);
2306 	INIT_LIST_HEAD(&md->uevent_list);
2307 	INIT_LIST_HEAD(&md->table_devices);
2308 	spin_lock_init(&md->uevent_lock);
2309 
2310 	/*
2311 	 * Default to bio-based until a DM table is loaded and md->type is
2312 	 * established. If a request-based table is loaded, blk-mq will
2313 	 * override accordingly.
2314 	 */
2315 	md->disk = blk_alloc_disk(NULL, md->numa_node_id);
2316 	if (IS_ERR(md->disk)) {
2317 		md->disk = NULL;
2318 		goto bad;
2319 	}
2320 	md->queue = md->disk->queue;
2321 
2322 	init_waitqueue_head(&md->wait);
2323 	INIT_WORK(&md->work, dm_wq_work);
2324 	INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
2325 	init_waitqueue_head(&md->eventq);
2326 	init_completion(&md->kobj_holder.completion);
2327 
2328 	md->requeue_list = NULL;
2329 	md->swap_bios = get_swap_bios();
2330 	sema_init(&md->swap_bios_semaphore, md->swap_bios);
2331 	mutex_init(&md->swap_bios_lock);
2332 
2333 	md->disk->major = _major;
2334 	md->disk->first_minor = minor;
2335 	md->disk->minors = 1;
2336 	md->disk->flags |= GENHD_FL_NO_PART;
2337 	md->disk->fops = &dm_blk_dops;
2338 	md->disk->private_data = md;
2339 	sprintf(md->disk->disk_name, "dm-%d", minor);
2340 
2341 	dax_dev = alloc_dax(md, &dm_dax_ops);
2342 	if (IS_ERR(dax_dev)) {
2343 		if (PTR_ERR(dax_dev) != -EOPNOTSUPP)
2344 			goto bad;
2345 	} else {
2346 		set_dax_nocache(dax_dev);
2347 		set_dax_nomc(dax_dev);
2348 		md->dax_dev = dax_dev;
2349 		if (dax_add_host(dax_dev, md->disk))
2350 			goto bad;
2351 	}
2352 
2353 	format_dev_t(md->name, MKDEV(_major, minor));
2354 
2355 	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2356 	if (!md->wq)
2357 		goto bad;
2358 
2359 	md->pending_io = alloc_percpu(unsigned long);
2360 	if (!md->pending_io)
2361 		goto bad;
2362 
2363 	r = dm_stats_init(&md->stats);
2364 	if (r < 0)
2365 		goto bad;
2366 
2367 	/* Populate the mapping, nobody knows we exist yet */
2368 	spin_lock(&_minor_lock);
2369 	old_md = idr_replace(&_minor_idr, md, minor);
2370 	spin_unlock(&_minor_lock);
2371 
2372 	BUG_ON(old_md != MINOR_ALLOCED);
2373 
2374 	return md;
2375 
2376 bad:
2377 	cleanup_mapped_device(md);
2378 bad_io_barrier:
2379 	free_minor(minor);
2380 bad_minor:
2381 	module_put(THIS_MODULE);
2382 bad_module_get:
2383 	kvfree(md);
2384 	return NULL;
2385 }
2386 
2387 static void unlock_fs(struct mapped_device *md);
2388 
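/*
 * Release a mapped_device: unfreeze the filesystem if needed, tear down
 * the device state and return its minor number.
 */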
free_dev(struct mapped_device * md)2389 static void free_dev(struct mapped_device *md)
2390 {
2391 	int minor = MINOR(disk_devt(md->disk));
2392 
2393 	unlock_fs(md);
2394 
2395 	cleanup_mapped_device(md);
2396 
2397 	WARN_ON_ONCE(!list_empty(&md->table_devices));
2398 	dm_stats_cleanup(&md->stats);
2399 	free_minor(minor);
2400 
2401 	module_put(THIS_MODULE);
2402 	kvfree(md);
2403 }
2404 
2405 /*
2406  * Table event callback: deliver queued uevents and wake up event waiters.
2407  */
event_callback(void * context)2408 static void event_callback(void *context)
2409 {
2410 	unsigned long flags;
2411 	LIST_HEAD(uevents);
2412 	struct mapped_device *md = context;
2413 
2414 	spin_lock_irqsave(&md->uevent_lock, flags);
2415 	list_splice_init(&md->uevent_list, &uevents);
2416 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2417 
2418 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2419 
2420 	atomic_inc(&md->event_nr);
2421 	wake_up(&md->eventq);
2422 	dm_issue_global_event();
2423 }
2424 
2425 /*
2426  * Returns old map, which caller must destroy.
2427  */
__bind(struct mapped_device * md,struct dm_table * t,struct queue_limits * limits)2428 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2429 			       struct queue_limits *limits)
2430 {
2431 	struct dm_table *old_map;
2432 	sector_t size, old_size;
2433 	int ret;
2434 
2435 	lockdep_assert_held(&md->suspend_lock);
2436 
2437 	size = dm_table_get_size(t);
2438 
2439 	old_size = dm_get_size(md);
2440 
2441 	if (!dm_table_supports_size_change(t, old_size, size)) {
2442 		old_map = ERR_PTR(-EINVAL);
2443 		goto out;
2444 	}
2445 
2446 	set_capacity(md->disk, size);
2447 
2448 	ret = dm_table_set_restrictions(t, md->queue, limits);
2449 	if (ret) {
2450 		set_capacity(md->disk, old_size);
2451 		old_map = ERR_PTR(ret);
2452 		goto out;
2453 	}
2454 
2455 	/*
2456 	 * Wipe any geometry if the size of the table changed.
2457 	 */
2458 	if (size != old_size)
2459 		memset(&md->geometry, 0, sizeof(md->geometry));
2460 
2461 	dm_table_event_callback(t, event_callback, md);
2462 
2463 	if (dm_table_request_based(t)) {
2464 		/*
2465 		 * Leverage the fact that request-based DM targets are
2466 		 * immutable singletons - used to optimize dm_mq_queue_rq.
2467 		 */
2468 		md->immutable_target = dm_table_get_immutable_target(t);
2469 
2470 		/*
2471 		 * There is no need to reload with request-based dm because the
2472 		 * size of front_pad doesn't change.
2473 		 *
2474 		 * Note for future: If you are to reload bioset, prep-ed
2475 		 * requests in the queue may refer to bio from the old bioset,
2476 		 * so you must walk through the queue to unprep.
2477 		 */
2478 		if (!md->mempools)
2479 			md->mempools = t->mempools;
2480 		else
2481 			dm_free_md_mempools(t->mempools);
2482 	} else {
2483 		/*
2484 		 * The md may already have mempools that need changing.
2485 		 * If so, reload the bioset because front_pad may have changed
2486 		 * with the newly loaded table.
2487 		 */
2488 		dm_free_md_mempools(md->mempools);
2489 		md->mempools = t->mempools;
2490 	}
2491 	t->mempools = NULL;
2492 
2493 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2494 	rcu_assign_pointer(md->map, (void *)t);
2495 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2496 
2497 	if (old_map)
2498 		dm_sync_table(md);
2499 out:
2500 	return old_map;
2501 }
2502 
2503 /*
2504  * Returns unbound table for the caller to free.
2505  */
__unbind(struct mapped_device * md)2506 static struct dm_table *__unbind(struct mapped_device *md)
2507 {
2508 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2509 
2510 	if (!map)
2511 		return NULL;
2512 
2513 	dm_table_event_callback(map, NULL, NULL);
2514 	RCU_INIT_POINTER(md->map, NULL);
2515 	dm_sync_table(md);
2516 
2517 	return map;
2518 }
2519 
2520 /*
2521  * Constructor for a new device.
2522  */
dm_create(int minor,struct mapped_device ** result)2523 int dm_create(int minor, struct mapped_device **result)
2524 {
2525 	struct mapped_device *md;
2526 
2527 	md = alloc_dev(minor);
2528 	if (!md)
2529 		return -ENXIO;
2530 
2531 	dm_ima_reset_data(md);
2532 
2533 	*result = md;
2534 	return 0;
2535 }
2536 
2537 /*
2538  * Functions to manage md->type.
2539  * All are required to hold md->type_lock.
2540  */
dm_lock_md_type(struct mapped_device * md)2541 void dm_lock_md_type(struct mapped_device *md)
2542 {
2543 	mutex_lock(&md->type_lock);
2544 }
2545 
dm_unlock_md_type(struct mapped_device * md)2546 void dm_unlock_md_type(struct mapped_device *md)
2547 {
2548 	mutex_unlock(&md->type_lock);
2549 }
2550 
dm_get_md_type(struct mapped_device * md)2551 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2552 {
2553 	return md->type;
2554 }
2555 
dm_get_immutable_target_type(struct mapped_device * md)2556 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2557 {
2558 	return md->immutable_target_type;
2559 }
2560 
2561 /*
2562  * Setup the DM device's queue based on md's type
2563  */
dm_setup_md_queue(struct mapped_device * md,struct dm_table * t)2564 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2565 {
2566 	enum dm_queue_mode type = dm_table_get_type(t);
2567 	struct queue_limits limits;
2568 	struct table_device *td;
2569 	int r;
2570 
2571 	WARN_ON_ONCE(type == DM_TYPE_NONE);
2572 
2573 	if (type == DM_TYPE_REQUEST_BASED) {
2574 		md->disk->fops = &dm_rq_blk_dops;
2575 		r = dm_mq_init_request_queue(md, t);
2576 		if (r) {
2577 			DMERR("Cannot initialize queue for request-based dm mapped device");
2578 			return r;
2579 		}
2580 	}
2581 
2582 	r = dm_calculate_queue_limits(t, &limits);
2583 	if (r) {
2584 		DMERR("Cannot calculate initial queue limits");
2585 		return r;
2586 	}
2587 	r = dm_table_set_restrictions(t, md->queue, &limits);
2588 	if (r)
2589 		return r;
2590 
2591 	/*
2592 	 * Hold the lock to make sure add_disk() and del_gendisk() can't run
2593 	 * concurrently with open_table_device() and close_table_device().
2594 	 */
2595 	mutex_lock(&md->table_devices_lock);
2596 	r = add_disk(md->disk);
2597 	mutex_unlock(&md->table_devices_lock);
2598 	if (r)
2599 		return r;
2600 
2601 	/*
2602 	 * Register the holder relationship for devices added before the disk
2603 	 * was live.
2604 	 */
2605 	list_for_each_entry(td, &md->table_devices, list) {
2606 		r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2607 		if (r)
2608 			goto out_undo_holders;
2609 	}
2610 
2611 	r = dm_sysfs_init(md);
2612 	if (r)
2613 		goto out_undo_holders;
2614 
2615 	md->type = type;
2616 	return 0;
2617 
2618 out_undo_holders:
2619 	list_for_each_entry_continue_reverse(td, &md->table_devices, list)
2620 		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
2621 	mutex_lock(&md->table_devices_lock);
2622 	del_gendisk(md->disk);
2623 	mutex_unlock(&md->table_devices_lock);
2624 	return r;
2625 }
2626 
dm_get_md(dev_t dev)2627 struct mapped_device *dm_get_md(dev_t dev)
2628 {
2629 	struct mapped_device *md;
2630 	unsigned int minor = MINOR(dev);
2631 
2632 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2633 		return NULL;
2634 
2635 	spin_lock(&_minor_lock);
2636 
2637 	md = idr_find(&_minor_idr, minor);
2638 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2639 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2640 		md = NULL;
2641 		goto out;
2642 	}
2643 	dm_get(md);
2644 out:
2645 	spin_unlock(&_minor_lock);
2646 
2647 	return md;
2648 }
2649 EXPORT_SYMBOL_GPL(dm_get_md);
2650 
dm_get_mdptr(struct mapped_device * md)2651 void *dm_get_mdptr(struct mapped_device *md)
2652 {
2653 	return md->interface_ptr;
2654 }
2655 
dm_set_mdptr(struct mapped_device * md,void * ptr)2656 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2657 {
2658 	md->interface_ptr = ptr;
2659 }
2660 
dm_get(struct mapped_device * md)2661 void dm_get(struct mapped_device *md)
2662 {
2663 	atomic_inc(&md->holders);
2664 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2665 }
2666 
dm_hold(struct mapped_device * md)2667 int dm_hold(struct mapped_device *md)
2668 {
2669 	spin_lock(&_minor_lock);
2670 	if (test_bit(DMF_FREEING, &md->flags)) {
2671 		spin_unlock(&_minor_lock);
2672 		return -EBUSY;
2673 	}
2674 	dm_get(md);
2675 	spin_unlock(&_minor_lock);
2676 	return 0;
2677 }
2678 EXPORT_SYMBOL_GPL(dm_hold);
2679 
dm_device_name(struct mapped_device * md)2680 const char *dm_device_name(struct mapped_device *md)
2681 {
2682 	return md->name;
2683 }
2684 EXPORT_SYMBOL_GPL(dm_device_name);
2685 
__dm_destroy(struct mapped_device * md,bool wait)2686 static void __dm_destroy(struct mapped_device *md, bool wait)
2687 {
2688 	struct dm_table *map;
2689 	int srcu_idx;
2690 
2691 	might_sleep();
2692 
2693 	spin_lock(&_minor_lock);
2694 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2695 	set_bit(DMF_FREEING, &md->flags);
2696 	spin_unlock(&_minor_lock);
2697 
2698 	blk_mark_disk_dead(md->disk);
2699 
2700 	/*
2701 	 * Take suspend_lock so that presuspend and postsuspend methods
2702 	 * do not race with internal suspend.
2703 	 */
2704 	mutex_lock(&md->suspend_lock);
2705 	map = dm_get_live_table(md, &srcu_idx);
2706 	if (!dm_suspended_md(md)) {
2707 		dm_table_presuspend_targets(map);
2708 		set_bit(DMF_SUSPENDED, &md->flags);
2709 		set_bit(DMF_POST_SUSPENDING, &md->flags);
2710 		dm_table_postsuspend_targets(map);
2711 	}
2712 	/* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
2713 	dm_put_live_table(md, srcu_idx);
2714 	mutex_unlock(&md->suspend_lock);
2715 
2716 	/*
2717 	 * Rare, but there may still be I/O requests in flight that have yet
2718 	 * to complete.  Wait for all references to disappear.
2719 	 * No one should increment the reference count of the mapped_device
2720 	 * after the mapped_device state becomes DMF_FREEING.
2721 	 */
2722 	if (wait)
2723 		while (atomic_read(&md->holders))
2724 			fsleep(1000);
2725 	else if (atomic_read(&md->holders))
2726 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2727 		       dm_device_name(md), atomic_read(&md->holders));
2728 
2729 	dm_table_destroy(__unbind(md));
2730 	free_dev(md);
2731 }
2732 
dm_destroy(struct mapped_device * md)2733 void dm_destroy(struct mapped_device *md)
2734 {
2735 	__dm_destroy(md, true);
2736 }
2737 
dm_destroy_immediate(struct mapped_device * md)2738 void dm_destroy_immediate(struct mapped_device *md)
2739 {
2740 	__dm_destroy(md, false);
2741 }
2742 
dm_put(struct mapped_device * md)2743 void dm_put(struct mapped_device *md)
2744 {
2745 	atomic_dec(&md->holders);
2746 }
2747 EXPORT_SYMBOL_GPL(dm_put);
2748 
dm_in_flight_bios(struct mapped_device * md)2749 static bool dm_in_flight_bios(struct mapped_device *md)
2750 {
2751 	int cpu;
2752 	unsigned long sum = 0;
2753 
2754 	for_each_possible_cpu(cpu)
2755 		sum += *per_cpu_ptr(md->pending_io, cpu);
2756 
2757 	return sum != 0;
2758 }
2759 
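/*
 * Wait until all in-flight bios for @md have completed, or return
 * -ERESTARTSYS if a signal is received and @task_state allows it.
 */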
dm_wait_for_bios_completion(struct mapped_device * md,unsigned int task_state)2760 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2761 {
2762 	int r = 0;
2763 	DEFINE_WAIT(wait);
2764 
2765 	while (true) {
2766 		prepare_to_wait(&md->wait, &wait, task_state);
2767 
2768 		if (!dm_in_flight_bios(md))
2769 			break;
2770 
2771 		if (signal_pending_state(task_state, current)) {
2772 			r = -ERESTARTSYS;
2773 			break;
2774 		}
2775 
2776 		io_schedule();
2777 	}
2778 	finish_wait(&md->wait, &wait);
2779 
2780 	smp_rmb();
2781 
2782 	return r;
2783 }
2784 
dm_wait_for_completion(struct mapped_device * md,unsigned int task_state)2785 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2786 {
2787 	int r = 0;
2788 
2789 	if (!queue_is_mq(md->queue))
2790 		return dm_wait_for_bios_completion(md, task_state);
2791 
2792 	while (true) {
2793 		if (!blk_mq_queue_inflight(md->queue))
2794 			break;
2795 
2796 		if (signal_pending_state(task_state, current)) {
2797 			r = -ERESTARTSYS;
2798 			break;
2799 		}
2800 
2801 		fsleep(5000);
2802 	}
2803 
2804 	return r;
2805 }
2806 
2807 /*
2808  * Process the deferred bios
2809  */
dm_wq_work(struct work_struct * work)2810 static void dm_wq_work(struct work_struct *work)
2811 {
2812 	struct mapped_device *md = container_of(work, struct mapped_device, work);
2813 	struct bio *bio;
2814 
2815 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2816 		spin_lock_irq(&md->deferred_lock);
2817 		bio = bio_list_pop(&md->deferred);
2818 		spin_unlock_irq(&md->deferred_lock);
2819 
2820 		if (!bio)
2821 			break;
2822 
2823 		submit_bio_noacct(bio);
2824 		cond_resched();
2825 	}
2826 }
2827 
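/*
 * Allow bio submission again and kick the worker that resubmits the bios
 * deferred while the device was suspended.
 */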
dm_queue_flush(struct mapped_device * md)2828 static void dm_queue_flush(struct mapped_device *md)
2829 {
2830 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2831 	smp_mb__after_atomic();
2832 	queue_work(md->wq, &md->work);
2833 }
2834 
2835 /*
2836  * Swap in a new table, returning the old one for the caller to destroy.
2837  */
dm_swap_table(struct mapped_device * md,struct dm_table * table)2838 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2839 {
2840 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2841 	struct queue_limits limits;
2842 	int r;
2843 
2844 	mutex_lock(&md->suspend_lock);
2845 
2846 	/* device must be suspended */
2847 	if (!dm_suspended_md(md))
2848 		goto out;
2849 
2850 	/*
2851 	 * If the new table has no data devices, retain the existing limits.
2852 	 * This helps multipath with queue_if_no_path: if all paths disappear,
2853 	 * new I/O is queued based on these limits until some paths
2854 	 * reappear.
2855 	 */
2856 	if (dm_table_has_no_data_devices(table)) {
2857 		live_map = dm_get_live_table_fast(md);
2858 		if (live_map)
2859 			limits = md->queue->limits;
2860 		dm_put_live_table_fast(md);
2861 	}
2862 
2863 	if (!live_map) {
2864 		r = dm_calculate_queue_limits(table, &limits);
2865 		if (r) {
2866 			map = ERR_PTR(r);
2867 			goto out;
2868 		}
2869 	}
2870 
2871 	map = __bind(md, table, &limits);
2872 	dm_issue_global_event();
2873 
2874 out:
2875 	mutex_unlock(&md->suspend_lock);
2876 	return map;
2877 }
2878 
2879 /*
2880  * Functions to lock and unlock any filesystem running on the
2881  * device.
2882  */
lock_fs(struct mapped_device * md)2883 static int lock_fs(struct mapped_device *md)
2884 {
2885 	int r;
2886 
2887 	WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2888 
2889 	r = bdev_freeze(md->disk->part0);
2890 	if (!r)
2891 		set_bit(DMF_FROZEN, &md->flags);
2892 	return r;
2893 }
2894 
unlock_fs(struct mapped_device * md)2895 static void unlock_fs(struct mapped_device *md)
2896 {
2897 	if (!test_bit(DMF_FROZEN, &md->flags))
2898 		return;
2899 	bdev_thaw(md->disk->part0);
2900 	clear_bit(DMF_FROZEN, &md->flags);
2901 }
2902 
2903 /*
2904  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2905  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2906  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2907  *
2908  * If __dm_suspend returns 0, the device is completely quiescent
2909  * now. There is no request-processing activity. All new requests
2910  * are being added to md->deferred list.
2911  */
__dm_suspend(struct mapped_device * md,struct dm_table * map,unsigned int suspend_flags,unsigned int task_state,int dmf_suspended_flag)2912 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2913 			unsigned int suspend_flags, unsigned int task_state,
2914 			int dmf_suspended_flag)
2915 {
2916 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2917 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2918 	int r;
2919 
2920 	lockdep_assert_held(&md->suspend_lock);
2921 
2922 	/*
2923 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2924 	 * This flag is cleared before dm_suspend returns.
2925 	 */
2926 	if (noflush)
2927 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2928 	else
2929 		DMDEBUG("%s: suspending with flush", dm_device_name(md));
2930 
2931 	/*
2932 	 * This gets reverted if there's an error later and the targets
2933 	 * provide the .presuspend_undo hook.
2934 	 */
2935 	dm_table_presuspend_targets(map);
2936 
2937 	/*
2938 	 * Flush I/O to the device.
2939 	 * Any I/O submitted after lock_fs() may not be flushed.
2940 	 * noflush takes precedence over do_lockfs.
2941 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2942 	 */
2943 	if (!noflush && do_lockfs) {
2944 		r = lock_fs(md);
2945 		if (r) {
2946 			dm_table_presuspend_undo_targets(map);
2947 			return r;
2948 		}
2949 	}
2950 
2951 	/*
2952 	 * Here we must make sure that no processes are submitting requests
2953 	 * to target drivers i.e. no one may be executing
2954 	 * dm_split_and_process_bio from dm_submit_bio.
2955 	 *
2956 	 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2957 	 * we take the write lock. To prevent any process from reentering
2958 	 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2959 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2960 	 * flush_workqueue(md->wq).
2961 	 */
2962 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2963 	if (map)
2964 		synchronize_srcu(&md->io_barrier);
2965 
2966 	/*
2967 	 * Stop md->queue before flushing md->wq in case request-based
2968 	 * dm defers requests to md->wq from md->queue.
2969 	 */
2970 	if (dm_request_based(md))
2971 		dm_stop_queue(md->queue);
2972 
2973 	flush_workqueue(md->wq);
2974 
2975 	/*
2976 	 * At this point no more requests are entering target request routines.
2977 	 * We call dm_wait_for_completion to wait for all existing requests
2978 	 * to finish.
2979 	 */
2980 	r = dm_wait_for_completion(md, task_state);
2981 	if (!r)
2982 		set_bit(dmf_suspended_flag, &md->flags);
2983 
2984 	if (noflush)
2985 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2986 	if (map)
2987 		synchronize_srcu(&md->io_barrier);
2988 
2989 	/* were we interrupted ? */
2990 	if (r < 0) {
2991 		dm_queue_flush(md);
2992 
2993 		if (dm_request_based(md))
2994 			dm_start_queue(md->queue);
2995 
2996 		unlock_fs(md);
2997 		dm_table_presuspend_undo_targets(map);
2998 		/* pushback list is already flushed, so skip flush */
2999 	}
3000 
3001 	return r;
3002 }
3003 
3004 /*
3005  * We need to be able to change a mapping table under a mounted
3006  * filesystem.  For example we might want to move some data in
3007  * the background.  Before the table can be swapped with
3008  * dm_bind_table, dm_suspend must be called to flush any in
3009  * flight bios and ensure that any further io gets deferred.
3010  */
3011 /*
3012  * Suspend mechanism in request-based dm.
3013  *
3014  * 1. Flush all I/Os by lock_fs() if needed.
3015  * 2. Stop dispatching any I/O by stopping the request_queue.
3016  * 3. Wait for all in-flight I/Os to be completed or requeued.
3017  *
3018  * To abort suspend, start the request_queue.
3019  */
dm_suspend(struct mapped_device * md,unsigned int suspend_flags)3020 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
3021 {
3022 	struct dm_table *map = NULL;
3023 	int r = 0;
3024 
3025 retry:
3026 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3027 
3028 	if (dm_suspended_md(md)) {
3029 		r = -EINVAL;
3030 		goto out_unlock;
3031 	}
3032 
3033 	if (dm_suspended_internally_md(md)) {
3034 		/* already internally suspended, wait for internal resume */
3035 		mutex_unlock(&md->suspend_lock);
3036 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3037 		if (r)
3038 			return r;
3039 		goto retry;
3040 	}
3041 
3042 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3043 	if (!map) {
3044 		/* avoid deadlock with fs/namespace.c:do_mount() */
3045 		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
3046 	}
3047 
3048 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
3049 	if (r)
3050 		goto out_unlock;
3051 
3052 	set_bit(DMF_POST_SUSPENDING, &md->flags);
3053 	dm_table_postsuspend_targets(map);
3054 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
3055 
3056 out_unlock:
3057 	mutex_unlock(&md->suspend_lock);
3058 	return r;
3059 }
3060 
__dm_resume(struct mapped_device * md,struct dm_table * map)3061 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
3062 {
3063 	if (map) {
3064 		int r = dm_table_resume_targets(map);
3065 
3066 		if (r)
3067 			return r;
3068 	}
3069 
3070 	dm_queue_flush(md);
3071 
3072 	/*
3073 	 * Flushing deferred I/Os must be done after targets are resumed
3074 	 * so that mapping of targets can work correctly.
3075 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
3076 	 */
3077 	if (dm_request_based(md))
3078 		dm_start_queue(md->queue);
3079 
3080 	unlock_fs(md);
3081 
3082 	return 0;
3083 }
3084 
dm_resume(struct mapped_device * md)3085 int dm_resume(struct mapped_device *md)
3086 {
3087 	int r;
3088 	struct dm_table *map = NULL;
3089 
3090 retry:
3091 	r = -EINVAL;
3092 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3093 
3094 	if (!dm_suspended_md(md))
3095 		goto out;
3096 
3097 	if (dm_suspended_internally_md(md)) {
3098 		/* already internally suspended, wait for internal resume */
3099 		mutex_unlock(&md->suspend_lock);
3100 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3101 		if (r)
3102 			return r;
3103 		goto retry;
3104 	}
3105 
3106 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3107 	if (!map || !dm_table_get_size(map))
3108 		goto out;
3109 
3110 	r = __dm_resume(md, map);
3111 	if (r)
3112 		goto out;
3113 
3114 	clear_bit(DMF_SUSPENDED, &md->flags);
3115 out:
3116 	mutex_unlock(&md->suspend_lock);
3117 
3118 	return r;
3119 }
3120 
3121 /*
3122  * Internal suspend/resume works like userspace-driven suspend. It waits
3123  * until all bios finish and prevents issuing new bios to the target drivers.
3124  * It may be used only from the kernel.
3125  */
3126 
__dm_internal_suspend(struct mapped_device * md,unsigned int suspend_flags)3127 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
3128 {
3129 	struct dm_table *map = NULL;
3130 
3131 	lockdep_assert_held(&md->suspend_lock);
3132 
3133 	if (md->internal_suspend_count++)
3134 		return; /* nested internal suspend */
3135 
3136 	if (dm_suspended_md(md)) {
3137 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3138 		return; /* nest suspend */
3139 	}
3140 
3141 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3142 
3143 	/*
3144 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
3145 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
3146 	 * would require changing .presuspend to return an error -- avoid this
3147 	 * until there is a need for more elaborate variants of internal suspend.
3148 	 */
3149 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
3150 			    DMF_SUSPENDED_INTERNALLY);
3151 
3152 	set_bit(DMF_POST_SUSPENDING, &md->flags);
3153 	dm_table_postsuspend_targets(map);
3154 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
3155 }
3156 
__dm_internal_resume(struct mapped_device * md)3157 static void __dm_internal_resume(struct mapped_device *md)
3158 {
3159 	int r;
3160 	struct dm_table *map;
3161 
3162 	BUG_ON(!md->internal_suspend_count);
3163 
3164 	if (--md->internal_suspend_count)
3165 		return; /* resume from nested internal suspend */
3166 
3167 	if (dm_suspended_md(md))
3168 		goto done; /* resume from nested suspend */
3169 
3170 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3171 	r = __dm_resume(md, map);
3172 	if (r) {
3173 		/*
3174 		 * If a preresume method of some target failed, we are in a
3175 		 * tricky situation. We can't return an error to the caller. We
3176 		 * can't fake success because then the "resume" and
3177 		 * "postsuspend" methods would not be paired correctly, and it
3178 		 * would break various targets, for example it would cause list
3179 		 * corruption in the "origin" target.
3180 		 *
3181 		 * So, we fake normal suspend here, to make sure that the
3182 		 * "resume" and "postsuspend" methods will be paired correctly.
3183 		 */
3184 		DMERR("Preresume method failed: %d", r);
3185 		set_bit(DMF_SUSPENDED, &md->flags);
3186 	}
3187 done:
3188 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3189 	smp_mb__after_atomic();
3190 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3191 }
3192 
dm_internal_suspend_noflush(struct mapped_device * md)3193 void dm_internal_suspend_noflush(struct mapped_device *md)
3194 {
3195 	mutex_lock(&md->suspend_lock);
3196 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3197 	mutex_unlock(&md->suspend_lock);
3198 }
3199 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3200 
dm_internal_resume(struct mapped_device * md)3201 void dm_internal_resume(struct mapped_device *md)
3202 {
3203 	mutex_lock(&md->suspend_lock);
3204 	__dm_internal_resume(md);
3205 	mutex_unlock(&md->suspend_lock);
3206 }
3207 EXPORT_SYMBOL_GPL(dm_internal_resume);
3208 
3209 /*
3210  * Fast variants of internal suspend/resume hold md->suspend_lock,
3211  * which prevents interaction with userspace-driven suspend.
3212  */
3213 
dm_internal_suspend_fast(struct mapped_device * md)3214 void dm_internal_suspend_fast(struct mapped_device *md)
3215 {
3216 	mutex_lock(&md->suspend_lock);
3217 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3218 		return;
3219 
3220 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3221 	synchronize_srcu(&md->io_barrier);
3222 	flush_workqueue(md->wq);
3223 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3224 }
3225 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3226 
dm_internal_resume_fast(struct mapped_device * md)3227 void dm_internal_resume_fast(struct mapped_device *md)
3228 {
3229 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3230 		goto done;
3231 
3232 	dm_queue_flush(md);
3233 
3234 done:
3235 	mutex_unlock(&md->suspend_lock);
3236 }
3237 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3238 
3239 /*
3240  *---------------------------------------------------------------
3241  * Event notification.
3242  *---------------------------------------------------------------
3243  */
dm_kobject_uevent(struct mapped_device * md,enum kobject_action action,unsigned int cookie,bool need_resize_uevent)3244 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3245 		      unsigned int cookie, bool need_resize_uevent)
3246 {
3247 	int r;
3248 	unsigned int noio_flag;
3249 	char udev_cookie[DM_COOKIE_LENGTH];
3250 	char *envp[3] = { NULL, NULL, NULL };
3251 	char **envpp = envp;
3252 	if (cookie) {
3253 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3254 			 DM_COOKIE_ENV_VAR_NAME, cookie);
3255 		*envpp++ = udev_cookie;
3256 	}
3257 	if (need_resize_uevent) {
3258 		*envpp++ = "RESIZE=1";
3259 	}
3260 
3261 	noio_flag = memalloc_noio_save();
3262 
3263 	r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
3264 
3265 	memalloc_noio_restore(noio_flag);
3266 
3267 	return r;
3268 }
3269 
dm_next_uevent_seq(struct mapped_device * md)3270 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3271 {
3272 	return atomic_add_return(1, &md->uevent_seq);
3273 }
3274 
dm_get_event_nr(struct mapped_device * md)3275 uint32_t dm_get_event_nr(struct mapped_device *md)
3276 {
3277 	return atomic_read(&md->event_nr);
3278 }
3279 
dm_wait_event(struct mapped_device * md,int event_nr)3280 int dm_wait_event(struct mapped_device *md, int event_nr)
3281 {
3282 	return wait_event_interruptible(md->eventq,
3283 			(event_nr != atomic_read(&md->event_nr)));
3284 }
3285 
dm_uevent_add(struct mapped_device * md,struct list_head * elist)3286 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3287 {
3288 	unsigned long flags;
3289 
3290 	spin_lock_irqsave(&md->uevent_lock, flags);
3291 	list_add(elist, &md->uevent_list);
3292 	spin_unlock_irqrestore(&md->uevent_lock, flags);
3293 }
3294 
3295 /*
3296  * The gendisk is only valid as long as you have a reference
3297  * count on 'md'.
3298  */
dm_disk(struct mapped_device * md)3299 struct gendisk *dm_disk(struct mapped_device *md)
3300 {
3301 	return md->disk;
3302 }
3303 EXPORT_SYMBOL_GPL(dm_disk);
3304 
dm_kobject(struct mapped_device * md)3305 struct kobject *dm_kobject(struct mapped_device *md)
3306 {
3307 	return &md->kobj_holder.kobj;
3308 }
3309 
dm_get_from_kobject(struct kobject * kobj)3310 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3311 {
3312 	struct mapped_device *md;
3313 
3314 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3315 
3316 	spin_lock(&_minor_lock);
3317 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
3318 		md = NULL;
3319 		goto out;
3320 	}
3321 	dm_get(md);
3322 out:
3323 	spin_unlock(&_minor_lock);
3324 
3325 	return md;
3326 }
3327 
dm_suspended_md(struct mapped_device * md)3328 int dm_suspended_md(struct mapped_device *md)
3329 {
3330 	return test_bit(DMF_SUSPENDED, &md->flags);
3331 }
3332 
dm_post_suspending_md(struct mapped_device * md)3333 static int dm_post_suspending_md(struct mapped_device *md)
3334 {
3335 	return test_bit(DMF_POST_SUSPENDING, &md->flags);
3336 }
3337 
dm_suspended_internally_md(struct mapped_device * md)3338 int dm_suspended_internally_md(struct mapped_device *md)
3339 {
3340 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3341 }
3342 
dm_test_deferred_remove_flag(struct mapped_device * md)3343 int dm_test_deferred_remove_flag(struct mapped_device *md)
3344 {
3345 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3346 }
3347 
dm_suspended(struct dm_target * ti)3348 int dm_suspended(struct dm_target *ti)
3349 {
3350 	return dm_suspended_md(ti->table->md);
3351 }
3352 EXPORT_SYMBOL_GPL(dm_suspended);
3353 
dm_post_suspending(struct dm_target * ti)3354 int dm_post_suspending(struct dm_target *ti)
3355 {
3356 	return dm_post_suspending_md(ti->table->md);
3357 }
3358 EXPORT_SYMBOL_GPL(dm_post_suspending);
3359 
dm_noflush_suspending(struct dm_target * ti)3360 int dm_noflush_suspending(struct dm_target *ti)
3361 {
3362 	return __noflush_suspending(ti->table->md);
3363 }
3364 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3365 
dm_free_md_mempools(struct dm_md_mempools * pools)3366 void dm_free_md_mempools(struct dm_md_mempools *pools)
3367 {
3368 	if (!pools)
3369 		return;
3370 
3371 	bioset_exit(&pools->bs);
3372 	bioset_exit(&pools->io_bs);
3373 
3374 	kfree(pools);
3375 }
3376 
3377 struct dm_blkdev_id {
3378 	u8 *id;
3379 	enum blk_unique_id type;
3380 };
3381 
__dm_get_unique_id(struct dm_target * ti,struct dm_dev * dev,sector_t start,sector_t len,void * data)3382 static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev,
3383 				sector_t start, sector_t len, void *data)
3384 {
3385 	struct dm_blkdev_id *dm_id = data;
3386 	const struct block_device_operations *fops = dev->bdev->bd_disk->fops;
3387 
3388 	if (!fops->get_unique_id)
3389 		return 0;
3390 
3391 	return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type);
3392 }
3393 
3394 /*
3395  * Allow access to get_unique_id() for the first device returning a
3396  * non-zero result.  Reasonable use expects all devices to have the
3397  * same unique id.
3398  */
dm_blk_get_unique_id(struct gendisk * disk,u8 * id,enum blk_unique_id type)3399 static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id,
3400 		enum blk_unique_id type)
3401 {
3402 	struct mapped_device *md = disk->private_data;
3403 	struct dm_table *table;
3404 	struct dm_target *ti;
3405 	int ret = 0, srcu_idx;
3406 
3407 	struct dm_blkdev_id dm_id = {
3408 		.id = id,
3409 		.type = type,
3410 	};
3411 
3412 	table = dm_get_live_table(md, &srcu_idx);
3413 	if (!table || !dm_table_get_size(table))
3414 		goto out;
3415 
3416 	/* We only support devices that have a single target */
3417 	if (table->num_targets != 1)
3418 		goto out;
3419 	ti = dm_table_get_target(table, 0);
3420 
3421 	if (!ti->type->iterate_devices)
3422 		goto out;
3423 
3424 	ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id);
3425 out:
3426 	dm_put_live_table(md, srcu_idx);
3427 	return ret;
3428 }
3429 
3430 struct dm_pr {
3431 	u64	old_key;
3432 	u64	new_key;
3433 	u32	flags;
3434 	bool	abort;
3435 	bool	fail_early;
3436 	int	ret;
3437 	enum pr_type type;
3438 	struct pr_keys *read_keys;
3439 	struct pr_held_reservation *rsv;
3440 };
3441 
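/*
 * Run @fn against the devices of the (single) live target.  Persistent
 * reservation operations are only supported on single-target tables.
 */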
dm_call_pr(struct block_device * bdev,iterate_devices_callout_fn fn,struct dm_pr * pr)3442 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3443 		      struct dm_pr *pr)
3444 {
3445 	struct mapped_device *md = bdev->bd_disk->private_data;
3446 	struct dm_table *table;
3447 	struct dm_target *ti;
3448 	int ret = -ENOTTY, srcu_idx;
3449 
3450 	table = dm_get_live_table(md, &srcu_idx);
3451 	if (!table || !dm_table_get_size(table))
3452 		goto out;
3453 
3454 	/* We only support devices that have a single target */
3455 	if (table->num_targets != 1)
3456 		goto out;
3457 	ti = dm_table_get_target(table, 0);
3458 
3459 	if (dm_suspended_md(md)) {
3460 		ret = -EAGAIN;
3461 		goto out;
3462 	}
3463 
3464 	ret = -EINVAL;
3465 	if (!ti->type->iterate_devices)
3466 		goto out;
3467 
3468 	ti->type->iterate_devices(ti, fn, pr);
3469 	ret = 0;
3470 out:
3471 	dm_put_live_table(md, srcu_idx);
3472 	return ret;
3473 }
3474 
3475 /*
3476  * For register / unregister we need to manually call out to every path.
3477  */
__dm_pr_register(struct dm_target * ti,struct dm_dev * dev,sector_t start,sector_t len,void * data)3478 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3479 			    sector_t start, sector_t len, void *data)
3480 {
3481 	struct dm_pr *pr = data;
3482 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3483 	int ret;
3484 
3485 	if (!ops || !ops->pr_register) {
3486 		pr->ret = -EOPNOTSUPP;
3487 		return -1;
3488 	}
3489 
3490 	ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3491 	if (!ret)
3492 		return 0;
3493 
3494 	if (!pr->ret)
3495 		pr->ret = ret;
3496 
3497 	if (pr->fail_early)
3498 		return -1;
3499 
3500 	return 0;
3501 }
3502 
dm_pr_register(struct block_device * bdev,u64 old_key,u64 new_key,u32 flags)3503 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3504 			  u32 flags)
3505 {
3506 	struct dm_pr pr = {
3507 		.old_key	= old_key,
3508 		.new_key	= new_key,
3509 		.flags		= flags,
3510 		.fail_early	= true,
3511 		.ret		= 0,
3512 	};
3513 	int ret;
3514 
3515 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3516 	if (ret) {
3517 		/* Didn't even get to register a path */
3518 		return ret;
3519 	}
3520 
3521 	if (!pr.ret)
3522 		return 0;
3523 	ret = pr.ret;
3524 
3525 	if (!new_key)
3526 		return ret;
3527 
3528 	/* unregister all paths if we failed to register any path */
3529 	pr.old_key = new_key;
3530 	pr.new_key = 0;
3531 	pr.flags = 0;
3532 	pr.fail_early = false;
3533 	(void) dm_call_pr(bdev, __dm_pr_register, &pr);
3534 	return ret;
3535 }
3536 
3537 
__dm_pr_reserve(struct dm_target * ti,struct dm_dev * dev,sector_t start,sector_t len,void * data)3538 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
3539 			   sector_t start, sector_t len, void *data)
3540 {
3541 	struct dm_pr *pr = data;
3542 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3543 
3544 	if (!ops || !ops->pr_reserve) {
3545 		pr->ret = -EOPNOTSUPP;
3546 		return -1;
3547 	}
3548 
3549 	pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
3550 	if (!pr->ret)
3551 		return -1;
3552 
3553 	return 0;
3554 }
3555 
dm_pr_reserve(struct block_device * bdev,u64 key,enum pr_type type,u32 flags)3556 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3557 			 u32 flags)
3558 {
3559 	struct dm_pr pr = {
3560 		.old_key	= key,
3561 		.flags		= flags,
3562 		.type		= type,
3563 		.fail_early	= false,
3564 		.ret		= 0,
3565 	};
3566 	int ret;
3567 
3568 	ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3569 	if (ret)
3570 		return ret;
3571 
3572 	return pr.ret;
3573 }
3574 
3575 /*
3576  * If there is a non-All Registrants type of reservation, the release must be
3577  * sent down the holding path. For the cases where there is no reservation or
3578  * the path is not the holder, the device will also return success, so we must
3579  * try each path to make sure we reach the correct one.
3580  */
__dm_pr_release(struct dm_target * ti,struct dm_dev * dev,sector_t start,sector_t len,void * data)3581 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
3582 			   sector_t start, sector_t len, void *data)
3583 {
3584 	struct dm_pr *pr = data;
3585 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3586 
3587 	if (!ops || !ops->pr_release) {
3588 		pr->ret = -EOPNOTSUPP;
3589 		return -1;
3590 	}
3591 
3592 	pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
3593 	if (pr->ret)
3594 		return -1;
3595 
3596 	return 0;
3597 }
3598 
dm_pr_release(struct block_device * bdev,u64 key,enum pr_type type)3599 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3600 {
3601 	struct dm_pr pr = {
3602 		.old_key	= key,
3603 		.type		= type,
3604 		.fail_early	= false,
3605 	};
3606 	int ret;
3607 
3608 	ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3609 	if (ret)
3610 		return ret;
3611 
3612 	return pr.ret;
3613 }
3614 
__dm_pr_preempt(struct dm_target * ti,struct dm_dev * dev,sector_t start,sector_t len,void * data)3615 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
3616 			   sector_t start, sector_t len, void *data)
3617 {
3618 	struct dm_pr *pr = data;
3619 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3620 
3621 	if (!ops || !ops->pr_preempt) {
3622 		pr->ret = -EOPNOTSUPP;
3623 		return -1;
3624 	}
3625 
3626 	pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
3627 				  pr->abort);
3628 	if (!pr->ret)
3629 		return -1;
3630 
3631 	return 0;
3632 }
3633 
dm_pr_preempt(struct block_device * bdev,u64 old_key,u64 new_key,enum pr_type type,bool abort)3634 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3635 			 enum pr_type type, bool abort)
3636 {
3637 	struct dm_pr pr = {
3638 		.new_key	= new_key,
3639 		.old_key	= old_key,
3640 		.type		= type,
3641 		.fail_early	= false,
3642 	};
3643 	int ret;
3644 
3645 	ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3646 	if (ret)
3647 		return ret;
3648 
3649 	return pr.ret;
3650 }

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;
	bool forward = true;

	/* Not a real ioctl, but targets must not interpret non-DM ioctls */
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
	if (r < 0)
		goto out;
	WARN_ON_ONCE(!forward);

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
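/*
 * Unlike the other PR operations, dm_pr_clear() above is not fanned out with
 * dm_call_pr(); it reuses the ioctl plumbing (dm_prepare_ioctl()) and issues
 * the clear on the single underlying device that path resolves to.
 */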

static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
			     sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_read_keys) {
		pr->ret = -EOPNOTSUPP;
		return -1;
	}

	pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
	if (!pr->ret)
		return -1;

	return 0;
}

static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
{
	struct dm_pr pr = {
		.read_keys = keys,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
	if (ret)
		return ret;

	return pr.ret;
}

static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
				    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_read_reservation) {
		pr->ret = -EOPNOTSUPP;
		return -1;
	}

	pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
	if (!pr->ret)
		return -1;

	return 0;
}

static int dm_pr_read_reservation(struct block_device *bdev,
				  struct pr_held_reservation *rsv)
{
	struct dm_pr pr = {
		.rsv = rsv,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
	if (ret)
		return ret;

	return pr.ret;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
	.pr_read_keys	= dm_pr_read_keys,
	.pr_read_reservation = dm_pr_read_reservation,
};
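/*
 * Illustrative userspace sketch (not kernel code): the dm_pr_ops table above
 * is reached through the generic persistent-reservation ioctls declared in
 * <linux/pr.h>.  Assuming /dev/dm-0 names a mapped device and 0xabc is an
 * arbitrary example key:
 *
 *	struct pr_registration reg = { .new_key = 0xabc };
 *	struct pr_reservation rsv = { .key = 0xabc, .type = PR_WRITE_EXCLUSIVE };
 *	int fd = open("/dev/dm-0", O_RDWR);
 *
 *	ioctl(fd, IOC_PR_REGISTER, &reg);
 *	ioctl(fd, IOC_PR_RESERVE, &rsv);
 *	ioctl(fd, IOC_PR_RELEASE, &rsv);
 *
 * IOC_PR_REGISTER lands in dm_pr_register(), IOC_PR_RESERVE in
 * dm_pr_reserve(), and so on for the remaining entries.
 */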

static const struct block_device_operations dm_blk_dops = {
	.submit_bio = dm_submit_bio,
	.poll_bio = dm_poll_bio,
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.get_unique_id = dm_blk_get_unique_id,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct block_device_operations dm_rq_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.get_unique_id = dm_blk_get_unique_id,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
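/*
 * dm_rq_blk_dops above is used for request-based mapped devices, which are
 * driven through blk-mq and therefore carry no ->submit_bio, ->poll_bio or
 * ->report_zones hooks; bio-based devices use dm_blk_dops instead.
 */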

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.zero_page_range = dm_dax_zero_page_range,
	.recovery_write = dm_dax_recovery_write,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
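/*
 * Illustrative examples (values are arbitrary): the writable (0644)
 * parameters above can be set at load time when dm-mod is built as a module,
 * on the kernel command line, or changed later through sysfs, e.g.:
 *
 *	modprobe dm-mod reserved_bio_based_ios=32
 *	dm_mod.dm_numa_node=1                 (kernel command line)
 *	echo 1024 > /sys/module/dm_mod/parameters/swap_bios
 */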

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");