// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */
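/*
 * Worked example (region sizes are in 512-byte sectors): MIN_REGION_SIZE is
 * 1 << 3 = 8 sectors = 8 * 512 bytes = 4KB, and MAX_REGION_SIZE is 1 << 21
 * sectors = 2^21 * 512 bytes = 1GB.
 */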

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15
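/* E.g., with HASH_TABLE_BITS == 15 the table has 1 << 15 = 32768 buckets. */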

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL,		/* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the actions taken in case it fails should run
	 * as a single atomic step.
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions to batch together during background hydration. */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	static const char * const descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}

static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * dm_clone_reload_in_core_bitset() may run concurrently with either
	 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
	 * it's safe as we have already set the metadata to read-only mode.
	 */
	__reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
	       dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return ((sector_t)region_nr << clone->region_shift);
}
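/*
 * Worked example: with an 8-sector (4KB) region size, region_shift is 3, so
 * region 5 starts at sector 5 << 3 = 40.
 */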

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end;

	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	end = bio_end_sector(bio) >> clone->region_shift;

	if (*rs >= end)
		*nr_regions = 0;
	else
		*nr_regions = end - *rs;
}
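/*
 * Note that the start is rounded up and the end is rounded down, so only
 * regions fully covered by the bio are counted. E.g., with 8-sector regions,
 * a bio spanning sectors 4..27 yields *rs = 1 and *nr_regions = 2 (regions 1
 * and 2); the partially covered regions 0 and 3 are excluded.
 */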

/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}

static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		submit_bio_noacct(bio);

	blk_finish_plug(&plug);
}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	if (!bio_triggers_commit(clone, bio)) {
		submit_bio_noacct(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to call
 * from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio has the REQ_FUA flag set we must commit the metadata
	 * before signaling its completion.
	 *
	 * complete_overwrite_bio() is only called by hydration_complete(),
	 * after having successfully updated the metadata. This means we don't
	 * need to call dm_clone_changed_this_transaction() to check if the
	 * metadata has changed and thus we can avoid taking the metadata spin
	 * lock.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a single
	 * commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, nr_regions;

	/*
	 * If the destination device supports discards, remap and trim the
	 * discard bio and pass it down. Otherwise complete the bio
	 * immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &nr_regions);
		trim_bio(bio, region_to_sector(clone, rs),
			 nr_regions << clone->region_shift);
		submit_bio_noacct(bio);
	} else
		bio_endio(bio);
}

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, nr_regions;

	bio_region_range(clone, bio, &rs, &nr_regions);
	if (!nr_regions) {
		bio_endio(bio);
		return;
	}

	if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
		    (rs + nr_regions) > clone->nr_regions)) {
		DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
		      clone_device_name(clone), rs, nr_regions,
		      clone->nr_regions,
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio_sectors(bio));
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated so we just need to pass
	 * down the discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * metadata for the regions covered by the discard so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used by hydration batching */
	struct list_head list;

	/* Used by hydration hash table */
	struct hlist_node h;
};

/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we must fall back to using regular
 * spin locks; one per hash table bucket.
 */
struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

#define bucket_lock_irq(bucket) \
	spin_lock_irq(&(bucket)->lock)

#define bucket_unlock_irq(bucket) \
	spin_unlock_irq(&(bucket)->lock)

static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc_array(sz, sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}

static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
						       unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}
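/*
 * E.g., hash_long(region_nr, HASH_TABLE_BITS) maps each region number to one
 * of the 1 << 15 buckets; hash_long() mixes its input, so adjacent region
 * numbers usually land in different buckets, spreading lock contention.
 */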

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the hydration hash table lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}

/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;

	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else {
		status = BLK_STS_OK;
	}
	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	if (WARN_ON(!nr_regions))
		return;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = region_to_sector(clone, nr_regions - 1);

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;
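	/*
	 * Worked example: with region_size = 8 sectors and ti->len = 100
	 * sectors, the last region covers only 100 & 7 = 4 sectors, so a
	 * copy that ends at the last region is trimmed accordingly.
	 */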

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * We don't need to save and restore bio->bi_private because the
	 * device mapper core generates a new bio for us to use, with clean
	 * bi_private.
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	submit_bio_noacct(bio);
}

/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. If, by the time this function is
 * called, the region has finished hydrating, the bio is submitted to the
 * destination device instead.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irq(bucket);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irq(bucket);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region.
	 */
	bucket_unlock_irq(bucket);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irq(bucket);

	/* Check if the region has been hydrated in the meantime. */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration. */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL then there is no point starting a
	 * hydration, since we will not be able to update the metadata when the
	 * hydration finishes.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If a bio overwrites a region, i.e., its size is equal to the
	 * region's size, then we don't need to copy the region from the source
	 * to the destination device.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irq(bucket);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		hydration_copy(hd, 1);
	}
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB, which
 * is good for small, random write performance (because of the overwriting of
 * un-hydrated regions) and at the same time issue big copy requests to kcopyd
 * to achieve high hydration bandwidth.
 */
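/*
 * E.g., with hydration_batch_size = 4, hydrations of the adjacent regions
 * 10, 11, 12 and 13 are merged into a single kcopyd copy request of
 * 4 * region_size sectors, instead of four separate copy requests.
 */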
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irq(bucket);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irq(bucket);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irq(bucket);

	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with device suspension.
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before test_bit(), otherwise we
	 * might race with clone_postsuspend() and start a region hydration
	 * after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

/*---------------------------------------------------------------------------*/

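/*
 * E.g., with COMMIT_PERIOD == HZ this returns true once at least one second's
 * worth of jiffies has elapsed since the last commit.
 */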
static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}

/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	r = blkdev_issue_flush(clone->dest_dev->bdev);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}

static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, nr_regions;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge_init(&discards, &clone->deferred_discard_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &nr_regions);
		/*
		 * A discard request might cover regions that have been already
		 * hydrated. There is no need to update the metadata for these
		 * regions.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge_init(&bios, &clone->deferred_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_merge_init(&bios, &clone->deferred_flush_bios);
	bio_list_merge_init(&bio_completions,
			    &clone->deferred_flush_completions);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/*
			 * We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else {
			submit_bio_noacct(bio);
		}
	}
}

static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}

/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 *   - Commit metadata, if changed
	 *
	 *   - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device and
	 * just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated and the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * start the region's hydration immediately.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
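/*
 * Illustrative INFO line (all values are made up):
 *
 *   8 72/4096 8 1024/65536 0 0 4 hydration_threshold 1 hydration_batch_size 1 rw
 */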
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;
		case CM_READ_ONLY:
			DMEMIT("ro");
			break;
		case CM_FAIL:
			DMEMIT("Fail");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return;

error:
	DMEMIT("Error");
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return bdev_nr_sectors(dev->bdev);
}

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core arguments: An even number of core arguments
 * core arguments: Key/value pairs for tuning the core
 *		   E.g. 'hydration_threshold 256'
 */
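/*
 * Example table line (hypothetical device names), cloning a 1GiB (2097152
 * sectors) source with an 8-sector region size and background hydration
 * disabled:
 *
 *   dmsetup create clone1 --table \
 *     "0 2097152 clone /dev/sdc /dev/sdb /dev/sda 8 1 no_hydration"
 */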
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize core arguments */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the region size against the device logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}
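	/*
	 * E.g., a device with a 4096-byte logical block size requires
	 * region_size to be a multiple of 4096 >> 9 = 8 sectors.
	 */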
1657
1658 clone->region_size = region_size;
1659
1660 return 0;
1661 }
1662
validate_nr_regions(unsigned long n,char ** error)1663 static int validate_nr_regions(unsigned long n, char **error)
1664 {
1665 /*
1666 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
1667 * further to 2^31 regions.
1668 */
1669 if (n > (1UL << 31)) {
1670 *error = "Too many regions. Consider increasing the region size";
1671 return -EINVAL;
1672 }
1673
1674 return 0;
1675 }
1676
parse_metadata_dev(struct clone * clone,struct dm_arg_set * as,char ** error)1677 static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
1678 {
1679 int r;
1680 sector_t metadata_dev_size;
1681
1682 r = dm_get_device(clone->ti, dm_shift_arg(as),
1683 BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->metadata_dev);
1684 if (r) {
1685 *error = "Error opening metadata device";
1686 return r;
1687 }
1688
1689 metadata_dev_size = get_dev_size(clone->metadata_dev);
1690 if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
1691 DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
1692 clone->metadata_dev->bdev, DM_CLONE_METADATA_MAX_SECTORS);
1693
1694 return 0;
1695 }
1696
parse_dest_dev(struct clone * clone,struct dm_arg_set * as,char ** error)1697 static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
1698 {
1699 int r;
1700 sector_t dest_dev_size;
1701
1702 r = dm_get_device(clone->ti, dm_shift_arg(as),
1703 BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->dest_dev);
1704 if (r) {
1705 *error = "Error opening destination device";
1706 return r;
1707 }
1708
1709 dest_dev_size = get_dev_size(clone->dest_dev);
1710 if (dest_dev_size < clone->ti->len) {
1711 dm_put_device(clone->ti, clone->dest_dev);
1712 *error = "Device size larger than destination device";
1713 return -EINVAL;
1714 }
1715
1716 return 0;
1717 }
1718
parse_source_dev(struct clone * clone,struct dm_arg_set * as,char ** error)1719 static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
1720 {
1721 int r;
1722 sector_t source_dev_size;
1723
1724 r = dm_get_device(clone->ti, dm_shift_arg(as), BLK_OPEN_READ,
1725 &clone->source_dev);
1726 if (r) {
1727 *error = "Error opening source device";
1728 return r;
1729 }
1730
1731 source_dev_size = get_dev_size(clone->source_dev);
1732 if (source_dev_size < clone->ti->len) {
1733 dm_put_device(clone->ti, clone->source_dev);
1734 *error = "Device size larger than source device";
1735 return -EINVAL;
1736 }
1737
1738 return 0;
1739 }
1740
copy_ctr_args(struct clone * clone,int argc,const char ** argv,char ** error)1741 static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
1742 {
1743 unsigned int i;
1744 const char **copy;
1745
1746 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
1747 if (!copy)
1748 goto error;
1749
1750 for (i = 0; i < argc; i++) {
1751 copy[i] = kstrdup(argv[i], GFP_KERNEL);
1752
1753 if (!copy[i]) {
1754 while (i--)
1755 kfree(copy[i]);
1756 kfree(copy);
1757 goto error;
1758 }
1759 }
1760
1761 clone->nr_ctr_args = argc;
1762 clone->ctr_args = copy;
1763 return 0;
1764
1765 error:
1766 *error = "Failed to allocate memory for table line";
1767 return -ENOMEM;
1768 }
1769
clone_ctr(struct dm_target * ti,unsigned int argc,char ** argv)1770 static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1771 {
1772 int r;
1773 sector_t nr_regions;
1774 struct clone *clone;
1775 struct dm_arg_set as;
1776
1777 if (argc < 4) {
1778 ti->error = "Invalid number of arguments";
1779 return -EINVAL;
1780 }
1781
1782 as.argc = argc;
1783 as.argv = argv;
1784
1785 clone = kzalloc(sizeof(*clone), GFP_KERNEL);
1786 if (!clone) {
1787 ti->error = "Failed to allocate clone structure";
1788 return -ENOMEM;
1789 }
1790
1791 clone->ti = ti;
1792
1793 /* Initialize dm-clone flags */
1794 __set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
1795 __set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
1796 __set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
1797
1798 r = parse_metadata_dev(clone, &as, &ti->error);
1799 if (r)
1800 goto out_with_clone;
1801
1802 r = parse_dest_dev(clone, &as, &ti->error);
1803 if (r)
1804 goto out_with_meta_dev;
1805
1806 r = parse_source_dev(clone, &as, &ti->error);
1807 if (r)
1808 goto out_with_dest_dev;
1809
1810 r = parse_region_size(clone, &as, &ti->error);
1811 if (r)
1812 goto out_with_source_dev;
1813
1814 clone->region_shift = __ffs(clone->region_size);
1815 nr_regions = dm_sector_div_up(ti->len, clone->region_size);
1816
1817 /* Check for overflow */
1818 if (nr_regions != (unsigned long)nr_regions) {
1819 ti->error = "Too many regions. Consider increasing the region size";
1820 r = -EOVERFLOW;
1821 goto out_with_source_dev;
1822 }
1823
1824 clone->nr_regions = nr_regions;
1825
1826 r = validate_nr_regions(clone->nr_regions, &ti->error);
1827 if (r)
1828 goto out_with_source_dev;
1829
1830 r = dm_set_target_max_io_len(ti, clone->region_size);
1831 if (r) {
1832 ti->error = "Failed to set max io len";
1833 goto out_with_source_dev;
1834 }
1835
1836 r = parse_feature_args(&as, clone);
1837 if (r)
1838 goto out_with_source_dev;
1839
1840 r = parse_core_args(&as, clone);
1841 if (r)
1842 goto out_with_source_dev;
1843
1844 /* Load metadata */
1845 clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
1846 clone->region_size);
1847 if (IS_ERR(clone->cmd)) {
1848 ti->error = "Failed to load metadata";
1849 r = PTR_ERR(clone->cmd);
1850 goto out_with_source_dev;
1851 }
1852
1853 __set_clone_mode(clone, CM_WRITE);
1854
	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/*
	 * Save a copy of the table line for the status output. Skip the
	 * first three arguments (the device paths); clone_status() emits
	 * the device names itself.
	 */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}

static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

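	/*
	 * Tear down in the reverse order of clone_ctr(). The delayed waker
	 * re-queues itself on clone->wq, so it must be cancelled before the
	 * workqueue is destroyed.
	 */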
	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	cancel_delayed_work_sync(&clone->waker);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);
	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}

/*---------------------------------------------------------------------------*/

static void clone_postsuspend(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	/*
	 * To successfully suspend the device:
	 *
	 * - We cancel the delayed work for periodic commits and wait for it
	 *   to finish.
	 *
	 * - We stop the background hydration, i.e. we prevent new region
	 *   hydrations from starting.
	 *
	 * - We wait for any in-flight hydrations to finish.
	 *
	 * - We flush the workqueue.
	 *
	 * - We commit the metadata.
	 */
	cancel_delayed_work_sync(&clone->waker);

	set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);

	/*
	 * Make sure set_bit() is ordered before atomic_read(), otherwise we
	 * might race with do_hydration() and miss some started region
	 * hydrations.
	 *
	 * This is paired with smp_mb__after_atomic() in do_hydration().
	 */
	smp_mb__after_atomic();

	wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
	flush_workqueue(clone->wq);

	(void) commit_metadata(clone, NULL);
}

static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
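	/* Run the waker immediately; it wakes the worker and re-arms itself. */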
	do_waker(&clone->waker.work);
}

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
	const char *reason = NULL;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_max_discard_sectors(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
		       dest_dev, reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/*
		 * No passdown is done so we set our own virtual limits:
		 * region-sized granularity and a maximum rounded down to a
		 * whole number of regions, so a single discard never spans
		 * a partial region.
		 */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
							    clone->region_size);
		return;
	}


	/*
	 * clone_iterate_devices() is stacking both the source and destination
	 * device limits but discards aren't passed to the source device, so
	 * inherit destination's limits.
	 */
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with
	 * dm-clone's region size (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		limits->io_min = clone->region_size << SECTOR_SHIFT;
		limits->io_opt = clone->region_size << SECTOR_SHIFT;
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

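	/* Report both underlying devices, so limits are stacked for both. */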
	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);
	return ret;
}

/*
 * dm-clone message functions.
 */
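/*
 * E.g., for a target named 'clone' (a usage sketch; the device name and
 * values are illustrative):
 *
 *   dmsetup message clone 0 enable_hydration
 *   dmsetup message clone 0 hydration_threshold 256
 */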
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If user space sets hydration_threshold to zero then the hydration
	 * will stop. If at a later time the hydration_threshold is increased
	 * we must restart the hydration process by waking up the worker.
	 */
	wake_worker(clone);
}

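/*
 * hydration_batch_size is read locklessly by the hydration path, hence the
 * WRITE_ONCE().
 */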
static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}

static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}

static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct clone *clone = ti->private;
	unsigned int value;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}

static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};

/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

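	/* KMEM_CACHE() derives the cache name and size from the struct itself */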
	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0) {
		kmem_cache_destroy(_hydration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}

/* Module hooks */
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");