drivers/md/dm-cache-target.c: excerpt of matched source lines
1 // SPDX-License-Identifier: GPL-2.0-only
9 #include "dm-bio-prison-v2.h"
10 #include "dm-bio-record.h"
11 #include "dm-cache-metadata.h"
12 #include "dm-io-tracker.h"
13 #include "dm-cache-background-tracker.h"
15 #include <linux/dm-io.h>
16 #include <linux/dm-kcopyd.h>
30 /*----------------------------------------------------------------*/
43 /*----------------------------------------------------------------*/
57 INIT_WORK(&k->ws, fn); in init_continuation()
58 k->input = 0; in init_continuation()
64 queue_work(wq, &k->ws); in queue_continuation()
67 /*----------------------------------------------------------------*/
117 spin_lock_irq(&b->lock); in __commit()
118 list_splice_init(&b->work_items, &work_items); in __commit()
119 bio_list_merge_init(&bios, &b->bios); in __commit()
120 b->commit_scheduled = false; in __commit()
121 spin_unlock_irq(&b->lock); in __commit()
123 r = b->commit_op(b->commit_context); in __commit()
127 k->input = r; in __commit()
128 INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */ in __commit()
129 queue_work(b->wq, ws); in __commit()
134 bio->bi_status = r; in __commit()
137 b->issue_op(bio, b->issue_context); in __commit()
148 b->commit_op = commit_op; in batcher_init()
149 b->commit_context = commit_context; in batcher_init()
150 b->issue_op = issue_op; in batcher_init()
151 b->issue_context = issue_context; in batcher_init()
152 b->wq = wq; in batcher_init()
154 spin_lock_init(&b->lock); in batcher_init()
155 INIT_LIST_HEAD(&b->work_items); in batcher_init()
156 bio_list_init(&b->bios); in batcher_init()
157 INIT_WORK(&b->commit_work, __commit); in batcher_init()
158 b->commit_scheduled = false; in batcher_init()
163 queue_work(b->wq, &b->commit_work); in async_commit()
170 spin_lock_irq(&b->lock); in continue_after_commit()
171 commit_scheduled = b->commit_scheduled; in continue_after_commit()
172 list_add_tail(&k->ws.entry, &b->work_items); in continue_after_commit()
173 spin_unlock_irq(&b->lock); in continue_after_commit()
186 spin_lock_irq(&b->lock); in issue_after_commit()
187 commit_scheduled = b->commit_scheduled; in issue_after_commit()
188 bio_list_add(&b->bios, bio); in issue_after_commit()
189 spin_unlock_irq(&b->lock); in issue_after_commit()
196 * Call this if some urgent work is waiting for the commit to complete.
202 spin_lock_irq(&b->lock); in schedule_commit()
203 immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios); in schedule_commit()
204 b->commit_scheduled = true; in schedule_commit()
205 spin_unlock_irq(&b->lock); in schedule_commit()
223 h->bi_end_io = bio->bi_end_io; in dm_hook_bio()
225 bio->bi_end_io = bi_end_io; in dm_hook_bio()
226 bio->bi_private = bi_private; in dm_hook_bio()
231 bio->bi_end_io = h->bi_end_io; in dm_unhook_bio()
234 /*----------------------------------------------------------------*/
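/*
 * Minimal sketch of the hook/unhook pattern in dm_hook_bio()/dm_unhook_bio()
 * above: stash the original completion callback and private pointer, install
 * your own, and restore the originals once the interception has run.  struct
 * request_demo is an invented stand-in for struct bio, purely illustrative.
 */
struct request_demo {
	void (*end_io)(struct request_demo *rq);
	void *private;
};

struct hook_info_demo {
	void (*saved_end_io)(struct request_demo *rq);
	void *saved_private;
};

static void hook_demo(struct hook_info_demo *h, struct request_demo *rq,
		      void (*end_io)(struct request_demo *rq), void *private)
{
	h->saved_end_io = rq->end_io;
	h->saved_private = rq->private;
	rq->end_io = end_io;
	rq->private = private;
}

static void unhook_demo(struct hook_info_demo *h, struct request_demo *rq)
{
	rq->end_io = h->saved_end_io;
	rq->private = h->saved_private;
}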
386 * Cache features such as write-through.
437 /*----------------------------------------------------------------*/
441 return cache->features.io_mode == CM_IO_WRITETHROUGH; in writethrough_mode()
446 return cache->features.io_mode == CM_IO_WRITEBACK; in writeback_mode()
451 return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH); in passthrough_mode()
454 /*----------------------------------------------------------------*/
458 queue_work(cache->wq, &cache->deferred_bio_worker); in wake_deferred_bio_worker()
466 queue_work(cache->wq, &cache->migration_worker); in wake_migration_worker()
469 /*----------------------------------------------------------------*/
473 return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO); in alloc_prison_cell()
478 dm_bio_prison_free_cell_v2(cache->prison, cell); in free_prison_cell()
485 mg = mempool_alloc(&cache->migration_pool, GFP_NOIO); in alloc_migration()
489 mg->cache = cache; in alloc_migration()
490 atomic_inc(&cache->nr_allocated_migrations); in alloc_migration()
497 struct cache *cache = mg->cache; in free_migration()
499 if (atomic_dec_and_test(&cache->nr_allocated_migrations)) in free_migration()
500 wake_up(&cache->migration_wait); in free_migration()
502 mempool_free(mg, &cache->migration_pool); in free_migration()
505 /*----------------------------------------------------------------*/
514 key->virtual = 0; in build_key()
515 key->dev = 0; in build_key()
516 key->block_begin = from_oblock(begin); in build_key()
517 key->block_end = from_oblock(end); in build_key()
535 *--------------------------------------------------------------
537 *--------------------------------------------------------------
552 pb->tick = false; in init_per_bio_data()
553 pb->req_nr = dm_bio_get_target_bio_nr(bio); in init_per_bio_data()
554 pb->cell = NULL; in init_per_bio_data()
555 pb->len = 0; in init_per_bio_data()
560 /*----------------------------------------------------------------*/
564 spin_lock_irq(&cache->lock); in defer_bio()
565 bio_list_add(&cache->deferred_bios, bio); in defer_bio()
566 spin_unlock_irq(&cache->lock); in defer_bio()
573 spin_lock_irq(&cache->lock); in defer_bios()
574 bio_list_merge_init(&cache->deferred_bios, bios); in defer_bios()
575 spin_unlock_irq(&cache->lock); in defer_bios()
580 /*----------------------------------------------------------------*/
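/*
 * Sketch of the defer_bio()/process_deferred_bios() pattern above: producers
 * append to a shared list under a lock, and the worker takes the whole list
 * in one short critical section (the bio_list_merge_init() splice) and then
 * processes it with the lock dropped.  pthreads and a singly linked list
 * stand in for the spinlock and struct bio_list; all names are invented.
 */
#include <pthread.h>
#include <stddef.h>

struct work_demo {
	struct work_demo *next;
};

struct deferred_demo {
	pthread_mutex_t lock;
	struct work_demo *head, *tail;
};

static void defer_demo(struct deferred_demo *d, struct work_demo *w)
{
	w->next = NULL;
	pthread_mutex_lock(&d->lock);
	if (d->tail)
		d->tail->next = w;
	else
		d->head = w;
	d->tail = w;
	pthread_mutex_unlock(&d->lock);
}

static void process_deferred_demo(struct deferred_demo *d,
				  void (*handle)(struct work_demo *))
{
	struct work_demo *list, *next;

	pthread_mutex_lock(&d->lock);
	list = d->head;			/* splice: steal the whole list... */
	d->head = d->tail = NULL;	/* ...leaving an empty one behind */
	pthread_mutex_unlock(&d->lock);

	for (; list; list = next) {	/* process outside the lock */
		next = list->next;
		handle(list);
	}
}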
593 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); in bio_detain_shared()
606 pb->cell = cell; in bio_detain_shared()
611 /*----------------------------------------------------------------*/
615 return test_bit(from_cblock(b), cache->dirty_bitset); in is_dirty()
620 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { in set_dirty()
621 atomic_inc(&cache->nr_dirty); in set_dirty()
622 policy_set_dirty(cache->policy, cblock); in set_dirty()
632 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) in force_set_dirty()
633 atomic_inc(&cache->nr_dirty); in force_set_dirty()
634 policy_set_dirty(cache->policy, cblock); in force_set_dirty()
639 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { in force_clear_dirty()
640 if (atomic_dec_return(&cache->nr_dirty) == 0) in force_clear_dirty()
641 dm_table_event(cache->ti->table); in force_clear_dirty()
644 policy_clear_dirty(cache->policy, cblock); in force_clear_dirty()
647 /*----------------------------------------------------------------*/
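/*
 * Sketch of the dirty-block accounting above: one bit per cache block, and a
 * counter that is only adjusted when the bit actually changes state (the
 * test_and_set_bit()/test_and_clear_bit() return value).  Plain C bit array,
 * not the kernel bitmap API; the _demo names are invented for illustration.
 */
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_WORD_DEMO (sizeof(unsigned long) * CHAR_BIT)

static bool test_and_set_demo(unsigned long *bitset, unsigned long nr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_WORD_DEMO);
	unsigned long *word = &bitset[nr / BITS_PER_WORD_DEMO];
	bool was_set = (*word & mask) != 0;

	*word |= mask;
	return was_set;
}

static void set_dirty_demo(unsigned long *dirty_bitset, unsigned long cblock,
			   unsigned long *nr_dirty)
{
	/* Only count the clean -> dirty transition, as set_dirty() does above. */
	if (!test_and_set_demo(dirty_bitset, cblock))
		(*nr_dirty)++;
}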
651 return cache->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
663 dm_block_t oblocks = cache->discard_block_size; in oblocks_per_dblock()
666 oblocks >>= cache->sectors_per_block_shift; in oblocks_per_dblock()
668 oblocks = block_div(oblocks, cache->sectors_per_block); in oblocks_per_dblock()
681 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); in set_discard()
682 atomic_inc(&cache->stats.discard_count); in set_discard()
684 spin_lock_irq(&cache->lock); in set_discard()
685 set_bit(from_dblock(b), cache->discard_bitset); in set_discard()
686 spin_unlock_irq(&cache->lock); in set_discard()
691 spin_lock_irq(&cache->lock); in clear_discard()
692 clear_bit(from_dblock(b), cache->discard_bitset); in clear_discard()
693 spin_unlock_irq(&cache->lock); in clear_discard()
700 spin_lock_irq(&cache->lock); in is_discarded()
701 r = test_bit(from_dblock(b), cache->discard_bitset); in is_discarded()
702 spin_unlock_irq(&cache->lock); in is_discarded()
711 spin_lock_irq(&cache->lock); in is_discarded_oblock()
713 cache->discard_bitset); in is_discarded_oblock()
714 spin_unlock_irq(&cache->lock); in is_discarded_oblock()
720 * -------------------------------------------------------------
722 *--------------------------------------------------------------
726 bio_set_dev(bio, cache->origin_dev->bdev); in remap_to_origin()
732 sector_t bi_sector = bio->bi_iter.bi_sector; in remap_to_cache()
735 bio_set_dev(bio, cache->cache_dev->bdev); in remap_to_cache()
737 bio->bi_iter.bi_sector = in remap_to_cache()
738 (block * cache->sectors_per_block) + in remap_to_cache()
739 sector_div(bi_sector, cache->sectors_per_block); in remap_to_cache()
741 bio->bi_iter.bi_sector = in remap_to_cache()
742 (block << cache->sectors_per_block_shift) | in remap_to_cache()
743 (bi_sector & (cache->sectors_per_block - 1)); in remap_to_cache()
750 spin_lock_irq(&cache->lock); in check_if_tick_bio_needed()
751 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && in check_if_tick_bio_needed()
754 pb->tick = true; in check_if_tick_bio_needed()
755 cache->need_tick_bio = false; in check_if_tick_bio_needed()
757 spin_unlock_irq(&cache->lock); in check_if_tick_bio_needed()
783 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
786 (void) sector_div(block_nr, cache->sectors_per_block); in get_bio_block()
788 block_nr >>= cache->sectors_per_block_shift; in get_bio_block()
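/*
 * Worked example of the block arithmetic in remap_to_cache()/get_bio_block():
 * when the block size is a power of two the driver uses shifts and masks,
 * otherwise it divides.  Self-contained user-space version; sectors_per_block
 * is a plain parameter here, not the cache field.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t sector_to_block_demo(uint64_t sector, uint64_t sectors_per_block,
				     int sectors_per_block_shift)
{
	if (sectors_per_block_shift >= 0)		/* power-of-two fast path */
		return sector >> sectors_per_block_shift;
	return sector / sectors_per_block;		/* general case */
}

static uint64_t offset_in_block_demo(uint64_t sector, uint64_t sectors_per_block,
				     int sectors_per_block_shift)
{
	if (sectors_per_block_shift >= 0)
		return sector & (sectors_per_block - 1);
	return sector % sectors_per_block;
}

int main(void)
{
	/* 512-sector blocks take the shift path; 300-sector blocks take the divide path. */
	printf("%llu %llu\n",
	       (unsigned long long)sector_to_block_demo(1536, 512, 9),
	       (unsigned long long)offset_in_block_demo(1543, 300, -1));
	return 0;
}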
804 pb->len = bio_sectors(bio); in accounted_begin()
805 dm_iot_io_begin(&cache->tracker, pb->len); in accounted_begin()
813 dm_iot_io_end(&cache->tracker, pb->len); in accounted_complete()
836 struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio, in remap_to_origin_and_cache()
837 GFP_NOIO, &cache->bs); in remap_to_origin_and_cache()
851 *--------------------------------------------------------------
853 *--------------------------------------------------------------
857 return cache->features.mode; in get_cache_mode()
862 return dm_table_device_name(cache->ti->table); in cache_device_name()
869 "read-only", in notify_mode_switch()
873 dm_table_event(cache->ti->table); in notify_mode_switch()
883 if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) { in set_cache_mode()
905 dm_cache_metadata_set_read_only(cache->cmd); in set_cache_mode()
909 dm_cache_metadata_set_read_write(cache->cmd); in set_cache_mode()
913 cache->features.mode = new_mode; in set_cache_mode()
927 if (dm_cache_metadata_abort(cache->cmd)) { in abort_transaction()
932 if (dm_cache_metadata_set_needs_check(cache->cmd)) { in abort_transaction()
946 /*----------------------------------------------------------------*/
952 dm_cache_metadata_get_stats(cache->cmd, &stats); in load_stats()
953 atomic_set(&cache->stats.read_hit, stats.read_hits); in load_stats()
954 atomic_set(&cache->stats.read_miss, stats.read_misses); in load_stats()
955 atomic_set(&cache->stats.write_hit, stats.write_hits); in load_stats()
956 atomic_set(&cache->stats.write_miss, stats.write_misses); in load_stats()
966 stats.read_hits = atomic_read(&cache->stats.read_hit); in save_stats()
967 stats.read_misses = atomic_read(&cache->stats.read_miss); in save_stats()
968 stats.write_hits = atomic_read(&cache->stats.write_hit); in save_stats()
969 stats.write_misses = atomic_read(&cache->stats.write_miss); in save_stats()
971 dm_cache_metadata_set_stats(cache->cmd, &stats); in save_stats()
978 atomic_inc(&stats->promotion); in update_stats()
982 atomic_inc(&stats->demotion); in update_stats()
986 atomic_inc(&stats->writeback); in update_stats()
992 *---------------------------------------------------------------------
997 *---------------------------------------------------------------------
1001 atomic_inc(&cache->nr_io_migrations); in inc_io_migrations()
1006 atomic_dec(&cache->nr_io_migrations); in dec_io_migrations()
1011 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf); in discard_or_flush()
1017 sector_t sb = bio->bi_iter.bi_sector; in calc_discard_block_range()
1020 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); in calc_discard_block_range()
1022 if (se - sb < cache->discard_block_size) in calc_discard_block_range()
1025 *e = to_dblock(block_div(se, cache->discard_block_size)); in calc_discard_block_range()
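/*
 * Sketch of calc_discard_block_range() above: convert a bio's sector range
 * into whole discard blocks by rounding the start up and the end down, and
 * return an empty range when the bio does not cover a full discard block.
 * Standalone arithmetic only; discard_block_size is just a parameter here.
 */
#include <stdint.h>

static void discard_block_range_demo(uint64_t bi_sector, uint64_t bi_size_sectors,
				     uint64_t discard_block_size,
				     uint64_t *begin, uint64_t *end)
{
	uint64_t sb = bi_sector;
	uint64_t se = bi_sector + bi_size_sectors;

	/* First discard block fully contained in the bio. */
	*begin = (sb + discard_block_size - 1) / discard_block_size;

	if (se - sb < discard_block_size)
		*end = *begin;				/* too small: empty range */
	else
		*end = se / discard_block_size;		/* one past the last full block */
}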
1028 /*----------------------------------------------------------------*/
1033 down_write(&cache->background_work_lock); in prevent_background_work()
1040 up_write(&cache->background_work_lock); in allow_background_work()
1049 r = down_read_trylock(&cache->background_work_lock); in background_work_begin()
1058 up_read(&cache->background_work_lock); in background_work_end()
1062 /*----------------------------------------------------------------*/
1067 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block()
1079 init_continuation(&mg->k, continuation); in quiesce()
1080 dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws); in quiesce()
1095 mg->k.input = BLK_STS_IOERR; in copy_complete()
1097 queue_continuation(mg->cache->wq, &mg->k); in copy_complete()
1103 struct cache *cache = mg->cache; in copy()
1105 o_region.bdev = cache->origin_dev->bdev; in copy()
1106 o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block; in copy()
1107 o_region.count = cache->sectors_per_block; in copy()
1109 c_region.bdev = cache->cache_dev->bdev; in copy()
1110 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block; in copy()
1111 c_region.count = cache->sectors_per_block; in copy()
1114 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k); in copy()
1116 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k); in copy()
1123 if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell)) in bio_drop_shared_lock()
1124 free_prison_cell(cache, pb->cell); in bio_drop_shared_lock()
1125 pb->cell = NULL; in bio_drop_shared_lock()
1130 struct dm_cache_migration *mg = bio->bi_private; in overwrite_endio()
1131 struct cache *cache = mg->cache; in overwrite_endio()
1134 dm_unhook_bio(&pb->hook_info, bio); in overwrite_endio()
1136 if (bio->bi_status) in overwrite_endio()
1137 mg->k.input = bio->bi_status; in overwrite_endio()
1139 queue_continuation(cache->wq, &mg->k); in overwrite_endio()
1145 struct bio *bio = mg->overwrite_bio; in overwrite()
1148 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); in overwrite()
1154 if (mg->op->op == POLICY_PROMOTE) in overwrite()
1155 remap_to_cache(mg->cache, bio, mg->op->cblock); in overwrite()
1157 remap_to_origin(mg->cache, bio); in overwrite()
1159 init_continuation(&mg->k, continuation); in overwrite()
1160 accounted_request(mg->cache, bio); in overwrite()
1177 struct cache *cache = mg->cache; in mg_complete()
1178 struct policy_work *op = mg->op; in mg_complete()
1179 dm_cblock_t cblock = op->cblock; in mg_complete()
1182 update_stats(&cache->stats, op->op); in mg_complete()
1184 switch (op->op) { in mg_complete()
1186 clear_discard(cache, oblock_to_dblock(cache, op->oblock)); in mg_complete()
1187 policy_complete_background_work(cache->policy, op, success); in mg_complete()
1189 if (mg->overwrite_bio) { in mg_complete()
1192 else if (mg->k.input) in mg_complete()
1193 mg->overwrite_bio->bi_status = mg->k.input; in mg_complete()
1195 mg->overwrite_bio->bi_status = BLK_STS_IOERR; in mg_complete()
1196 bio_endio(mg->overwrite_bio); in mg_complete()
1210 policy_complete_background_work(cache->policy, op, success); in mg_complete()
1217 policy_complete_background_work(cache->policy, op, success); in mg_complete()
1223 if (mg->cell) { in mg_complete()
1224 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) in mg_complete()
1225 free_prison_cell(cache, mg->cell); in mg_complete()
1239 mg_complete(mg, mg->k.input == 0); in mg_success()
1246 struct cache *cache = mg->cache; in mg_update_metadata()
1247 struct policy_work *op = mg->op; in mg_update_metadata()
1249 switch (op->op) { in mg_update_metadata()
1251 r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock); in mg_update_metadata()
1264 r = dm_cache_remove_mapping(cache->cmd, op->cblock); in mg_update_metadata()
1279 * - vblock x in a cache block in mg_update_metadata()
1280 * - demotion occurs in mg_update_metadata()
1281 * - cache block gets reallocated and overwritten in mg_update_metadata()
1282 * - crash in mg_update_metadata()
1293 init_continuation(&mg->k, mg_success); in mg_update_metadata()
1294 continue_after_commit(&cache->committer, &mg->k); in mg_update_metadata()
1295 schedule_commit(&cache->committer); in mg_update_metadata()
1311 if (mg->k.input) in mg_update_metadata_after_copy()
1325 if (mg->k.input) in mg_upgrade_lock()
1332 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell, in mg_upgrade_lock()
1348 struct cache *cache = mg->cache; in mg_full_copy()
1349 struct policy_work *op = mg->op; in mg_full_copy()
1350 bool is_policy_promote = (op->op == POLICY_PROMOTE); in mg_full_copy()
1352 if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || in mg_full_copy()
1353 is_discarded_oblock(cache, op->oblock)) { in mg_full_copy()
1358 init_continuation(&mg->k, mg_upgrade_lock); in mg_full_copy()
1366 if (mg->overwrite_bio) { in mg_copy()
1372 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { in mg_copy()
1376 bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); in mg_copy()
1379 mg->overwrite_bio = NULL; in mg_copy()
1380 inc_io_migrations(mg->cache); in mg_copy()
1402 struct cache *cache = mg->cache; in mg_lock_writes()
1412 build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key); in mg_lock_writes()
1413 r = dm_cell_lock_v2(cache->prison, &key, in mg_lock_writes()
1414 mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL, in mg_lock_writes()
1415 prealloc, &mg->cell); in mg_lock_writes()
1422 if (mg->cell != prealloc) in mg_lock_writes()
1426 mg_copy(&mg->k.ws); in mg_lock_writes()
1438 policy_complete_background_work(cache->policy, op, false); in mg_start()
1439 return -EPERM; in mg_start()
1444 mg->op = op; in mg_start()
1445 mg->overwrite_bio = bio; in mg_start()
1454 *--------------------------------------------------------------
1456 *--------------------------------------------------------------
1462 struct cache *cache = mg->cache; in invalidate_complete()
1465 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) in invalidate_complete()
1466 free_prison_cell(cache, mg->cell); in invalidate_complete()
1468 if (!success && mg->overwrite_bio) in invalidate_complete()
1469 bio_io_error(mg->overwrite_bio); in invalidate_complete()
1481 invalidate_complete(mg, !mg->k.input); in invalidate_completed()
1488 r = policy_invalidate_mapping(cache->policy, cblock); in invalidate_cblock()
1490 r = dm_cache_remove_mapping(cache->cmd, cblock); in invalidate_cblock()
1497 } else if (r == -ENODATA) { in invalidate_cblock()
1513 struct cache *cache = mg->cache; in invalidate_remove()
1515 r = invalidate_cblock(cache, mg->invalidate_cblock); in invalidate_remove()
1521 init_continuation(&mg->k, invalidate_completed); in invalidate_remove()
1522 continue_after_commit(&cache->committer, &mg->k); in invalidate_remove()
1523 remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock); in invalidate_remove()
1524 mg->overwrite_bio = NULL; in invalidate_remove()
1525 schedule_commit(&cache->committer); in invalidate_remove()
1532 struct cache *cache = mg->cache; in invalidate_lock()
1537 build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key); in invalidate_lock()
1538 r = dm_cell_lock_v2(cache->prison, &key, in invalidate_lock()
1539 READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell); in invalidate_lock()
1546 if (mg->cell != prealloc) in invalidate_lock()
1557 init_continuation(&mg->k, invalidate_remove); in invalidate_lock()
1558 queue_work(cache->wq, &mg->k.ws); in invalidate_lock()
1570 return -EPERM; in invalidate_start()
1574 mg->overwrite_bio = bio; in invalidate_start()
1575 mg->invalidate_cblock = cblock; in invalidate_start()
1576 mg->invalidate_oblock = oblock; in invalidate_start()
1582 *--------------------------------------------------------------
1584 *--------------------------------------------------------------
1594 bool idle = dm_iot_idle_for(&cache->tracker, HZ); in spare_migration_bandwidth()
1595 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * in spare_migration_bandwidth()
1596 cache->sectors_per_block; in spare_migration_bandwidth()
1598 if (idle && current_volume <= cache->migration_threshold) in spare_migration_bandwidth()
1607 &cache->stats.read_hit : &cache->stats.write_hit); in inc_hit_counter()
1613 &cache->stats.read_miss : &cache->stats.write_miss); in inc_miss_counter()
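/*
 * Sketch of the spare_migration_bandwidth() policy above: allow a new
 * background copy only when user I/O has been idle and the volume already in
 * flight (plus this copy) stays under migration_threshold sectors.  This is
 * an illustration with invented names, not the driver's enum busy handling.
 */
#include <stdbool.h>
#include <stdint.h>

enum busy_demo { IDLE_DEMO, BUSY_DEMO };

static enum busy_demo spare_bandwidth_demo(bool io_idle,
					   unsigned int nr_io_migrations,
					   uint64_t sectors_per_block,
					   uint64_t migration_threshold)
{
	uint64_t current_volume = (nr_io_migrations + 1ULL) * sectors_per_block;

	if (io_idle && current_volume <= migration_threshold)
		return IDLE_DEMO;
	return BUSY_DEMO;
}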
1616 /*----------------------------------------------------------------*/
1644 r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op); in map_bio()
1645 if (unlikely(r && r != -ENOENT)) { in map_bio()
1652 if (r == -ENOENT && op) { in map_bio()
1654 BUG_ON(op->op != POLICY_PROMOTE); in map_bio()
1659 r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued); in map_bio()
1660 if (unlikely(r && r != -ENOENT)) { in map_bio()
1671 if (r == -ENOENT) { in map_bio()
1678 if (pb->req_nr == 0) { in map_bio()
1702 atomic_inc(&cache->stats.demotion); in map_bio()
1719 if (bio->bi_opf & REQ_FUA) { in map_bio()
1725 issue_after_commit(&cache->committer, bio); in map_bio()
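/*
 * Much-simplified sketch of the decision made in map_bio(): look the origin
 * block up, remap to the fast device on a hit and to the origin on a miss,
 * and count the result.  The lookup table is a toy array; the real driver
 * asks the policy (policy_lookup_with_work) and also handles passthrough,
 * writethrough, FUA commits and background promotion, all omitted here.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_cache {
	int64_t *oblock_to_cblock;	/* -1 means not cached */
	uint64_t nr_oblocks;
	unsigned long hits, misses;
};

/* Returns true when the I/O should go to the cache device, false for the origin. */
static bool map_demo(struct toy_cache *c, uint64_t oblock, uint64_t *cblock)
{
	if (oblock < c->nr_oblocks && c->oblock_to_cblock[oblock] >= 0) {
		*cblock = (uint64_t)c->oblock_to_cblock[oblock];
		c->hits++;
		return true;
	}
	c->misses++;
	return false;
}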
1744 * A non-zero return indicates read_only or fail_io mode.
1751 return -EINVAL; in commit()
1753 atomic_inc(&cache->stats.commit_count); in commit()
1754 r = dm_cache_commit(cache->cmd, clean_shutdown); in commit()
1768 if (dm_cache_changed_this_transaction(cache->cmd)) in commit_op()
1774 /*----------------------------------------------------------------*/
1780 if (!pb->req_nr) in process_flush_bio()
1785 issue_after_commit(&cache->committer, bio); in process_flush_bio()
1804 if (cache->features.discard_passdown) { in process_discard_bio()
1823 spin_lock_irq(&cache->lock); in process_deferred_bios()
1824 bio_list_merge_init(&bios, &cache->deferred_bios); in process_deferred_bios()
1825 spin_unlock_irq(&cache->lock); in process_deferred_bios()
1828 if (bio->bi_opf & REQ_PREFLUSH) in process_deferred_bios()
1840 schedule_commit(&cache->committer); in process_deferred_bios()
1844 *--------------------------------------------------------------
1846 *--------------------------------------------------------------
1854 bio_list_merge_init(&bios, &cache->deferred_bios); in requeue_deferred_bios()
1857 bio->bi_status = BLK_STS_DM_REQUEUE; in requeue_deferred_bios()
1871 policy_tick(cache->policy, true); in do_waker()
1873 schedule_commit(&cache->committer); in do_waker()
1874 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); in do_waker()
1887 r = policy_get_background_work(cache->policy, b == IDLE, &op); in check_migrations()
1888 if (r == -ENODATA) in check_migrations()
1906 *--------------------------------------------------------------
1908 *--------------------------------------------------------------
1917 mempool_exit(&cache->migration_pool); in __destroy()
1919 if (cache->prison) in __destroy()
1920 dm_bio_prison_destroy_v2(cache->prison); in __destroy()
1922 if (cache->wq) in __destroy()
1923 destroy_workqueue(cache->wq); in __destroy()
1925 if (cache->dirty_bitset) in __destroy()
1926 free_bitset(cache->dirty_bitset); in __destroy()
1928 if (cache->discard_bitset) in __destroy()
1929 free_bitset(cache->discard_bitset); in __destroy()
1931 if (cache->invalid_bitset) in __destroy()
1932 free_bitset(cache->invalid_bitset); in __destroy()
1934 if (cache->copier) in __destroy()
1935 dm_kcopyd_client_destroy(cache->copier); in __destroy()
1937 if (cache->cmd) in __destroy()
1938 dm_cache_metadata_close(cache->cmd); in __destroy()
1940 if (cache->metadata_dev) in __destroy()
1941 dm_put_device(cache->ti, cache->metadata_dev); in __destroy()
1943 if (cache->origin_dev) in __destroy()
1944 dm_put_device(cache->ti, cache->origin_dev); in __destroy()
1946 if (cache->cache_dev) in __destroy()
1947 dm_put_device(cache->ti, cache->cache_dev); in __destroy()
1949 if (cache->policy) in __destroy()
1950 dm_cache_policy_destroy(cache->policy); in __destroy()
1952 bioset_exit(&cache->bs); in __destroy()
1961 cancel_delayed_work_sync(&cache->waker); in destroy()
1963 for (i = 0; i < cache->nr_ctr_args ; i++) in destroy()
1964 kfree(cache->ctr_args[i]); in destroy()
1965 kfree(cache->ctr_args); in destroy()
1972 struct cache *cache = ti->private; in cache_dtr()
1979 return bdev_nr_sectors(dev->bdev); in get_dev_size()
1982 /*----------------------------------------------------------------*/
2004 * See cache-policies.txt for details.
2034 if (ca->metadata_dev) in destroy_cache_args()
2035 dm_put_device(ca->ti, ca->metadata_dev); in destroy_cache_args()
2037 if (ca->cache_dev) in destroy_cache_args()
2038 dm_put_device(ca->ti, ca->cache_dev); in destroy_cache_args()
2040 if (ca->origin_dev) in destroy_cache_args()
2041 dm_put_device(ca->ti, ca->origin_dev); in destroy_cache_args()
2048 if (!as->argc) { in at_least_one_arg()
2063 return -EINVAL; in parse_metadata_dev()
2065 r = dm_get_device(ca->ti, dm_shift_arg(as), in parse_metadata_dev()
2066 BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->metadata_dev); in parse_metadata_dev()
2072 metadata_dev_size = get_dev_size(ca->metadata_dev); in parse_metadata_dev()
2075 ca->metadata_dev->bdev, THIN_METADATA_MAX_SECTORS); in parse_metadata_dev()
2086 return -EINVAL; in parse_cache_dev()
2088 r = dm_get_device(ca->ti, dm_shift_arg(as), in parse_cache_dev()
2089 BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->cache_dev); in parse_cache_dev()
2094 ca->cache_sectors = get_dev_size(ca->cache_dev); in parse_cache_dev()
2106 return -EINVAL; in parse_origin_dev()
2108 r = dm_get_device(ca->ti, dm_shift_arg(as), in parse_origin_dev()
2109 BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->origin_dev); in parse_origin_dev()
2115 origin_sectors = get_dev_size(ca->origin_dev); in parse_origin_dev()
2116 if (ca->ti->len > origin_sectors) { in parse_origin_dev()
2118 return -EINVAL; in parse_origin_dev()
2130 return -EINVAL; in parse_block_size()
2135 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { in parse_block_size()
2137 return -EINVAL; in parse_block_size()
2140 if (block_size > ca->cache_sectors) { in parse_block_size()
2142 return -EINVAL; in parse_block_size()
2145 ca->block_size = block_size; in parse_block_size()
2152 cf->mode = CM_WRITE; in init_features()
2153 cf->io_mode = CM_IO_WRITEBACK; in init_features()
2154 cf->metadata_version = 1; in init_features()
2155 cf->discard_passdown = true; in init_features()
2168 struct cache_features *cf = &ca->features; in parse_features()
2174 return -EINVAL; in parse_features()
2176 while (argc--) { in parse_features()
2180 cf->io_mode = CM_IO_WRITEBACK; in parse_features()
2185 cf->io_mode = CM_IO_WRITETHROUGH; in parse_features()
2190 cf->io_mode = CM_IO_PASSTHROUGH; in parse_features()
2195 cf->metadata_version = 2; in parse_features()
2198 cf->discard_passdown = false; in parse_features()
2202 return -EINVAL; in parse_features()
2208 return -EINVAL; in parse_features()
2224 return -EINVAL; in parse_policy()
2226 ca->policy_name = dm_shift_arg(as); in parse_policy()
2228 r = dm_read_arg_group(_args, as, &ca->policy_argc, error); in parse_policy()
2230 return -EINVAL; in parse_policy()
2232 ca->policy_argv = (const char **)as->argv; in parse_policy()
2233 dm_consume_args(as, ca->policy_argc); in parse_policy()
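/*
 * Sketch of the ctr feature parsing in parse_features(): walk the feature
 * words and fill a small config struct, starting from the defaults set in
 * init_features() above.  The accepted keywords are the ones visible in the
 * listing (writeback, writethrough, passthrough, metadata2,
 * no_discard_passdown); the struct and function names are invented.
 */
#include <stdbool.h>
#include <strings.h>

enum io_mode_demo { WRITEBACK_DEMO, WRITETHROUGH_DEMO, PASSTHROUGH_DEMO };

struct features_demo {
	enum io_mode_demo io_mode;
	unsigned int metadata_version;
	bool discard_passdown;
};

static int parse_features_demo(struct features_demo *cf, int argc, char **argv)
{
	cf->io_mode = WRITEBACK_DEMO;		/* defaults mirror init_features() */
	cf->metadata_version = 1;
	cf->discard_passdown = true;

	while (argc--) {
		const char *arg = *argv++;

		if (!strcasecmp(arg, "writeback"))
			cf->io_mode = WRITEBACK_DEMO;
		else if (!strcasecmp(arg, "writethrough"))
			cf->io_mode = WRITETHROUGH_DEMO;
		else if (!strcasecmp(arg, "passthrough"))
			cf->io_mode = PASSTHROUGH_DEMO;
		else if (!strcasecmp(arg, "metadata2"))
			cf->metadata_version = 2;
		else if (!strcasecmp(arg, "no_discard_passdown"))
			cf->discard_passdown = false;
		else
			return -1;		/* unknown feature word */
	}
	return 0;
}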
2274 /*----------------------------------------------------------------*/
2286 return -EINVAL; in process_config_option()
2288 cache->migration_threshold = tmp; in process_config_option()
2300 r = policy_set_config_value(cache->policy, key, value); in set_config_value()
2314 return -EINVAL; in set_config_values()
2322 argc -= 2; in set_config_values()
2332 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name, in create_cache_policy()
2333 cache->cache_size, in create_cache_policy()
2334 cache->origin_sectors, in create_cache_policy()
2335 cache->sectors_per_block); in create_cache_policy()
2340 cache->policy = p; in create_cache_policy()
2341 BUG_ON(!cache->policy); in create_cache_policy()
2376 if (nr_blocks > (1 << 20) && cache->cache_size != size) in set_cache_size()
2382 cache->cache_size = size; in set_cache_size()
2390 char **error = &ca->ti->error; in cache_create()
2392 struct dm_target *ti = ca->ti; in cache_create()
2395 bool may_format = ca->features.mode == CM_WRITE; in cache_create()
2399 return -ENOMEM; in cache_create()
2401 cache->ti = ca->ti; in cache_create()
2402 ti->private = cache; in cache_create()
2403 ti->accounts_remapped_io = true; in cache_create()
2404 ti->num_flush_bios = 2; in cache_create()
2405 ti->flush_supported = true; in cache_create()
2407 ti->num_discard_bios = 1; in cache_create()
2408 ti->discards_supported = true; in cache_create()
2410 ti->per_io_data_size = sizeof(struct per_bio_data); in cache_create()
2412 cache->features = ca->features; in cache_create()
2415 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0); in cache_create()
2420 cache->metadata_dev = ca->metadata_dev; in cache_create()
2421 cache->origin_dev = ca->origin_dev; in cache_create()
2422 cache->cache_dev = ca->cache_dev; in cache_create()
2424 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL; in cache_create()
2426 origin_blocks = cache->origin_sectors = ti->len; in cache_create()
2427 origin_blocks = block_div(origin_blocks, ca->block_size); in cache_create()
2428 cache->origin_blocks = to_oblock(origin_blocks); in cache_create()
2430 cache->sectors_per_block = ca->block_size; in cache_create()
2431 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { in cache_create()
2432 r = -EINVAL; in cache_create()
2436 if (ca->block_size & (ca->block_size - 1)) { in cache_create()
2437 dm_block_t cache_size = ca->cache_sectors; in cache_create()
2439 cache->sectors_per_block_shift = -1; in cache_create()
2440 cache_size = block_div(cache_size, ca->block_size); in cache_create()
2443 cache->sectors_per_block_shift = __ffs(ca->block_size); in cache_create()
2444 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); in cache_create()
2451 cache->policy_nr_args = ca->policy_argc; in cache_create()
2452 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; in cache_create()
2454 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); in cache_create()
2460 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, in cache_create()
2461 ca->block_size, may_format, in cache_create()
2462 dm_cache_policy_get_hint_size(cache->policy), in cache_create()
2463 ca->features.metadata_version); in cache_create()
2469 cache->cmd = cmd; in cache_create()
2473 r = -EINVAL; in cache_create()
2480 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); in cache_create()
2488 r = -EINVAL; in cache_create()
2492 policy_allow_migrations(cache->policy, false); in cache_create()
2495 spin_lock_init(&cache->lock); in cache_create()
2496 bio_list_init(&cache->deferred_bios); in cache_create()
2497 atomic_set(&cache->nr_allocated_migrations, 0); in cache_create()
2498 atomic_set(&cache->nr_io_migrations, 0); in cache_create()
2499 init_waitqueue_head(&cache->migration_wait); in cache_create()
2501 r = -ENOMEM; in cache_create()
2502 atomic_set(&cache->nr_dirty, 0); in cache_create()
2503 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2504 if (!cache->dirty_bitset) { in cache_create()
2508 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); in cache_create()
2510 cache->discard_block_size = in cache_create()
2511 calculate_discard_block_size(cache->sectors_per_block, in cache_create()
2512 cache->origin_sectors); in cache_create()
2513 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, in cache_create()
2514 cache->discard_block_size)); in cache_create()
2515 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); in cache_create()
2516 if (!cache->discard_bitset) { in cache_create()
2520 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_create()
2522 cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2523 if (!cache->invalid_bitset) { in cache_create()
2527 clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size)); in cache_create()
2529 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in cache_create()
2530 if (IS_ERR(cache->copier)) { in cache_create()
2532 r = PTR_ERR(cache->copier); in cache_create()
2536 cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); in cache_create()
2537 if (!cache->wq) { in cache_create()
2541 INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios); in cache_create()
2542 INIT_WORK(&cache->migration_worker, check_migrations); in cache_create()
2543 INIT_DELAYED_WORK(&cache->waker, do_waker); in cache_create()
2545 cache->prison = dm_bio_prison_create_v2(cache->wq); in cache_create()
2546 if (!cache->prison) { in cache_create()
2551 r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE, in cache_create()
2558 cache->need_tick_bio = true; in cache_create()
2559 cache->sized = false; in cache_create()
2560 cache->invalidate = false; in cache_create()
2561 cache->commit_requested = false; in cache_create()
2562 cache->loaded_mappings = false; in cache_create()
2563 cache->loaded_discards = false; in cache_create()
2567 atomic_set(&cache->stats.demotion, 0); in cache_create()
2568 atomic_set(&cache->stats.promotion, 0); in cache_create()
2569 atomic_set(&cache->stats.copies_avoided, 0); in cache_create()
2570 atomic_set(&cache->stats.cache_cell_clash, 0); in cache_create()
2571 atomic_set(&cache->stats.commit_count, 0); in cache_create()
2572 atomic_set(&cache->stats.discard_count, 0); in cache_create()
2574 spin_lock_init(&cache->invalidation_lock); in cache_create()
2575 INIT_LIST_HEAD(&cache->invalidation_requests); in cache_create()
2577 batcher_init(&cache->committer, commit_op, cache, in cache_create()
2578 issue_op, cache, cache->wq); in cache_create()
2579 dm_iot_init(&cache->tracker); in cache_create()
2581 init_rwsem(&cache->background_work_lock); in cache_create()
2598 return -ENOMEM; in copy_ctr_args()
2602 while (i--) in copy_ctr_args()
2605 return -ENOMEM; in copy_ctr_args()
2609 cache->nr_ctr_args = argc; in copy_ctr_args()
2610 cache->ctr_args = copy; in copy_ctr_args()
2617 int r = -EINVAL; in cache_ctr()
2623 ti->error = "Error allocating memory for cache"; in cache_ctr()
2624 return -ENOMEM; in cache_ctr()
2626 ca->ti = ti; in cache_ctr()
2628 r = parse_cache_args(ca, argc, argv, &ti->error); in cache_ctr()
2636 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); in cache_ctr()
2642 ti->private = cache; in cache_ctr()
2648 /*----------------------------------------------------------------*/
2652 struct cache *cache = ti->private; in cache_map()
2659 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { in cache_map()
2677 schedule_commit(&cache->committer); in cache_map()
2684 struct cache *cache = ti->private; in cache_end_io()
2688 if (pb->tick) { in cache_end_io()
2689 policy_tick(cache->policy, false); in cache_end_io()
2691 spin_lock_irqsave(&cache->lock, flags); in cache_end_io()
2692 cache->need_tick_bio = true; in cache_end_io()
2693 spin_unlock_irqrestore(&cache->lock, flags); in cache_end_io()
2707 return -EINVAL; in write_dirty_bitset()
2709 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset); in write_dirty_bitset()
2721 return -EINVAL; in write_discard_bitset()
2723 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, in write_discard_bitset()
2724 cache->discard_nr_blocks); in write_discard_bitset()
2726 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); in write_discard_bitset()
2731 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { in write_discard_bitset()
2732 r = dm_cache_set_discard(cache->cmd, to_dblock(i), in write_discard_bitset()
2748 return -EINVAL; in write_hints()
2750 r = dm_cache_write_hints(cache->cmd, cache->policy); in write_hints()
2794 struct cache *cache = ti->private; in cache_postsuspend()
2797 BUG_ON(atomic_read(&cache->nr_io_migrations)); in cache_postsuspend()
2799 cancel_delayed_work_sync(&cache->waker); in cache_postsuspend()
2800 drain_workqueue(cache->wq); in cache_postsuspend()
2801 WARN_ON(cache->tracker.in_flight); in cache_postsuspend()
2819 set_bit(from_cblock(cblock), cache->dirty_bitset); in load_mapping()
2820 atomic_inc(&cache->nr_dirty); in load_mapping()
2822 clear_bit(from_cblock(cblock), cache->dirty_bitset); in load_mapping()
2824 return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid); in load_mapping()
2832 if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) { in load_filtered_mapping()
2836 return -EFBIG; in load_filtered_mapping()
2838 set_bit(from_cblock(cblock), cache->invalid_bitset); in load_filtered_mapping()
2865 li->cache = cache; in discard_load_info_init()
2866 li->discard_begin = li->discard_end = 0; in discard_load_info_init()
2873 if (li->discard_begin == li->discard_end) in set_discard_range()
2879 b = li->discard_begin * li->block_size; in set_discard_range()
2880 e = li->discard_end * li->block_size; in set_discard_range()
2885 b = dm_sector_div_up(b, li->cache->discard_block_size); in set_discard_range()
2886 sector_div(e, li->cache->discard_block_size); in set_discard_range()
2892 if (e > from_dblock(li->cache->discard_nr_blocks)) in set_discard_range()
2893 e = from_dblock(li->cache->discard_nr_blocks); in set_discard_range()
2896 set_discard(li->cache, to_dblock(b)); in set_discard_range()
2904 li->block_size = discard_block_size; in load_discard()
2907 if (from_dblock(dblock) == li->discard_end) in load_discard()
2911 li->discard_end = li->discard_end + 1ULL; in load_discard()
2918 li->discard_begin = from_dblock(dblock); in load_discard()
2919 li->discard_end = li->discard_begin + 1ULL; in load_discard()
2923 li->discard_begin = li->discard_end = 0; in load_discard()
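/*
 * Sketch of the run coalescing done by load_discard()/set_discard_range():
 * consecutive discarded blocks accumulate into the half-open run
 * [discard_begin, discard_end) and are only emitted as one range when the
 * run is broken.  emit_range() just prints here; the driver converts the
 * range to its own discard-block granularity and sets bitset bits instead.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct discard_run_demo {
	uint64_t begin, end;	/* half-open run of discarded blocks */
};

static void emit_range_demo(struct discard_run_demo *r)
{
	if (r->begin != r->end)
		printf("discard [%llu, %llu)\n",
		       (unsigned long long)r->begin, (unsigned long long)r->end);
}

static void load_discard_demo(struct discard_run_demo *r, uint64_t block, bool discarded)
{
	if (discarded) {
		if (block == r->end) {
			r->end++;		/* extends the current run */
		} else {
			emit_range_demo(r);	/* gap: flush and start a new run */
			r->begin = block;
			r->end = block + 1;
		}
	} else {
		emit_range_demo(r);
		r->begin = r->end = 0;
	}
}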
2931 sector_t size = get_dev_size(cache->cache_dev); in get_cache_dev_size()
2932 (void) sector_div(size, cache->sectors_per_block); in get_cache_dev_size()
2944 if (cache->sized && !cache->loaded_mappings) { in can_resume()
2946 DMERR("%s: unable to resume a failed-loaded cache, please check metadata.", in can_resume()
2959 if (from_cblock(new_size) > from_cblock(cache->cache_size)) { in can_resize()
2968 if (cache->loaded_mappings) { in can_resize()
2969 new_size = to_cblock(find_next_bit(cache->dirty_bitset, in can_resize()
2970 from_cblock(cache->cache_size), in can_resize()
2972 if (new_size != cache->cache_size) { in can_resize()
2987 r = dm_cache_resize(cache->cmd, new_size); in resize_cache_dev()
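/*
 * Sketch of the shrink check in can_resize() above: the cache may only shrink
 * if no block at or beyond the new size is still dirty, i.e. the first set
 * dirty bit from new_size onward must be past the end of the bitmap.  Linear
 * scan over a byte array here; the driver uses find_next_bit() on its bitset.
 */
#include <stdbool.h>
#include <stdint.h>

static bool can_shrink_demo(const unsigned char *dirty, uint64_t cache_size,
			    uint64_t new_size)
{
	uint64_t i;

	for (i = new_size; i < cache_size; i++)
		if (dirty[i])			/* a dirty block would be dropped */
			return false;
	return true;
}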
3001 uint32_t nr_blocks = from_cblock(cache->cache_size); in truncate_oblocks()
3005 for_each_set_bit(i, cache->invalid_bitset, nr_blocks) { in truncate_oblocks()
3006 r = dm_cache_remove_mapping(cache->cmd, to_cblock(i)); in truncate_oblocks()
3020 struct cache *cache = ti->private; in cache_preresume()
3024 return -EINVAL; in cache_preresume()
3029 if (!cache->sized || csize != cache->cache_size) { in cache_preresume()
3031 return -EINVAL; in cache_preresume()
3037 cache->sized = true; in cache_preresume()
3040 if (!cache->loaded_mappings) { in cache_preresume()
3046 clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size)); in cache_preresume()
3048 r = dm_cache_load_mappings(cache->cmd, cache->policy, in cache_preresume()
3052 if (r != -EFBIG) in cache_preresume()
3063 cache->loaded_mappings = true; in cache_preresume()
3066 if (!cache->loaded_discards) { in cache_preresume()
3074 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_preresume()
3077 r = dm_cache_load_discards(cache->cmd, load_discard, &li); in cache_preresume()
3085 cache->loaded_discards = true; in cache_preresume()
3093 struct cache *cache = ti->private; in cache_resume()
3095 cache->need_tick_bio = true; in cache_resume()
3097 do_waker(&cache->waker.work); in cache_resume()
3104 struct cache_features *cf = &cache->features; in emit_flags()
3105 unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1; in emit_flags()
3109 if (cf->metadata_version == 2) in emit_flags()
3124 cache_device_name(cache), (int) cf->io_mode); in emit_flags()
3127 if (!cf->discard_passdown) in emit_flags()
3153 struct cache *cache = ti->private; in cache_status()
3164 /* Commit to ensure statistics aren't out-of-date */ in cache_status()
3168 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata); in cache_status()
3175 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); in cache_status()
3182 residency = policy_residency(cache->policy); in cache_status()
3186 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), in cache_status()
3188 (unsigned long long)cache->sectors_per_block, in cache_status()
3190 (unsigned long long) from_cblock(cache->cache_size), in cache_status()
3191 (unsigned int) atomic_read(&cache->stats.read_hit), in cache_status()
3192 (unsigned int) atomic_read(&cache->stats.read_miss), in cache_status()
3193 (unsigned int) atomic_read(&cache->stats.write_hit), in cache_status()
3194 (unsigned int) atomic_read(&cache->stats.write_miss), in cache_status()
3195 (unsigned int) atomic_read(&cache->stats.demotion), in cache_status()
3196 (unsigned int) atomic_read(&cache->stats.promotion), in cache_status()
3197 (unsigned long) atomic_read(&cache->nr_dirty)); in cache_status()
3201 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); in cache_status()
3203 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); in cache_status()
3205 r = policy_emit_config_values(cache->policy, result, maxlen, &sz); in cache_status()
3216 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check); in cache_status()
3221 DMEMIT("- "); in cache_status()
3226 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); in cache_status()
3228 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); in cache_status()
3230 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); in cache_status()
3233 for (i = 0; i < cache->nr_ctr_args - 1; i++) in cache_status()
3234 DMEMIT(" %s", cache->ctr_args[i]); in cache_status()
3235 if (cache->nr_ctr_args) in cache_status()
3236 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); in cache_status()
3240 DMEMIT_TARGET_NAME_VERSION(ti->type); in cache_status()
3248 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); in cache_status()
3250 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); in cache_status()
3252 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); in cache_status()
3257 DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n'); in cache_status()
3258 DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y'); in cache_status()
3270 * Defines a range of cblocks, begin to (end - 1) are in the range. end is
3271 * the one-past-the-end value.
3282 * ii) A begin and end cblock with a dash between, eg. 123-234
3294 r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy); in parse_cblock_range()
3297 result->begin = to_cblock(b); in parse_cblock_range()
3298 result->end = to_cblock(e); in parse_cblock_range()
3308 result->begin = to_cblock(b); in parse_cblock_range()
3309 result->end = to_cblock(from_cblock(result->begin) + 1u); in parse_cblock_range()
3314 return -EINVAL; in parse_cblock_range()
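/*
 * Sketch of parse_cblock_range() above: a cblock range is either
 * "<begin>-<end>" (end is one past the last block, per the comment above) or
 * a single "<cblock>", which becomes the half-open range [b, b + 1).  Same
 * sscanf trick with a trailing %c to reject trailing garbage.
 */
#include <stdint.h>
#include <stdio.h>

static int parse_cblock_range_demo(const char *str, uint64_t *begin, uint64_t *end)
{
	unsigned long long b, e;
	char dummy;

	if (sscanf(str, "%llu-%llu%c", &b, &e, &dummy) == 2) {
		*begin = b;
		*end = e;
		return 0;
	}

	if (sscanf(str, "%llu%c", &b, &dummy) == 1) {
		*begin = b;
		*end = b + 1;
		return 0;
	}

	return -1;	/* malformed range */
}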
3319 uint64_t b = from_cblock(range->begin); in validate_cblock_range()
3320 uint64_t e = from_cblock(range->end); in validate_cblock_range()
3321 uint64_t n = from_cblock(cache->cache_size); in validate_cblock_range()
3326 return -EINVAL; in validate_cblock_range()
3332 return -EINVAL; in validate_cblock_range()
3338 return -EINVAL; in validate_cblock_range()
3359 while (range->begin != range->end) { in request_invalidation()
3360 r = invalidate_cblock(cache, range->begin); in request_invalidation()
3364 range->begin = cblock_succ(range->begin); in request_invalidation()
3367 cache->commit_requested = true; in request_invalidation()
3381 return -EPERM; in process_invalidate_cblocks_message()
3408 * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
3415 struct cache *cache = ti->private; in cache_message()
3418 return -EINVAL; in cache_message()
3423 return -EOPNOTSUPP; in cache_message()
3427 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); in cache_message()
3430 return -EINVAL; in cache_message()
3439 struct cache *cache = ti->private; in cache_iterate_devices()
3441 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); in cache_iterate_devices()
3443 r = fn(ti, cache->origin_dev, 0, ti->len, data); in cache_iterate_devices()
3454 struct block_device *origin_bdev = cache->origin_dev->bdev; in disable_passdown_if_not_supported()
3458 if (!cache->features.discard_passdown) in disable_passdown_if_not_supported()
3464 else if (origin_limits->max_discard_sectors < cache->sectors_per_block) in disable_passdown_if_not_supported()
3470 cache->features.discard_passdown = false; in disable_passdown_if_not_supported()
3476 struct block_device *origin_bdev = cache->origin_dev->bdev; in set_discard_limits()
3479 if (!cache->features.discard_passdown) { in set_discard_limits()
3481 limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, in set_discard_limits()
3482 cache->origin_sectors); in set_discard_limits()
3483 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; in set_discard_limits()
3491 limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors; in set_discard_limits()
3492 limits->discard_granularity = origin_limits->discard_granularity; in set_discard_limits()
3493 limits->discard_alignment = origin_limits->discard_alignment; in set_discard_limits()
3498 struct cache *cache = ti->private; in cache_io_hints()
3499 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; in cache_io_hints()
3502 * If the system-determined stacked limits are compatible with the in cache_io_hints()
3505 if (io_opt_sectors < cache->sectors_per_block || in cache_io_hints()
3506 do_div(io_opt_sectors, cache->sectors_per_block)) { in cache_io_hints()
3507 limits->io_min = cache->sectors_per_block << SECTOR_SHIFT; in cache_io_hints()
3508 limits->io_opt = cache->sectors_per_block << SECTOR_SHIFT; in cache_io_hints()
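/*
 * Sketch of the rule in cache_io_hints(): if the stacked io_opt is smaller
 * than a cache block or not a multiple of it, override io_min/io_opt with the
 * cache block size in bytes.  Plain arithmetic; SECTOR_SHIFT_DEMO stands in
 * for the kernel's SECTOR_SHIFT (9, i.e. 512-byte sectors).
 */
#include <stdint.h>

#define SECTOR_SHIFT_DEMO 9

static void io_hints_demo(uint64_t sectors_per_block,
			  uint64_t *io_min_bytes, uint64_t *io_opt_bytes)
{
	uint64_t io_opt_sectors = *io_opt_bytes >> SECTOR_SHIFT_DEMO;

	if (io_opt_sectors < sectors_per_block ||
	    io_opt_sectors % sectors_per_block) {
		*io_min_bytes = sectors_per_block << SECTOR_SHIFT_DEMO;
		*io_opt_bytes = sectors_per_block << SECTOR_SHIFT_DEMO;
	}
}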
3515 /*----------------------------------------------------------------*/
3540 r = -ENOMEM; in dm_cache_init()
3547 r = -ENOMEM; in dm_cache_init()