--- dm-cache-target.c (86a3238c7b9b759cb864f4f768ab2e24687dc0e6)
+++ dm-cache-target.c (a4a82ce3d24d4409143a7b7b980072ada6e20b2a)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012 Red Hat. All rights reserved.
  *
  * This file is released under the GPL.
  */
 
 #include "dm.h"

--- 511 unchanged lines hidden ---

@@ -520,19 +520,21 @@
 
 static unsigned int lock_level(struct bio *bio)
 {
 	return bio_data_dir(bio) == WRITE ?
 		WRITE_LOCK_LEVEL :
 		READ_WRITE_LOCK_LEVEL;
 }
 
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Per bio data
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
 
 static struct per_bio_data *get_per_bio_data(struct bio *bio)
 {
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 	BUG_ON(!pb);
 	return pb;
 }
 

--- 162 unchanged lines hidden ---

@@ -701,19 +703,21 @@
 	spin_lock_irq(&cache->lock);
 	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
 		     cache->discard_bitset);
 	spin_unlock_irq(&cache->lock);
 
 	return r;
 }
 
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
  * Remapping
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
 static void remap_to_origin(struct cache *cache, struct bio *bio)
 {
 	bio_set_dev(bio, cache->origin_dev->bdev);
 }
 
 static void remap_to_cache(struct cache *cache, struct bio *bio,
 			   dm_cblock_t cblock)
 {

--- 109 unchanged lines hidden ---

@@ -829,19 +833,21 @@
 
 	if (bio_data_dir(origin_bio) == WRITE)
 		clear_discard(cache, oblock_to_dblock(cache, oblock));
 	submit_bio(origin_bio);
 
 	remap_to_cache(cache, bio, cblock);
 }
 
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Failure modes
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
 static enum cache_metadata_mode get_cache_mode(struct cache *cache)
 {
 	return cache->features.mode;
 }
 
 static const char *cache_device_name(struct cache *cache)
 {
 	return dm_table_device_name(cache->ti->table);

--- 120 unchanged lines hidden ---

@@ -968,23 +974,24 @@
 		break;
 
 	case POLICY_WRITEBACK:
 		atomic_inc(&stats->writeback);
 		break;
 	}
 }
 
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
  * Migration processing
  *
  * Migration covers moving data from the origin device to the cache, or
  * vice versa.
- *--------------------------------------------------------------*/
-
+ *---------------------------------------------------------------------
+ */
 static void inc_io_migrations(struct cache *cache)
 {
 	atomic_inc(&cache->nr_io_migrations);
 }
 
 static void dec_io_migrations(struct cache *cache)
 {
 	atomic_dec(&cache->nr_io_migrations);

--- 435 unchanged lines hidden ---

@@ -1426,19 +1433,21 @@
 	mg->overwrite_bio = bio;
 
 	if (!bio)
 		inc_io_migrations(cache);
 
 	return mg_lock_writes(mg);
 }
 
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * invalidation processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
 
 static void invalidate_complete(struct dm_cache_migration *mg, bool success)
 {
 	struct bio_list bios;
 	struct cache *cache = mg->cache;
 
 	bio_list_init(&bios);
 	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))

--- 104 unchanged lines hidden ---

@@ -1549,19 +1558,21 @@
 
 	mg->overwrite_bio = bio;
 	mg->invalidate_cblock = cblock;
 	mg->invalidate_oblock = oblock;
 
 	return invalidate_lock(mg);
 }
 
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * bio processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
 
 enum busy {
 	IDLE,
 	BUSY
 };
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {

--- 191 unchanged lines hidden ---

@@ -1759,19 +1770,21 @@
 	issue_after_commit(&cache->committer, bio);
 	return true;
 }
 
 static bool process_discard_bio(struct cache *cache, struct bio *bio)
 {
 	dm_dblock_t b, e;
 
-	// FIXME: do we need to lock the region? Or can we just assume the
-	// user wont be so foolish as to issue discard concurrently with
-	// other IO?
+	/*
+	 * FIXME: do we need to lock the region? Or can we just assume the
+	 * user wont be so foolish as to issue discard concurrently with
+	 * other IO?
+	 */
 	calc_discard_block_range(cache, bio, &b, &e);
 	while (b != e) {
 		set_discard(cache, b);
 		b = to_dblock(from_dblock(b) + 1);
 	}
 
 	if (cache->features.discard_passdown) {
 		remap_to_origin(cache, bio);

--- 29 unchanged lines hidden ---

@@ -1807,20 +1820,21 @@
 		else
 			commit_needed = process_bio(cache, bio) || commit_needed;
 	}
 
 	if (commit_needed)
 		schedule_commit(&cache->committer);
 }
 
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Main worker loop
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
 static void requeue_deferred_bios(struct cache *cache)
 {
 	struct bio *bio;
 	struct bio_list bios;
 
 	bio_list_init(&bios);
 	bio_list_merge(&bios, &cache->deferred_bios);
 	bio_list_init(&cache->deferred_bios);

--- 39 unchanged lines hidden ---

@@ -1866,19 +1880,21 @@
 		}
 
 		r = mg_start(cache, op, NULL);
 		if (r)
 			break;
 	}
 }
 
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
 
 /*
  * This function gets called on the error paths of the constructor, so we
  * have to cope with a partially initialised struct.
  */
 static void destroy(struct cache *cache)
 {
 	unsigned int i;

--- 1567 unchanged lines hidden ---
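Every hunk in this diff makes the same kind of change: the dashed section banners and the `//` FIXME are reflowed so the comment is opened and closed on lines of its own, in line with the multi-line comment layout described in Documentation/process/coding-style.rst. The sketch below is a hypothetical illustration of the two layouts, not code taken from dm-cache-target.c; the function name is a placeholder.

```c
/*----------------------------------------------------------------
 * Old layout: the dashed banner doubles as the comment delimiters.
 *--------------------------------------------------------------*/

/*
 *--------------------------------------------------------------
 * New layout: the comment is opened and closed on dedicated lines,
 * matching the right-hand column of the hunks above.
 *--------------------------------------------------------------
 */
void example_section(void)	/* hypothetical placeholder, not a dm function */
{
}
```

The dashed banners themselves are a device-mapper convention; coding-style.rst only concerns how the block comment is opened and closed.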