// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as 64K page would ensure
 *   all nodesize would fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem.  For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning when reading one tree block will only trigger the read for the
 *   needed range, other unrelated range in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback which results more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

/*
 * Return true if the folio backed by @mapping needs the subpage handling
 * routines for this filesystem (i.e. one page covers multiple sectors or
 * multiple tree blocks).
 */
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, which we only go subpage
	 * routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

/*
 * Lay out the per-fs subpage bitmap: one bit per sector for each of the
 * five states (uptodate, dirty, writeback, ordered, checked), packed
 * back-to-back into a single bitmap.  Records each state's starting bit
 * offset and the total bitmap size in @subpage_info.
 */
void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}

/*
 * Attach a newly allocated btrfs_subpage structure as folio private.
 *
 * No-op (returning 0) if the fs doesn't need subpage handling or the folio
 * already has private attached.  Returns -ENOMEM on allocation failure.
 */
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

/* Detach and free the folio's btrfs_subpage private, if any. */
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

/*
 * Allocate a btrfs_subpage structure, sized for the per-fs bitmap layout
 * (flexible "bitmaps" array at the tail, hence struct_size()).
 *
 * Metadata folios track extent buffer references (eb_refs); data folios
 * track reader/writer counts instead.
 */
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're still
 * allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

/* Drop one eb_refs count; pairs with btrfs_folio_inc_eb_refs(). */
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

/* Sanity checks for a subpage range: alignment, private attached, in-folio. */
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped page, we can still have
	 * unmapped page like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

/* Add one reader per sector of [@start, @start + @len) to the folio. */
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, folio, start, len);

	atomic_add(nbits, &subpage->readers);
}

/*
 * Drop the readers added by btrfs_subpage_start_reader(); for data folios
 * the folio is unlocked once the last reader is gone.
 */
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * And please don't replace @last with atomic_sub_and_test() call
	 * inside if () condition.
	 * As we want the atomic_sub_and_test() to be always executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
}

/* Clamp [@start, @start + @len) to the part that overlaps @folio. */
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

/*
 * Add one writer per sector of the range.  Writers and readers are
 * exclusive: the range must currently have no readers.
 */
void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

/*
 * Drop the writers for the range.
 *
 * Return true if the caller is the last writer and should unlock the folio,
 * false otherwise.
 */
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, folio, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle them in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and writer counter updated.
 *
 * Even with 0 returned, the page still need extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidating.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	/* Non-subpage case degenerates to a plain folio lock. */
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	/* Private may have been detached while we waited for the lock. */
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

/*
 * Unlock the part of the folio covered by [@start, @start + @len); the
 * folio itself is only unlocked when the last writer of it is gone.
 */
void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}

/*
 * Translate a file offset inside the folio into the first bit of state
 * @name in subpage::bitmaps (see btrfs_init_subpage_info() for layout).
 */
#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

/* True if every sector of the folio has state @name set. */
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

/* True if no sector of the folio has state @name set. */
#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

/*
 * Set the uptodate bits for the range; the whole folio is only marked
 * uptodate once every sector in it is uptodate.
 */
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Clear the uptodate bits for the range; any sector not uptodate means
 * the folio as a whole is not uptodate, so the folio flag is cleared
 * unconditionally.
 */
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Set the dirty bits for the range, then mark the whole folio dirty.
 * The subpage bits are updated first (under the lock) so the folio dirty
 * flag is never observed without the matching subpage bits.
 */
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra
 * clear_and_test function for subpage dirty bitmap.
 *
 * Return true if we're the last bits in the dirty_bitmap and clear the
 * dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for true case, as we have
 * extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Clear the dirty bits for the range, and clear the folio dirty flag if
 * that left no dirty sector in the folio.
 */
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

/*
 * Set the writeback bits for the range and start folio writeback.
 * The folio may already be under writeback from another sub-range, so
 * only start it when it is not already set.
 */
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Clear the writeback bits for the range; folio writeback is only ended
 * once no sector in the folio is under writeback anymore.
 */
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Set the ordered bits for the range; the folio ordered flag is set
 * unconditionally (any ordered sector makes the folio ordered).
 */
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Clear the ordered bits for the range; the folio ordered flag is only
 * cleared once no ordered sector remains in the folio.
 */
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Set the checked bits for the range; the folio checked flag is only set
 * once every sector in the folio is checked.
 */
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Clear the checked bits for the range; the folio checked flag is cleared
 * unconditionally, as any unchecked sector makes the folio unchecked.
 */
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear which is dependent on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to regular sectorsize branch.
588a1d767c1SQu Wenruo */ 58955151ea9SQu Wenruo #define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func, \ 59055151ea9SQu Wenruo folio_clear_func, folio_test_func) \ 59155151ea9SQu Wenruo void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \ 59255151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) \ 593a1d767c1SQu Wenruo { \ 59413df3775SQu Wenruo if (unlikely(!fs_info) || \ 59555151ea9SQu Wenruo !btrfs_is_subpage(fs_info, folio->mapping)) { \ 59655151ea9SQu Wenruo folio_set_func(folio); \ 597a1d767c1SQu Wenruo return; \ 598a1d767c1SQu Wenruo } \ 59955151ea9SQu Wenruo btrfs_subpage_set_##name(fs_info, folio, start, len); \ 600a1d767c1SQu Wenruo } \ 60155151ea9SQu Wenruo void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \ 60255151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) \ 603a1d767c1SQu Wenruo { \ 60413df3775SQu Wenruo if (unlikely(!fs_info) || \ 60555151ea9SQu Wenruo !btrfs_is_subpage(fs_info, folio->mapping)) { \ 60655151ea9SQu Wenruo folio_clear_func(folio); \ 607a1d767c1SQu Wenruo return; \ 608a1d767c1SQu Wenruo } \ 60955151ea9SQu Wenruo btrfs_subpage_clear_##name(fs_info, folio, start, len); \ 610a1d767c1SQu Wenruo } \ 61155151ea9SQu Wenruo bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \ 61255151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) \ 613a1d767c1SQu Wenruo { \ 61413df3775SQu Wenruo if (unlikely(!fs_info) || \ 61555151ea9SQu Wenruo !btrfs_is_subpage(fs_info, folio->mapping)) \ 61655151ea9SQu Wenruo return folio_test_func(folio); \ 61755151ea9SQu Wenruo return btrfs_subpage_test_##name(fs_info, folio, start, len); \ 61860e2d255SQu Wenruo } \ 61955151ea9SQu Wenruo void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \ 62055151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) \ 62160e2d255SQu Wenruo { \ 62213df3775SQu Wenruo if (unlikely(!fs_info) || \ 62355151ea9SQu Wenruo !btrfs_is_subpage(fs_info, folio->mapping)) { \ 62455151ea9SQu Wenruo 
folio_set_func(folio); \ 62560e2d255SQu Wenruo return; \ 62660e2d255SQu Wenruo } \ 62755151ea9SQu Wenruo btrfs_subpage_clamp_range(folio, &start, &len); \ 62855151ea9SQu Wenruo btrfs_subpage_set_##name(fs_info, folio, start, len); \ 62960e2d255SQu Wenruo } \ 63055151ea9SQu Wenruo void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \ 63155151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) \ 63260e2d255SQu Wenruo { \ 63313df3775SQu Wenruo if (unlikely(!fs_info) || \ 63455151ea9SQu Wenruo !btrfs_is_subpage(fs_info, folio->mapping)) { \ 63555151ea9SQu Wenruo folio_clear_func(folio); \ 63660e2d255SQu Wenruo return; \ 63760e2d255SQu Wenruo } \ 63855151ea9SQu Wenruo btrfs_subpage_clamp_range(folio, &start, &len); \ 63955151ea9SQu Wenruo btrfs_subpage_clear_##name(fs_info, folio, start, len); \ 64060e2d255SQu Wenruo } \ 64155151ea9SQu Wenruo bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \ 64255151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) \ 64360e2d255SQu Wenruo { \ 64413df3775SQu Wenruo if (unlikely(!fs_info) || \ 64555151ea9SQu Wenruo !btrfs_is_subpage(fs_info, folio->mapping)) \ 64655151ea9SQu Wenruo return folio_test_func(folio); \ 64755151ea9SQu Wenruo btrfs_subpage_clamp_range(folio, &start, &len); \ 64855151ea9SQu Wenruo return btrfs_subpage_test_##name(fs_info, folio, start, len); \ 649a1d767c1SQu Wenruo } 65055151ea9SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate, 65155151ea9SQu Wenruo folio_test_uptodate); 65255151ea9SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io, 65355151ea9SQu Wenruo folio_test_dirty); 65455151ea9SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback, 65555151ea9SQu Wenruo folio_test_writeback); 65655151ea9SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered, 65755151ea9SQu Wenruo folio_test_ordered); 65855151ea9SQu Wenruo 
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

/*
 * Make sure not only the page dirty bit is cleared, but also subpage dirty bit
 * is cleared.
 *
 * Debug-only helper: compiles to a no-op unless CONFIG_BTRFS_ASSERT is set.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	/* The folio-level dirty bit must already be clear for every case. */
	ASSERT(!folio_test_dirty(folio));
	/* Non-subpage folios have no per-sector bitmap to check. */
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	/* Subpage folios must carry a valid btrfs_subpage in folio private. */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	/* And every per-sector dirty bit must be clear as well. */
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call subpage helper to handle the case.
694e55a0de1SQu Wenruo */ 69555151ea9SQu Wenruo void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info, 69655151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) 697e55a0de1SQu Wenruo { 698e55a0de1SQu Wenruo struct btrfs_subpage *subpage; 699e55a0de1SQu Wenruo 70055151ea9SQu Wenruo ASSERT(folio_test_locked(folio)); 701fbca46ebSQu Wenruo /* For non-subpage case, we just unlock the page */ 70255151ea9SQu Wenruo if (!btrfs_is_subpage(fs_info, folio->mapping)) { 70355151ea9SQu Wenruo folio_unlock(folio); 70455151ea9SQu Wenruo return; 70555151ea9SQu Wenruo } 706e55a0de1SQu Wenruo 707cfbf07e2SQu Wenruo ASSERT(folio_test_private(folio) && folio_get_private(folio)); 708cfbf07e2SQu Wenruo subpage = folio_get_private(folio); 709e55a0de1SQu Wenruo 710e55a0de1SQu Wenruo /* 711e55a0de1SQu Wenruo * For subpage case, there are two types of locked page. With or 712e55a0de1SQu Wenruo * without writers number. 713e55a0de1SQu Wenruo * 714e55a0de1SQu Wenruo * Since we own the page lock, no one else could touch subpage::writers 715e55a0de1SQu Wenruo * and we are safe to do several atomic operations without spinlock. 
716e55a0de1SQu Wenruo */ 71755151ea9SQu Wenruo if (atomic_read(&subpage->writers) == 0) { 718e55a0de1SQu Wenruo /* No writers, locked by plain lock_page() */ 71955151ea9SQu Wenruo folio_unlock(folio); 72055151ea9SQu Wenruo return; 72155151ea9SQu Wenruo } 722e55a0de1SQu Wenruo 723e55a0de1SQu Wenruo /* Have writers, use proper subpage helper to end it */ 72455151ea9SQu Wenruo btrfs_folio_end_writer_lock(fs_info, folio, start, len); 725e55a0de1SQu Wenruo } 72675258f20SQu Wenruo 72775258f20SQu Wenruo #define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ 72875258f20SQu Wenruo bitmap_cut(dst, subpage->bitmaps, 0, \ 72975258f20SQu Wenruo subpage_info->name##_offset, subpage_info->bitmap_nr_bits) 73075258f20SQu Wenruo 73175258f20SQu Wenruo void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, 73255151ea9SQu Wenruo struct folio *folio, u64 start, u32 len) 73375258f20SQu Wenruo { 73475258f20SQu Wenruo struct btrfs_subpage_info *subpage_info = fs_info->subpage_info; 73575258f20SQu Wenruo struct btrfs_subpage *subpage; 73675258f20SQu Wenruo unsigned long uptodate_bitmap; 73775258f20SQu Wenruo unsigned long error_bitmap; 73875258f20SQu Wenruo unsigned long dirty_bitmap; 73975258f20SQu Wenruo unsigned long writeback_bitmap; 74075258f20SQu Wenruo unsigned long ordered_bitmap; 74175258f20SQu Wenruo unsigned long checked_bitmap; 74275258f20SQu Wenruo unsigned long flags; 74375258f20SQu Wenruo 744cfbf07e2SQu Wenruo ASSERT(folio_test_private(folio) && folio_get_private(folio)); 74575258f20SQu Wenruo ASSERT(subpage_info); 746cfbf07e2SQu Wenruo subpage = folio_get_private(folio); 74775258f20SQu Wenruo 74875258f20SQu Wenruo spin_lock_irqsave(&subpage->lock, flags); 74975258f20SQu Wenruo GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap); 75075258f20SQu Wenruo GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap); 75175258f20SQu Wenruo GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap); 75275258f20SQu Wenruo 
GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap); 75375258f20SQu Wenruo GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap); 75475258f20SQu Wenruo spin_unlock_irqrestore(&subpage->lock, flags); 75575258f20SQu Wenruo 75655151ea9SQu Wenruo dump_page(folio_page(folio, 0), "btrfs subpage dump"); 75775258f20SQu Wenruo btrfs_warn(fs_info, 75875258f20SQu Wenruo "start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl error=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl", 75955151ea9SQu Wenruo start, len, folio_pos(folio), 76075258f20SQu Wenruo subpage_info->bitmap_nr_bits, &uptodate_bitmap, 76175258f20SQu Wenruo subpage_info->bitmap_nr_bits, &error_bitmap, 76275258f20SQu Wenruo subpage_info->bitmap_nr_bits, &dirty_bitmap, 76375258f20SQu Wenruo subpage_info->bitmap_nr_bits, &writeback_bitmap, 76475258f20SQu Wenruo subpage_info->bitmap_nr_bits, &ordered_bitmap, 76575258f20SQu Wenruo subpage_info->bitmap_nr_bits, &checked_bitmap); 76675258f20SQu Wenruo } 767