// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   all nodesizes fit inside one page, thus we don't need to handle cases
 *   where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem. For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   This means reading one tree block will only trigger the read for the
 *   needed range, other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *     0         16K         32K         48K        64K
 *     |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page. This provides the
 *   extra granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */
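/*
 * Worked example (illustrative, assuming a 64K page and 4K sectorsize):
 * each per-page state (uptodate, dirty, ...) is tracked with one bit per
 * sector, and a byte range is converted to a bit range like this:
 *
 *	const u32 nbits = len >> fs_info->sectorsize_bits;	// one bit per 4K
 *	unsigned int first_bit = offset_in_page(start) >> fs_info->sectorsize_bits;
 *
 * So a 16K write at page offset 32K sets bits [8, 12) of the dirty bitmap,
 * leaving the state of the other 12 sectors in the page untouched.
 */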
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!page->mapping || !page->mapping->host ||
	    is_data_inode(page->mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, for which we only go the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}
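/*
 * Worked example of the resulting layout (illustrative): with a 64K page
 * and 4K sectorsize, nr_bits is 16 and the packed bitmap looks like:
 *
 *	bits [ 0, 16)	uptodate
 *	bits [16, 32)	dirty
 *	bits [32, 48)	writeback
 *	bits [48, 64)	ordered
 *	bits [64, 80)	checked
 *
 * total_nr_bits is then 80, so btrfs_alloc_subpage() below allocates
 * BITS_TO_LONGS(80) == 2 unsigned longs (on 64-bit) for the bitmaps[] array.
 */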
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, page) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, page) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
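/*
 * Typical usage sketch (illustrative, not a real call site): a caller that
 * just added a page to an address space attaches the subpage structure
 * before tracking any per-sector state, and detaches it on page release:
 *
 *	lock_page(page);
 *	ret = btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
 *	if (ret < 0)
 *		goto out_unlock;
 *	...
 *	btrfs_detach_subpage(fs_info, page);	// e.g. from a release path
 */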
/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(folio_test_private(folio) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(folio_test_private(folio) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
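/*
 * Sketch of the expected pairing (illustrative): both helpers must run
 * under mapping->private_lock, and every increment taken during extent
 * buffer setup is balanced by a decrement when the eb releases the page:
 *
 *	spin_lock(&page->mapping->private_lock);
 *	btrfs_page_inc_eb_refs(fs_info, page);
 *	spin_unlock(&page->mapping->private_lock);
 *	...
 *	spin_lock(&page->mapping->private_lock);
 *	btrfs_page_dec_eb_refs(fs_info, page);
 *	spin_unlock(&page->mapping->private_lock);
 */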
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages, we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	atomic_add(nbits, &subpage->readers);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, page, start, len);
	is_data = is_data_inode(page->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * And please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we want the atomic_sub_and_test()
	 * to always be executed.
	 */
	if (is_data && last)
		unlock_page(page);
}
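/*
 * Sketch of the reader accounting (illustrative, not a real call site):
 * the read path adds one reader per sector before submitting I/O for a
 * sub-page range, and the read-endio path drops them again, unlocking the
 * data page only when the last reader of the page finishes:
 *
 *	btrfs_subpage_start_reader(fs_info, page, start, len);
 *	submit_read_bio_for_range(page, start, len);	// hypothetical helper
 *	...
 *	// in the read end I/O handler:
 *	btrfs_subpage_end_reader(fs_info, page, start, len);
 */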
static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, page_offset(page), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (page_offset(page) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0. Handle them in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}
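/*
 * Worked example for btrfs_subpage_clamp_range() (illustrative, assuming a
 * 64K page at file offset 128K):
 *
 *	start = 120K, len = 40K  ->  clamped to start = 128K, len = 32K
 *	start = 60K,  len = 40K  ->  clamped to len = 0 (page is past range)
 *
 * This is why the clamp variants of the page helpers further below can be
 * called with ranges that only partially overlap the page.
 */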
/*
 * Lock a page for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
		lock_page(page);
		return 0;
	}
	lock_page(page);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		unlock_page(page);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(page, &start, &len);
	btrfs_subpage_start_writer(fs_info, page, start, len);
	return 0;
}

void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
		return unlock_page(page);
	btrfs_subpage_clamp_range(page, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
		unlock_page(page);
}

#define subpage_calc_start_bit(fs_info, page, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, page, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
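/*
 * Illustrative sketch of the "all set" promotion above, assuming a 64K
 * page with 4K sectors that gets read in two halves:
 *
 *	btrfs_subpage_set_uptodate(fs_info, page, page_offset(page), SZ_32K);
 *	// Only bits [0, 8) are set: PageUptodate stays clear.
 *	btrfs_subpage_set_uptodate(fs_info, page, page_offset(page) + SZ_32K,
 *				   SZ_32K);
 *	// All 16 bits are now set: SetPageUptodate(page) fires.
 */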
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	set_page_dirty(page);
}
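/*
 * Note the asymmetry between set and clear for dirty (illustrative): a
 * single dirty sector is enough to keep the whole page dirty, so writeback
 * typically pairs the helpers as shown below:
 *
 *	btrfs_subpage_set_dirty(fs_info, page, start, len);
 *	...
 *	if (btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len))
 *		clear_page_dirty_for_io(page);	// last dirty range is gone
 */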
/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we're the last bits in the dirty_bitmap and clear the
 * dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	if (last)
		clear_page_dirty_for_io(page);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(PageWriteback(page));
		end_page_writeback(page);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}
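/*
 * Writeback lifecycle sketch (illustrative): each submitted sub-page range
 * marks its sectors, and end_page_writeback() only fires once the last
 * range finishes, mirroring the dirty accounting above:
 *
 *	btrfs_subpage_set_writeback(fs_info, page, start, len);
 *	submit_write_bio_for_range(page, start, len);	// hypothetical helper
 *	...
 *	// in the write end I/O handler:
 *	btrfs_subpage_clear_writeback(fs_info, page, start, len);
 */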
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	SetPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		ClearPageOrdered(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		SetPageChecked(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	ClearPageChecked(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
 * Unlike set/clear, which depend on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct page *page, u64 start, u32 len)	\
{									\
	struct folio *folio = page_folio(page);				\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
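/*
 * For reference (illustrative), each instantiation above expands to a
 * plain function, so IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty) provides:
 *
 *	bool btrfs_subpage_test_dirty(const struct btrfs_fs_info *fs_info,
 *				      struct page *page, u64 start, u32 len);
 *
 * which returns true only if every sector bit in [start, start + len) of
 * the dirty bitmap is set.
 */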
/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in. We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
				 test_page_func)			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,	\
			   struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
			     struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
			    struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}									\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				 struct page *page, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				   struct page *page, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				  struct page *page, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
		return test_page_func(page);				\
	btrfs_subpage_clamp_range(page, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
			 PageOrdered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);

/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				 struct page *page)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!PageDirty(page));
	if (!btrfs_is_subpage(fs_info, page))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}
/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle the case.
 */
void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
			      u64 start, u32 len)
{
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;

	ASSERT(PageLocked(page));
	/* For the non-subpage case, we just unlock the page. */
	if (!btrfs_is_subpage(fs_info, page))
		return unlock_page(page);

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a writers count.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0)
		/* No writers, locked by plain lock_page(). */
		return unlock_page(page);

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_page_end_writer_lock(fs_info, page, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)
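/*
 * Illustrative note (assumption about intent): GET_SUBPAGE_BITMAP() is
 * only used for dumping; it pulls one named per-type bitmap out of the
 * packed bitmaps[] array into @dst so it can be printed with %*pbl
 * starting at bit 0, e.g.:
 *
 *	unsigned long dirty_bitmap;
 *
 *	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
 */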
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct folio *folio = page_folio(page);
	struct btrfs_subpage *subpage;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_info);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(page, "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		   start, len, page_offset(page),
		   subpage_info->bitmap_nr_bits, &uptodate_bitmap,
		   subpage_info->bitmap_nr_bits, &dirty_bitmap,
		   subpage_info->bitmap_nr_bits, &writeback_bitmap,
		   subpage_info->bitmap_nr_bits, &ordered_bitmap,
		   subpage_info->bitmap_nr_bits, &checked_bitmap);
}
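/*
 * Usage sketch (illustrative): the dump helper is intended for debugging
 * paths that are about to trip an assertion, so the subpage state is
 * visible in the log before the crash, e.g.:
 *
 *	if (unlikely(bad_subpage_state)) {	// hypothetical condition
 *		btrfs_subpage_dump_bitmap(fs_info, page, start, len);
 *		ASSERT(0);
 *	}
 */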