// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "ctree.h"
#include "subpage.h"

/*
 * Attach a btrfs_subpage structure of the given @type to @page as page
 * private.
 *
 * Returns 0 on success (including the regular sectorsize == PAGE_SIZE case,
 * where nothing is attached) and -ENOMEM if allocation fails.
 */
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage = NULL;
	int ret;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (page->mapping)
		ASSERT(PageLocked(page));
	/* Either not subpage, or the page already has private attached */
	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
		return 0;

	ret = btrfs_alloc_subpage(fs_info, &subpage, type);
	if (ret < 0)
		return ret;
	attach_page_private(page, subpage);
	return 0;
}

/* Detach and free the btrfs_subpage structure attached to @page, if any */
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or already detached */
	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
		return;

	subpage = (struct btrfs_subpage *)detach_page_private(page);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

/*
 * Allocate a new btrfs_subpage structure into @ret.
 *
 * For the regular sectorsize == PAGE_SIZE case this is a no-op: it returns 0
 * without touching @ret.  Only the counter matching @type is initialized.
 */
int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
			struct btrfs_subpage **ret,
			enum btrfs_subpage_type type)
{
	if (fs_info->sectorsize == PAGE_SIZE)
		return 0;

	*ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
	if (!*ret)
		return -ENOMEM;
	spin_lock_init(&(*ret)->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&(*ret)->eb_refs, 0);
	else
		atomic_set(&(*ret)->readers, 0);
	return 0;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into radix tree,
 * detach_extent_buffer_page() won't detach the page private while we're still
 * allocating the extent buffer.
 */
void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	atomic_inc(&subpage->eb_refs);
}

/* Decrease the eb_refs of current subpage; the counter must not underflow */
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page)
{
	struct btrfs_subpage *subpage;

	if (fs_info->sectorsize == PAGE_SIZE)
		return;

	ASSERT(PagePrivate(page) && page->mapping);
	lockdep_assert_held(&page->mapping->private_lock);

	subpage = (struct btrfs_subpage *)page->private;
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

/* Sanity checks: the range must be sector aligned and covered by the page */
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	/* Basic checks */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped page, we can still have
	 * unmapped page like dummy extent buffer pages.
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;
	int ret;

	btrfs_subpage_assert(fs_info, page, start, len);

	/* The page must have no readers before a new read starts */
	ret = atomic_add_return(nbits, &subpage->readers);
	ASSERT(ret == nbits);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	/* Only unlock the page when the last reader finishes */
	if (atomic_sub_and_test(nbits, &subpage->readers))
		unlock_page(page);
}

/*
 * Convert the [start, start + len) range into a u16 bitmap
 *
 * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
 */
static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
				     struct page *page, u64 start, u32 len)
{
	const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * Here nbits can be 16, thus can go beyond u16 range. We make the
	 * first left shift to be calculated in unsigned long (at least u32),
	 * then truncate the result to u16.
	 */
	return (u16)(((1UL << nbits) - 1) << bit_start);
}

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->uptodate_bitmap |= tmp;
	/* Only set the whole page uptodate when every sector is uptodate */
	if (subpage->uptodate_bitmap == U16_MAX)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->uptodate_bitmap &= ~tmp;
	/* Any non-uptodate sector makes the whole page not uptodate */
	ClearPageUptodate(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->error_bitmap |= tmp;
	/* Any sector error marks the whole page errored */
	SetPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->error_bitmap &= ~tmp;
	/* Only clear the page error flag when no sector has an error left */
	if (subpage->error_bitmap == 0)
		ClearPageError(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap |= tmp;
	spin_unlock_irqrestore(&subpage->lock, flags);
	/* set_page_dirty() may sleep, thus it is called outside the lock */
	set_page_dirty(page);
}

/*
 * Extra clear_and_test function for subpage dirty bitmap.
 *
 * Return true if we're the last bits in the dirty_bitmap and clear the
 * dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for true case, as we have
 * extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->dirty_bitmap &= ~tmp;
	if (subpage->dirty_bitmap == 0)
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct page *page, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
	/* Only clear the page dirty flag when the last dirty range is gone */
	if (last)
		clear_page_dirty_for_io(page);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->writeback_bitmap |= tmp;
	set_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct page *page, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	subpage->writeback_bitmap &= ~tmp;
	/* Only end the page writeback when the last range finishes */
	if (subpage->writeback_bitmap == 0)
		end_page_writeback(page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear which is dependent on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = ((subpage->name##_bitmap & tmp) == tmp);			\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);

/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in. We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
			       test_page_func)				\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
			 PageDirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
			 PageWriteback);