xref: /linux/fs/btrfs/subpage.c (revision fbca46eb46ecc4443137e58cf9802a888c9ca136)
1cac06d84SQu Wenruo // SPDX-License-Identifier: GPL-2.0
2cac06d84SQu Wenruo 
3cac06d84SQu Wenruo #include <linux/slab.h>
4cac06d84SQu Wenruo #include "ctree.h"
5cac06d84SQu Wenruo #include "subpage.h"
63d078efaSQu Wenruo #include "btrfs_inode.h"
7cac06d84SQu Wenruo 
8894d1378SQu Wenruo /*
9894d1378SQu Wenruo  * Subpage (sectorsize < PAGE_SIZE) support overview:
10894d1378SQu Wenruo  *
11894d1378SQu Wenruo  * Limitations:
12894d1378SQu Wenruo  *
13894d1378SQu Wenruo  * - Only 64K page size is supported for now
14894d1378SQu Wenruo  *   This makes metadata handling easier, as a 64K page ensures that
15894d1378SQu Wenruo  *   any valid nodesize fits inside one page, thus we don't need to handle
16894d1378SQu Wenruo  *   cases where a tree block crosses several pages.
17894d1378SQu Wenruo  *
18894d1378SQu Wenruo  * - Only metadata read-write is supported for now
19894d1378SQu Wenruo  *   The data read-write part is in development.
20894d1378SQu Wenruo  *
21894d1378SQu Wenruo  * - Metadata can't cross a 64K page boundary
22894d1378SQu Wenruo  *   btrfs-progs and the kernel have enforced this for a while, thus only
23894d1378SQu Wenruo  *   ancient filesystems could have such a problem.  Such filesystems are
24894d1378SQu Wenruo  *   rejected gracefully.
25894d1378SQu Wenruo  *
26894d1378SQu Wenruo  * Special behavior:
27894d1378SQu Wenruo  *
28894d1378SQu Wenruo  * - Metadata
29894d1378SQu Wenruo  *   Metadata read is fully supported.
30894d1378SQu Wenruo  *   This means reading one tree block will only trigger the read for the
31894d1378SQu Wenruo  *   needed range; other unrelated ranges in the same page will not be touched.
32894d1378SQu Wenruo  *
33894d1378SQu Wenruo  *   Metadata write support is partial.
34894d1378SQu Wenruo  *   The writeback is still for the full page, but we will only submit
35894d1378SQu Wenruo  *   the dirty extent buffers in the page.
36894d1378SQu Wenruo  *
37894d1378SQu Wenruo  *   This means, if we have a metadata page like this:
38894d1378SQu Wenruo  *
39894d1378SQu Wenruo  *   Page offset
40894d1378SQu Wenruo  *   0         16K         32K         48K        64K
41894d1378SQu Wenruo  *   |/////////|           |///////////|
42894d1378SQu Wenruo  *        \- Tree block A        \- Tree block B
43894d1378SQu Wenruo  *
44894d1378SQu Wenruo  *   Even if we just want to write back tree block A, we will also write back
45894d1378SQu Wenruo  *   tree block B if it's also dirty.
46894d1378SQu Wenruo  *
47894d1378SQu Wenruo  *   This may cause extra metadata writeback, which results in more COW.
48894d1378SQu Wenruo  *
49894d1378SQu Wenruo  * Implementation:
50894d1378SQu Wenruo  *
51894d1378SQu Wenruo  * - Common
52894d1378SQu Wenruo  *   Both metadata and data will use a new structure, btrfs_subpage, to
53894d1378SQu Wenruo  *   record the status of each sector inside a page.  This provides the extra
54894d1378SQu Wenruo  *   granularity needed.
55894d1378SQu Wenruo  *
56894d1378SQu Wenruo  * - Metadata
57894d1378SQu Wenruo  *   Since we have multiple tree blocks inside one page, we can't rely on page
58894d1378SQu Wenruo  *   locking anymore, or we will have greatly reduced concurrency or even
59894d1378SQu Wenruo  *   deadlocks (holding one tree block's lock while trying to lock another
60894d1378SQu Wenruo  *   tree block in the same page).
61894d1378SQu Wenruo  *
62894d1378SQu Wenruo  *   Thus for metadata locking, subpage support relies on io_tree locking only.
63894d1378SQu Wenruo  *   This means a slightly higher tree locking latency.
64894d1378SQu Wenruo  */
65894d1378SQu Wenruo 
66*fbca46ebSQu Wenruo bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
67*fbca46ebSQu Wenruo {
68*fbca46ebSQu Wenruo 	if (fs_info->sectorsize >= PAGE_SIZE)
69*fbca46ebSQu Wenruo 		return false;
70*fbca46ebSQu Wenruo 
71*fbca46ebSQu Wenruo 	/*
72*fbca46ebSQu Wenruo 	 * Only data pages (either through DIO or compression) can have no
73*fbca46ebSQu Wenruo 	 * mapping. And if page->mapping->host is a data inode, it's a subpage
74*fbca46ebSQu Wenruo 	 * page, as we have already ruled out the sectorsize >= PAGE_SIZE case.
75*fbca46ebSQu Wenruo 	 */
76*fbca46ebSQu Wenruo 	if (!page->mapping || !page->mapping->host ||
77*fbca46ebSQu Wenruo 	    is_data_inode(page->mapping->host))
78*fbca46ebSQu Wenruo 		return true;
79*fbca46ebSQu Wenruo 
80*fbca46ebSQu Wenruo 	/*
81*fbca46ebSQu Wenruo 	 * Now the only remaining case is metadata, which goes through the
82*fbca46ebSQu Wenruo 	 * subpage routines only if nodesize < PAGE_SIZE.
83*fbca46ebSQu Wenruo 	 */
84*fbca46ebSQu Wenruo 	if (fs_info->nodesize < PAGE_SIZE)
85*fbca46ebSQu Wenruo 		return true;
86*fbca46ebSQu Wenruo 	return false;
87*fbca46ebSQu Wenruo }
88*fbca46ebSQu Wenruo 
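/*
 * A worked example of the layout btrfs_init_subpage_info() below produces
 * (assuming 64K page size and 4K sectorsize, i.e. 16 sectors per page):
 *
 *   bitmap_nr_bits   = 16
 *   uptodate_offset  =  0
 *   error_offset     = 16
 *   dirty_offset     = 32
 *   writeback_offset = 48
 *   ordered_offset   = 64
 *   checked_offset   = 80
 *   total_nr_bits    = 96
 *
 * So all six sub-bitmaps share one btrfs_subpage::bitmaps[] array, which on
 * 64-bit systems needs only BITS_TO_LONGS(96) == 2 unsigned longs.
 */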
898481dd80SQu Wenruo void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
908481dd80SQu Wenruo {
918481dd80SQu Wenruo 	unsigned int cur = 0;
928481dd80SQu Wenruo 	unsigned int nr_bits;
938481dd80SQu Wenruo 
948481dd80SQu Wenruo 	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));
958481dd80SQu Wenruo 
968481dd80SQu Wenruo 	nr_bits = PAGE_SIZE / sectorsize;
978481dd80SQu Wenruo 	subpage_info->bitmap_nr_bits = nr_bits;
988481dd80SQu Wenruo 
998481dd80SQu Wenruo 	subpage_info->uptodate_offset = cur;
1008481dd80SQu Wenruo 	cur += nr_bits;
1018481dd80SQu Wenruo 
1028481dd80SQu Wenruo 	subpage_info->error_offset = cur;
1038481dd80SQu Wenruo 	cur += nr_bits;
1048481dd80SQu Wenruo 
1058481dd80SQu Wenruo 	subpage_info->dirty_offset = cur;
1068481dd80SQu Wenruo 	cur += nr_bits;
1078481dd80SQu Wenruo 
1088481dd80SQu Wenruo 	subpage_info->writeback_offset = cur;
1098481dd80SQu Wenruo 	cur += nr_bits;
1108481dd80SQu Wenruo 
1118481dd80SQu Wenruo 	subpage_info->ordered_offset = cur;
1128481dd80SQu Wenruo 	cur += nr_bits;
1138481dd80SQu Wenruo 
114e4f94347SQu Wenruo 	subpage_info->checked_offset = cur;
115e4f94347SQu Wenruo 	cur += nr_bits;
116e4f94347SQu Wenruo 
1178481dd80SQu Wenruo 	subpage_info->total_nr_bits = cur;
1188481dd80SQu Wenruo }
1198481dd80SQu Wenruo 
120cac06d84SQu Wenruo int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
121cac06d84SQu Wenruo 			 struct page *page, enum btrfs_subpage_type type)
122cac06d84SQu Wenruo {
123651fb419SQu Wenruo 	struct btrfs_subpage *subpage;
124cac06d84SQu Wenruo 
125cac06d84SQu Wenruo 	/*
126cac06d84SQu Wenruo 	 * We have cases like a dummy extent buffer page, which is not mapped
127cac06d84SQu Wenruo 	 * and doesn't need to be locked.
128cac06d84SQu Wenruo 	 */
129cac06d84SQu Wenruo 	if (page->mapping)
130cac06d84SQu Wenruo 		ASSERT(PageLocked(page));
131651fb419SQu Wenruo 
132cac06d84SQu Wenruo 	/* Either not subpage, or the page already has private attached */
133*fbca46ebSQu Wenruo 	if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
134cac06d84SQu Wenruo 		return 0;
135cac06d84SQu Wenruo 
136651fb419SQu Wenruo 	subpage = btrfs_alloc_subpage(fs_info, type);
137651fb419SQu Wenruo 	if (IS_ERR(subpage))
138651fb419SQu Wenruo 		return PTR_ERR(subpage);
139651fb419SQu Wenruo 
140cac06d84SQu Wenruo 	attach_page_private(page, subpage);
141cac06d84SQu Wenruo 	return 0;
142cac06d84SQu Wenruo }
143cac06d84SQu Wenruo 
144cac06d84SQu Wenruo void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
145cac06d84SQu Wenruo 			  struct page *page)
146cac06d84SQu Wenruo {
147cac06d84SQu Wenruo 	struct btrfs_subpage *subpage;
148cac06d84SQu Wenruo 
149cac06d84SQu Wenruo 	/* Either not subpage, or already detached */
150*fbca46ebSQu Wenruo 	if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
151cac06d84SQu Wenruo 		return;
152cac06d84SQu Wenruo 
153cac06d84SQu Wenruo 	subpage = (struct btrfs_subpage *)detach_page_private(page);
154cac06d84SQu Wenruo 	ASSERT(subpage);
155760f991fSQu Wenruo 	btrfs_free_subpage(subpage);
156760f991fSQu Wenruo }
157760f991fSQu Wenruo 
158651fb419SQu Wenruo struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
159760f991fSQu Wenruo 					  enum btrfs_subpage_type type)
160760f991fSQu Wenruo {
161651fb419SQu Wenruo 	struct btrfs_subpage *ret;
16272a69cd0SQu Wenruo 	unsigned int real_size;
163651fb419SQu Wenruo 
164fdf250dbSQu Wenruo 	ASSERT(fs_info->sectorsize < PAGE_SIZE);
165760f991fSQu Wenruo 
16672a69cd0SQu Wenruo 	real_size = struct_size(ret, bitmaps,
16772a69cd0SQu Wenruo 			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
16872a69cd0SQu Wenruo 	ret = kzalloc(real_size, GFP_NOFS);
169651fb419SQu Wenruo 	if (!ret)
170651fb419SQu Wenruo 		return ERR_PTR(-ENOMEM);
171651fb419SQu Wenruo 
172651fb419SQu Wenruo 	spin_lock_init(&ret->lock);
1731e1de387SQu Wenruo 	if (type == BTRFS_SUBPAGE_METADATA) {
174651fb419SQu Wenruo 		atomic_set(&ret->eb_refs, 0);
1751e1de387SQu Wenruo 	} else {
176651fb419SQu Wenruo 		atomic_set(&ret->readers, 0);
177651fb419SQu Wenruo 		atomic_set(&ret->writers, 0);
1781e1de387SQu Wenruo 	}
179651fb419SQu Wenruo 	return ret;
180760f991fSQu Wenruo }
181760f991fSQu Wenruo 
182760f991fSQu Wenruo void btrfs_free_subpage(struct btrfs_subpage *subpage)
183760f991fSQu Wenruo {
184cac06d84SQu Wenruo 	kfree(subpage);
185cac06d84SQu Wenruo }
1868ff8466dSQu Wenruo 
1878ff8466dSQu Wenruo /*
1888ff8466dSQu Wenruo  * Increase the eb_refs of the current subpage.
1898ff8466dSQu Wenruo  *
1908ff8466dSQu Wenruo  * This is important for eb allocation, to prevent a race with the freeing of
1918ff8466dSQu Wenruo  * the last eb in the same page.
1928ff8466dSQu Wenruo  * With eb_refs increased before the eb is inserted into the radix tree,
1938ff8466dSQu Wenruo  * detach_extent_buffer_page() won't detach the page private while we're still
1948ff8466dSQu Wenruo  * allocating the extent buffer.
1958ff8466dSQu Wenruo  */
1968ff8466dSQu Wenruo void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
1978ff8466dSQu Wenruo 			    struct page *page)
1988ff8466dSQu Wenruo {
1998ff8466dSQu Wenruo 	struct btrfs_subpage *subpage;
2008ff8466dSQu Wenruo 
201*fbca46ebSQu Wenruo 	if (!btrfs_is_subpage(fs_info, page))
2028ff8466dSQu Wenruo 		return;
2038ff8466dSQu Wenruo 
2048ff8466dSQu Wenruo 	ASSERT(PagePrivate(page) && page->mapping);
2058ff8466dSQu Wenruo 	lockdep_assert_held(&page->mapping->private_lock);
2068ff8466dSQu Wenruo 
2078ff8466dSQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
2088ff8466dSQu Wenruo 	atomic_inc(&subpage->eb_refs);
2098ff8466dSQu Wenruo }
2108ff8466dSQu Wenruo 
2118ff8466dSQu Wenruo void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
2128ff8466dSQu Wenruo 			    struct page *page)
2138ff8466dSQu Wenruo {
2148ff8466dSQu Wenruo 	struct btrfs_subpage *subpage;
2158ff8466dSQu Wenruo 
216*fbca46ebSQu Wenruo 	if (!btrfs_is_subpage(fs_info, page))
2178ff8466dSQu Wenruo 		return;
2188ff8466dSQu Wenruo 
2198ff8466dSQu Wenruo 	ASSERT(PagePrivate(page) && page->mapping);
2208ff8466dSQu Wenruo 	lockdep_assert_held(&page->mapping->private_lock);
2218ff8466dSQu Wenruo 
2228ff8466dSQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
2238ff8466dSQu Wenruo 	ASSERT(atomic_read(&subpage->eb_refs));
2248ff8466dSQu Wenruo 	atomic_dec(&subpage->eb_refs);
2258ff8466dSQu Wenruo }
226a1d767c1SQu Wenruo 
22792082d40SQu Wenruo static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
22892082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
22992082d40SQu Wenruo {
23092082d40SQu Wenruo 	/* Basic checks */
23192082d40SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
23292082d40SQu Wenruo 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
23392082d40SQu Wenruo 	       IS_ALIGNED(len, fs_info->sectorsize));
23492082d40SQu Wenruo 	/*
23592082d40SQu Wenruo 	 * The range check only works for mapped pages; we can still have
23692082d40SQu Wenruo 	 * unmapped pages like dummy extent buffer pages.
23792082d40SQu Wenruo 	 */
23892082d40SQu Wenruo 	if (page->mapping)
23992082d40SQu Wenruo 		ASSERT(page_offset(page) <= start &&
24092082d40SQu Wenruo 		       start + len <= page_offset(page) + PAGE_SIZE);
24192082d40SQu Wenruo }
24292082d40SQu Wenruo 
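/*
 * Example of the reader accounting below (assuming 4K sectorsize): starting a
 * read of a 12K range adds 3 to subpage::readers, and for a data page the
 * page lock is released only when the last reader calls
 * btrfs_subpage_end_reader().
 */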
24392082d40SQu Wenruo void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
24492082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
24592082d40SQu Wenruo {
24692082d40SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
24792082d40SQu Wenruo 	const int nbits = len >> fs_info->sectorsize_bits;
24892082d40SQu Wenruo 
24992082d40SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
25092082d40SQu Wenruo 
2513d078efaSQu Wenruo 	atomic_add(nbits, &subpage->readers);
25292082d40SQu Wenruo }
25392082d40SQu Wenruo 
25492082d40SQu Wenruo void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
25592082d40SQu Wenruo 		struct page *page, u64 start, u32 len)
25692082d40SQu Wenruo {
25792082d40SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
25892082d40SQu Wenruo 	const int nbits = len >> fs_info->sectorsize_bits;
2593d078efaSQu Wenruo 	bool is_data;
2603d078efaSQu Wenruo 	bool last;
26192082d40SQu Wenruo 
26292082d40SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
2633d078efaSQu Wenruo 	is_data = is_data_inode(page->mapping->host);
26492082d40SQu Wenruo 	ASSERT(atomic_read(&subpage->readers) >= nbits);
2653d078efaSQu Wenruo 	last = atomic_sub_and_test(nbits, &subpage->readers);
2663d078efaSQu Wenruo 
2673d078efaSQu Wenruo 	/*
2683d078efaSQu Wenruo 	 * For data we need to unlock the page if the last read has finished.
2693d078efaSQu Wenruo 	 *
2703d078efaSQu Wenruo 	 * And please don't replace @last with an atomic_sub_and_test() call
2713d078efaSQu Wenruo 	 * inside the if () condition, as we want the atomic_sub_and_test() to
2723d078efaSQu Wenruo 	 * always be executed.
2733d078efaSQu Wenruo 	 */
2743d078efaSQu Wenruo 	if (is_data && last)
27592082d40SQu Wenruo 		unlock_page(page);
27692082d40SQu Wenruo }
27792082d40SQu Wenruo 
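/*
 * Clamp [*start, *start + *len) to the range covered by @page.
 *
 * Example (assuming 64K page size): for the page at file offset 64K, the
 * range [60K, 76K) is clamped to *start = 64K and *len = 12K, while a range
 * entirely before the page, e.g. [0K, 32K), ends up with *len = 0.
 */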
2781e1de387SQu Wenruo static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
2791e1de387SQu Wenruo {
2801e1de387SQu Wenruo 	u64 orig_start = *start;
2811e1de387SQu Wenruo 	u32 orig_len = *len;
2821e1de387SQu Wenruo 
2831e1de387SQu Wenruo 	*start = max_t(u64, page_offset(page), orig_start);
284e4f94347SQu Wenruo 	/*
285e4f94347SQu Wenruo 	 * For certain call sites like btrfs_drop_pages(), we may have pages
286e4f94347SQu Wenruo 	 * beyond the target range. In that case, just set @len to 0; the subpage
287e4f94347SQu Wenruo 	 * helpers can handle @len == 0 without any problem.
288e4f94347SQu Wenruo 	 */
289e4f94347SQu Wenruo 	if (page_offset(page) >= orig_start + orig_len)
290e4f94347SQu Wenruo 		*len = 0;
291e4f94347SQu Wenruo 	else
2921e1de387SQu Wenruo 		*len = min_t(u64, page_offset(page) + PAGE_SIZE,
2931e1de387SQu Wenruo 			     orig_start + orig_len) - *start;
2941e1de387SQu Wenruo }
2951e1de387SQu Wenruo 
2961e1de387SQu Wenruo void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
2971e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
2981e1de387SQu Wenruo {
2991e1de387SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3001e1de387SQu Wenruo 	const int nbits = (len >> fs_info->sectorsize_bits);
3011e1de387SQu Wenruo 	int ret;
3021e1de387SQu Wenruo 
3031e1de387SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
3041e1de387SQu Wenruo 
3051e1de387SQu Wenruo 	ASSERT(atomic_read(&subpage->readers) == 0);
3061e1de387SQu Wenruo 	ret = atomic_add_return(nbits, &subpage->writers);
3071e1de387SQu Wenruo 	ASSERT(ret == nbits);
3081e1de387SQu Wenruo }
3091e1de387SQu Wenruo 
3101e1de387SQu Wenruo bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
3111e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3121e1de387SQu Wenruo {
3131e1de387SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3141e1de387SQu Wenruo 	const int nbits = (len >> fs_info->sectorsize_bits);
3151e1de387SQu Wenruo 
3161e1de387SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);
3171e1de387SQu Wenruo 
318164674a7SQu Wenruo 	/*
319164674a7SQu Wenruo 	 * We have call sites passing @locked_page into
320164674a7SQu Wenruo 	 * extent_clear_unlock_delalloc() for the compression path.
321164674a7SQu Wenruo 	 *
322164674a7SQu Wenruo 	 * This @locked_page is locked by plain lock_page(), thus its
323164674a7SQu Wenruo 	 * subpage::writers is 0.  Handle it in a special way.
324164674a7SQu Wenruo 	 */
325164674a7SQu Wenruo 	if (atomic_read(&subpage->writers) == 0)
326164674a7SQu Wenruo 		return true;
327164674a7SQu Wenruo 
3281e1de387SQu Wenruo 	ASSERT(atomic_read(&subpage->writers) >= nbits);
3291e1de387SQu Wenruo 	return atomic_sub_and_test(nbits, &subpage->writers);
3301e1de387SQu Wenruo }
3311e1de387SQu Wenruo 
3321e1de387SQu Wenruo /*
3331e1de387SQu Wenruo  * Lock a page for delalloc page writeback.
3341e1de387SQu Wenruo  *
3351e1de387SQu Wenruo  * Return -EAGAIN if the page is not properly initialized.
3361e1de387SQu Wenruo  * Return 0 with the page locked, and writer counter updated.
3371e1de387SQu Wenruo  *
3381e1de387SQu Wenruo  * Even with 0 returned, the page still needs an extra check to make sure
3391e1de387SQu Wenruo  * it's really the correct page, as the caller is using
3401e1de387SQu Wenruo  * find_get_pages_contig(), which can race with page invalidation.
3411e1de387SQu Wenruo  */
3421e1de387SQu Wenruo int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
3431e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3441e1de387SQu Wenruo {
345*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
3461e1de387SQu Wenruo 		lock_page(page);
3471e1de387SQu Wenruo 		return 0;
3481e1de387SQu Wenruo 	}
3491e1de387SQu Wenruo 	lock_page(page);
3501e1de387SQu Wenruo 	if (!PagePrivate(page) || !page->private) {
3511e1de387SQu Wenruo 		unlock_page(page);
3521e1de387SQu Wenruo 		return -EAGAIN;
3531e1de387SQu Wenruo 	}
3541e1de387SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);
3551e1de387SQu Wenruo 	btrfs_subpage_start_writer(fs_info, page, start, len);
3561e1de387SQu Wenruo 	return 0;
3571e1de387SQu Wenruo }
3581e1de387SQu Wenruo 
3591e1de387SQu Wenruo void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
3601e1de387SQu Wenruo 		struct page *page, u64 start, u32 len)
3611e1de387SQu Wenruo {
362*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
3631e1de387SQu Wenruo 		return unlock_page(page);
3641e1de387SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);
3651e1de387SQu Wenruo 	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
3661e1de387SQu Wenruo 		unlock_page(page);
3671e1de387SQu Wenruo }
3681e1de387SQu Wenruo 
36972a69cd0SQu Wenruo static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
37072a69cd0SQu Wenruo 				      unsigned int nbits)
371a1d767c1SQu Wenruo {
37272a69cd0SQu Wenruo 	unsigned int found_zero;
373a1d767c1SQu Wenruo 
37472a69cd0SQu Wenruo 	found_zero = find_next_zero_bit(addr, start + nbits, start);
37572a69cd0SQu Wenruo 	if (found_zero == start + nbits)
37672a69cd0SQu Wenruo 		return true;
37772a69cd0SQu Wenruo 	return false;
378a1d767c1SQu Wenruo }
379a1d767c1SQu Wenruo 
38072a69cd0SQu Wenruo static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
38172a69cd0SQu Wenruo 				       unsigned int nbits)
38272a69cd0SQu Wenruo {
38372a69cd0SQu Wenruo 	unsigned int found_set;
38472a69cd0SQu Wenruo 
38572a69cd0SQu Wenruo 	found_set = find_next_bit(addr, start + nbits, start);
38672a69cd0SQu Wenruo 	if (found_set == start + nbits)
38772a69cd0SQu Wenruo 		return true;
38872a69cd0SQu Wenruo 	return false;
38972a69cd0SQu Wenruo }
39072a69cd0SQu Wenruo 
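/*
 * Calculate the bit number inside btrfs_subpage::bitmaps for the @name
 * sub-bitmap, starting at the sector covering file offset @start.
 *
 * Example (assuming 64K page size and 4K sectorsize, so dirty_offset == 32
 * as laid out by btrfs_init_subpage_info()): for the third sector of a page
 * (offset_in_page(start) == 8K) the dirty bit is (8K >> 12) + 32 == bit 34.
 */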
39172a69cd0SQu Wenruo #define subpage_calc_start_bit(fs_info, page, name, start, len)		\
39272a69cd0SQu Wenruo ({									\
39372a69cd0SQu Wenruo 	unsigned int start_bit;						\
39472a69cd0SQu Wenruo 									\
39572a69cd0SQu Wenruo 	btrfs_subpage_assert(fs_info, page, start, len);		\
39672a69cd0SQu Wenruo 	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
39772a69cd0SQu Wenruo 	start_bit += fs_info->subpage_info->name##_offset;		\
39872a69cd0SQu Wenruo 	start_bit;							\
39972a69cd0SQu Wenruo })
40072a69cd0SQu Wenruo 
40172a69cd0SQu Wenruo #define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
40272a69cd0SQu Wenruo 	bitmap_test_range_all_set(subpage->bitmaps,			\
40372a69cd0SQu Wenruo 			fs_info->subpage_info->name##_offset,		\
40472a69cd0SQu Wenruo 			fs_info->subpage_info->bitmap_nr_bits)
40572a69cd0SQu Wenruo 
40672a69cd0SQu Wenruo #define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
40772a69cd0SQu Wenruo 	bitmap_test_range_all_zero(subpage->bitmaps,			\
40872a69cd0SQu Wenruo 			fs_info->subpage_info->name##_offset,		\
40972a69cd0SQu Wenruo 			fs_info->subpage_info->bitmap_nr_bits)
41072a69cd0SQu Wenruo 
411a1d767c1SQu Wenruo void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
412a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
413a1d767c1SQu Wenruo {
414a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
41572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
41672a69cd0SQu Wenruo 							uptodate, start, len);
417a1d767c1SQu Wenruo 	unsigned long flags;
418a1d767c1SQu Wenruo 
419a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
42072a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
42172a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
422a1d767c1SQu Wenruo 		SetPageUptodate(page);
423a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
424a1d767c1SQu Wenruo }
425a1d767c1SQu Wenruo 
426a1d767c1SQu Wenruo void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
427a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)
428a1d767c1SQu Wenruo {
429a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
43072a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
43172a69cd0SQu Wenruo 							uptodate, start, len);
432a1d767c1SQu Wenruo 	unsigned long flags;
433a1d767c1SQu Wenruo 
434a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
43572a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
436a1d767c1SQu Wenruo 	ClearPageUptodate(page);
437a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
438a1d767c1SQu Wenruo }
439a1d767c1SQu Wenruo 
44003a816b3SQu Wenruo void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
44103a816b3SQu Wenruo 		struct page *page, u64 start, u32 len)
44203a816b3SQu Wenruo {
44303a816b3SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
44472a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
44572a69cd0SQu Wenruo 							error, start, len);
44603a816b3SQu Wenruo 	unsigned long flags;
44703a816b3SQu Wenruo 
44803a816b3SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
44972a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
45003a816b3SQu Wenruo 	SetPageError(page);
45103a816b3SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
45203a816b3SQu Wenruo }
45303a816b3SQu Wenruo 
45403a816b3SQu Wenruo void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
45503a816b3SQu Wenruo 		struct page *page, u64 start, u32 len)
45603a816b3SQu Wenruo {
45703a816b3SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
45872a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
45972a69cd0SQu Wenruo 							error, start, len);
46003a816b3SQu Wenruo 	unsigned long flags;
46103a816b3SQu Wenruo 
46203a816b3SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
46372a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
46472a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, error))
46503a816b3SQu Wenruo 		ClearPageError(page);
46603a816b3SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
46703a816b3SQu Wenruo }
46803a816b3SQu Wenruo 
469d8a5713eSQu Wenruo void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
470d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
471d8a5713eSQu Wenruo {
472d8a5713eSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
47372a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
47472a69cd0SQu Wenruo 							dirty, start, len);
475d8a5713eSQu Wenruo 	unsigned long flags;
476d8a5713eSQu Wenruo 
477d8a5713eSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
47872a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
479d8a5713eSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
480d8a5713eSQu Wenruo 	set_page_dirty(page);
481d8a5713eSQu Wenruo }
482d8a5713eSQu Wenruo 
483d8a5713eSQu Wenruo /*
484d8a5713eSQu Wenruo  * Extra clear_and_test function for subpage dirty bitmap.
485d8a5713eSQu Wenruo  *
486d8a5713eSQu Wenruo  * Return true if we cleared the last set bits in the dirty bitmap, i.e. the
487d8a5713eSQu Wenruo  * whole dirty bitmap is now zero.
488d8a5713eSQu Wenruo  * Return false otherwise.
489d8a5713eSQu Wenruo  *
490d8a5713eSQu Wenruo  * NOTE: Callers should manually clear the page dirty flag for the true case,
491d8a5713eSQu Wenruo  * as we have extra handling for tree blocks.
492d8a5713eSQu Wenruo  */
493d8a5713eSQu Wenruo bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
494d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
495d8a5713eSQu Wenruo {
496d8a5713eSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
49772a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
49872a69cd0SQu Wenruo 							dirty, start, len);
499d8a5713eSQu Wenruo 	unsigned long flags;
500d8a5713eSQu Wenruo 	bool last = false;
501d8a5713eSQu Wenruo 
502d8a5713eSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
50372a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
50472a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
505d8a5713eSQu Wenruo 		last = true;
506d8a5713eSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
507d8a5713eSQu Wenruo 	return last;
508d8a5713eSQu Wenruo }
509d8a5713eSQu Wenruo 
510d8a5713eSQu Wenruo void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
511d8a5713eSQu Wenruo 		struct page *page, u64 start, u32 len)
512d8a5713eSQu Wenruo {
513d8a5713eSQu Wenruo 	bool last;
514d8a5713eSQu Wenruo 
515d8a5713eSQu Wenruo 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
516d8a5713eSQu Wenruo 	if (last)
517d8a5713eSQu Wenruo 		clear_page_dirty_for_io(page);
518d8a5713eSQu Wenruo }
519d8a5713eSQu Wenruo 
5203470da3bSQu Wenruo void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
5213470da3bSQu Wenruo 		struct page *page, u64 start, u32 len)
5223470da3bSQu Wenruo {
5233470da3bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
52472a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
52572a69cd0SQu Wenruo 							writeback, start, len);
5263470da3bSQu Wenruo 	unsigned long flags;
5273470da3bSQu Wenruo 
5283470da3bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
52972a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
5303470da3bSQu Wenruo 	set_page_writeback(page);
5313470da3bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5323470da3bSQu Wenruo }
5333470da3bSQu Wenruo 
5343470da3bSQu Wenruo void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
5353470da3bSQu Wenruo 		struct page *page, u64 start, u32 len)
5363470da3bSQu Wenruo {
5373470da3bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
53872a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
53972a69cd0SQu Wenruo 							writeback, start, len);
5403470da3bSQu Wenruo 	unsigned long flags;
5413470da3bSQu Wenruo 
5423470da3bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
54372a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
54472a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
5457c11d0aeSQu Wenruo 		ASSERT(PageWriteback(page));
5463470da3bSQu Wenruo 		end_page_writeback(page);
5477c11d0aeSQu Wenruo 	}
5483470da3bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5493470da3bSQu Wenruo }
5503470da3bSQu Wenruo 
5516f17400bSQu Wenruo void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
5526f17400bSQu Wenruo 		struct page *page, u64 start, u32 len)
5536f17400bSQu Wenruo {
5546f17400bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
55572a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
55672a69cd0SQu Wenruo 							ordered, start, len);
5576f17400bSQu Wenruo 	unsigned long flags;
5586f17400bSQu Wenruo 
5596f17400bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
56072a69cd0SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
5616f17400bSQu Wenruo 	SetPageOrdered(page);
5626f17400bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5636f17400bSQu Wenruo }
5646f17400bSQu Wenruo 
5656f17400bSQu Wenruo void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
5666f17400bSQu Wenruo 		struct page *page, u64 start, u32 len)
5676f17400bSQu Wenruo {
5686f17400bSQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
56972a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
57072a69cd0SQu Wenruo 							ordered, start, len);
5716f17400bSQu Wenruo 	unsigned long flags;
5726f17400bSQu Wenruo 
5736f17400bSQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
57472a69cd0SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
57572a69cd0SQu Wenruo 	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
5766f17400bSQu Wenruo 		ClearPageOrdered(page);
5776f17400bSQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
5786f17400bSQu Wenruo }
579e4f94347SQu Wenruo 
580e4f94347SQu Wenruo void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
581e4f94347SQu Wenruo 			       struct page *page, u64 start, u32 len)
582e4f94347SQu Wenruo {
583e4f94347SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
584e4f94347SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
585e4f94347SQu Wenruo 							checked, start, len);
586e4f94347SQu Wenruo 	unsigned long flags;
587e4f94347SQu Wenruo 
588e4f94347SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
589e4f94347SQu Wenruo 	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
590e4f94347SQu Wenruo 	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
591e4f94347SQu Wenruo 		SetPageChecked(page);
592e4f94347SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
593e4f94347SQu Wenruo }
594e4f94347SQu Wenruo 
595e4f94347SQu Wenruo void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
596e4f94347SQu Wenruo 				 struct page *page, u64 start, u32 len)
597e4f94347SQu Wenruo {
598e4f94347SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
599e4f94347SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
600e4f94347SQu Wenruo 							checked, start, len);
601e4f94347SQu Wenruo 	unsigned long flags;
602e4f94347SQu Wenruo 
603e4f94347SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);
604e4f94347SQu Wenruo 	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
605e4f94347SQu Wenruo 	ClearPageChecked(page);
606e4f94347SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);
607e4f94347SQu Wenruo }
608e4f94347SQu Wenruo 
609a1d767c1SQu Wenruo /*
610a1d767c1SQu Wenruo  * Unlike set/clear, which depends on each page's status, for test all bits
611a1d767c1SQu Wenruo  * are tested in the same way.
612a1d767c1SQu Wenruo  */
613a1d767c1SQu Wenruo #define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
614a1d767c1SQu Wenruo bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
615a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
616a1d767c1SQu Wenruo {									\
617a1d767c1SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
61872a69cd0SQu Wenruo 	unsigned int start_bit = subpage_calc_start_bit(fs_info, page,	\
61972a69cd0SQu Wenruo 						name, start, len);	\
620a1d767c1SQu Wenruo 	unsigned long flags;						\
621a1d767c1SQu Wenruo 	bool ret;							\
622a1d767c1SQu Wenruo 									\
623a1d767c1SQu Wenruo 	spin_lock_irqsave(&subpage->lock, flags);			\
62472a69cd0SQu Wenruo 	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
62572a69cd0SQu Wenruo 				len >> fs_info->sectorsize_bits);	\
626a1d767c1SQu Wenruo 	spin_unlock_irqrestore(&subpage->lock, flags);			\
627a1d767c1SQu Wenruo 	return ret;							\
628a1d767c1SQu Wenruo }
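/*
 * For example, IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate) below expands to
 * btrfs_subpage_test_uptodate(), which returns true only if every sector in
 * [start, start + len) has its uptodate bit set.
 */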
629a1d767c1SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
63003a816b3SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
631d8a5713eSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
6323470da3bSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
6336f17400bSQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
634e4f94347SQu Wenruo IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
635a1d767c1SQu Wenruo 
636a1d767c1SQu Wenruo /*
637a1d767c1SQu Wenruo  * Note that in selftests (extent-io-tests), we can have a NULL fs_info passed
638a1d767c1SQu Wenruo  * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
639a1d767c1SQu Wenruo  * back to the regular sectorsize branch.
640a1d767c1SQu Wenruo  */
641a1d767c1SQu Wenruo #define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
642a1d767c1SQu Wenruo 			       test_page_func)				\
643a1d767c1SQu Wenruo void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
644a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
645a1d767c1SQu Wenruo {									\
646*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
647a1d767c1SQu Wenruo 		set_page_func(page);					\
648a1d767c1SQu Wenruo 		return;							\
649a1d767c1SQu Wenruo 	}								\
650a1d767c1SQu Wenruo 	btrfs_subpage_set_##name(fs_info, page, start, len);		\
651a1d767c1SQu Wenruo }									\
652a1d767c1SQu Wenruo void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
653a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
654a1d767c1SQu Wenruo {									\
655*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
656a1d767c1SQu Wenruo 		clear_page_func(page);					\
657a1d767c1SQu Wenruo 		return;							\
658a1d767c1SQu Wenruo 	}								\
659a1d767c1SQu Wenruo 	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
660a1d767c1SQu Wenruo }									\
661a1d767c1SQu Wenruo bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
662a1d767c1SQu Wenruo 		struct page *page, u64 start, u32 len)			\
663a1d767c1SQu Wenruo {									\
664*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
665a1d767c1SQu Wenruo 		return test_page_func(page);				\
666a1d767c1SQu Wenruo 	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
66760e2d255SQu Wenruo }									\
66860e2d255SQu Wenruo void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
66960e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
67060e2d255SQu Wenruo {									\
671*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
67260e2d255SQu Wenruo 		set_page_func(page);					\
67360e2d255SQu Wenruo 		return;							\
67460e2d255SQu Wenruo 	}								\
67560e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
67660e2d255SQu Wenruo 	btrfs_subpage_set_##name(fs_info, page, start, len);		\
67760e2d255SQu Wenruo }									\
67860e2d255SQu Wenruo void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
67960e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
68060e2d255SQu Wenruo {									\
681*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {	\
68260e2d255SQu Wenruo 		clear_page_func(page);					\
68360e2d255SQu Wenruo 		return;							\
68460e2d255SQu Wenruo 	}								\
68560e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
68660e2d255SQu Wenruo 	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
68760e2d255SQu Wenruo }									\
68860e2d255SQu Wenruo bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
68960e2d255SQu Wenruo 		struct page *page, u64 start, u32 len)			\
69060e2d255SQu Wenruo {									\
691*fbca46ebSQu Wenruo 	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))	\
69260e2d255SQu Wenruo 		return test_page_func(page);				\
69360e2d255SQu Wenruo 	btrfs_subpage_clamp_range(page, &start, &len);			\
69460e2d255SQu Wenruo 	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
695a1d767c1SQu Wenruo }
696a1d767c1SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
697a1d767c1SQu Wenruo 			 PageUptodate);
69803a816b3SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
699d8a5713eSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io,
700d8a5713eSQu Wenruo 			 PageDirty);
7013470da3bSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
7023470da3bSQu Wenruo 			 PageWriteback);
7036f17400bSQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
7046f17400bSQu Wenruo 			 PageOrdered);
705e4f94347SQu Wenruo IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);
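/*
 * Each IMPLEMENT_BTRFS_PAGE_OPS() instantiation above generates six helpers.
 * The dirty one, for example, provides btrfs_page_set_dirty(),
 * btrfs_page_clear_dirty() and btrfs_page_test_dirty(), plus the
 * btrfs_page_clamp_*_dirty() variants which first clamp the range to the
 * given page.
 */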
706cc1d0d93SQu Wenruo 
707cc1d0d93SQu Wenruo /*
708cc1d0d93SQu Wenruo  * Make sure not only the page dirty bit is cleared, but also the subpage
709cc1d0d93SQu Wenruo  * dirty bits are cleared.
710cc1d0d93SQu Wenruo  */
711cc1d0d93SQu Wenruo void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
712cc1d0d93SQu Wenruo 				 struct page *page)
713cc1d0d93SQu Wenruo {
714cc1d0d93SQu Wenruo 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
715cc1d0d93SQu Wenruo 
716cc1d0d93SQu Wenruo 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
717cc1d0d93SQu Wenruo 		return;
718cc1d0d93SQu Wenruo 
719cc1d0d93SQu Wenruo 	ASSERT(!PageDirty(page));
720*fbca46ebSQu Wenruo 	if (!btrfs_is_subpage(fs_info, page))
721cc1d0d93SQu Wenruo 		return;
722cc1d0d93SQu Wenruo 
723cc1d0d93SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
72472a69cd0SQu Wenruo 	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
725cc1d0d93SQu Wenruo }
726e55a0de1SQu Wenruo 
727e55a0de1SQu Wenruo /*
728e55a0de1SQu Wenruo  * Handle different locked pages with different page sizes:
729e55a0de1SQu Wenruo  *
730e55a0de1SQu Wenruo  * - Page locked by plain lock_page()
731e55a0de1SQu Wenruo  *   It should not have any subpage::writers count.
732e55a0de1SQu Wenruo  *   Can be unlocked by unlock_page().
733e55a0de1SQu Wenruo  *   This is the most common locked page for __extent_writepage() called
734e55a0de1SQu Wenruo  *   inside extent_write_cache_pages() or extent_write_full_page().
735e55a0de1SQu Wenruo  *   Rarer cases include the @locked_page from extent_write_locked_range().
736e55a0de1SQu Wenruo  *
737e55a0de1SQu Wenruo  * - Page locked by lock_delalloc_pages()
738e55a0de1SQu Wenruo  *   There is only one caller, all pages except @locked_page for
738e55a0de1SQu Wenruo  *   There is only one caller; this covers all pages except @locked_page
739e55a0de1SQu Wenruo  *   of extent_write_locked_range().
740e55a0de1SQu Wenruo  *   In this case, we have to call the subpage helper to handle it.
742e55a0de1SQu Wenruo void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
743e55a0de1SQu Wenruo 			      u64 start, u32 len)
744e55a0de1SQu Wenruo {
745e55a0de1SQu Wenruo 	struct btrfs_subpage *subpage;
746e55a0de1SQu Wenruo 
747e55a0de1SQu Wenruo 	ASSERT(PageLocked(page));
748*fbca46ebSQu Wenruo 	/* For non-subpage case, we just unlock the page */
749*fbca46ebSQu Wenruo 	if (!btrfs_is_subpage(fs_info, page))
750e55a0de1SQu Wenruo 		return unlock_page(page);
751e55a0de1SQu Wenruo 
752e55a0de1SQu Wenruo 	ASSERT(PagePrivate(page) && page->private);
753e55a0de1SQu Wenruo 	subpage = (struct btrfs_subpage *)page->private;
754e55a0de1SQu Wenruo 
755e55a0de1SQu Wenruo 	/*
756e55a0de1SQu Wenruo 	 * For the subpage case, there are two types of locked pages: with or
757e55a0de1SQu Wenruo 	 * without a writers count.
758e55a0de1SQu Wenruo 	 *
759e55a0de1SQu Wenruo 	 * Since we own the page lock, no one else could touch subpage::writers
760e55a0de1SQu Wenruo 	 * and we are safe to do several atomic operations without a spinlock.
761e55a0de1SQu Wenruo 	 */
762c992fa1fSQu Wenruo 	if (atomic_read(&subpage->writers) == 0)
763e55a0de1SQu Wenruo 		/* No writers, locked by plain lock_page() */
764e55a0de1SQu Wenruo 		return unlock_page(page);
765e55a0de1SQu Wenruo 
766e55a0de1SQu Wenruo 	/* Have writers, use proper subpage helper to end it */
767e55a0de1SQu Wenruo 	btrfs_page_end_writer_lock(fs_info, page, start, len);
768e55a0de1SQu Wenruo }
769