// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   every nodesize fits inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to write back tree block A, we will also write
 *   back tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the
 *   extra granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on
 *   page locking anymore, or we would have greatly reduced concurrency or
 *   even deadlocks (holding one tree lock while trying to lock another
 *   tree block in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking
 *   only.  This means a slightly higher tree locking latency.
 */
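
/*
 * A worked example of the per-sector bitmap layout (an illustrative sketch;
 * the authoritative order of the btrfs_bitmap_nr_* areas is the enum in
 * subpage.h):
 *
 * With a 64K page and 4K sectorsize, fs_info->sectors_per_page = 16.  The
 * btrfs_subpage::bitmaps array packs one such 16-bit area per status type
 * (uptodate, dirty, writeback, ...), so the bit for the sector at offset
 * @off inside the page, in area @name, is:
 *
 *	sectors_per_page * btrfs_bitmap_nr_##name + (off >> sectorsize_bits)
 */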

#if PAGE_SIZE > SZ_4K
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping.  And if mapping->host is a data inode, it's subpage, as we
	 * have already ruled out the sectorsize >= PAGE_SIZE case.
	 */
	if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
		return true;

	/*
	 * Now the only remaining case is metadata, for which we only take the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}
#endif

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio doesn't have private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&ret->eb_refs, 0);
	else
		atomic_set(&ret->nr_locked, 0);
	return ret;
}

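/*
 * Sizing sketch for the allocation above, assuming the six bitmap areas
 * (uptodate, dirty, writeback, ordered, checked, locked) that this file
 * manipulates: with a 64K page and 4K sectorsize, sectors_per_page = 16,
 * so the flexible array needs 6 * 16 = 96 bits, i.e. BITS_TO_LONGS(96) = 2
 * unsigned longs on a 64-bit machine.
 */
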
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the freeing of
 * the last eb in the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int __start_bit;					\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
	__start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
	__start_bit;							\
})

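/*
 * Worked example for subpage_calc_start_bit(), assuming a 64K page, 4K
 * sectorsize (so sectorsize_bits == 12, sectors_per_page == 16) and
 * btrfs_bitmap_nr_dirty == 1 purely as an illustration: for @start at page
 * offset 8K, offset_in_page(start) >> 12 == 2, so the dirty bit of that
 * sector is 16 * 1 + 2 == 18 within subpage->bitmaps.
 */
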
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range.  In that case, just set @len to 0; the
	 * subpage helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

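/*
 * Clamping sketch: for a folio covering [64K, 128K) and an input range
 * [60K, 80K), btrfs_subpage_clamp_range() yields [64K, 80K) (len == 16K);
 * for an input range entirely before the folio, e.g. [0K, 60K), it yields
 * len == 0, which the subpage helpers accept as a no-op.
 */
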
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
					    struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * Such @locked_page is locked by plain lock_page(), thus its
	 * subpage::locked is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
		clear_bit(bit, subpage->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - folio locked but without any subpage locked
 *   This happens either before writepage_delalloc(), or when the delalloc
 *   range is already handled by the previous folio.
 *   We can simply unlock it.
 *
 * - folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their bits in
 *   the locked bitmap, reduce the locked sector count, and unlock the folio
 *   if that was the last locked range.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * For the subpage case, there are two types of locked folio: with or
	 * without a locked sector count.
	 *
	 * Since we own the folio lock, no one else could touch subpage::locked
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
		folio_unlock(folio);
}

void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&subpage->lock, flags);
	for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	if (last)
		folio_unlock(folio);
}

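/*
 * Bitmap argument sketch for btrfs_folio_end_lock_bitmap(): bit @i of
 * @bitmap corresponds to the i-th sector of the folio, so e.g. with 4K
 * sectors a @bitmap of 0x5 asks to end the lock on the sectors at folio
 * offsets 0K and 8K.
 */
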
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last set bits in the dirty bitmap, i.e. the
 * whole dirty bitmap is now zero.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the folio dirty flag for the true case,
 * as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depend on each page status, the test ops check all
 * bits in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

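/*
 * Usage sketch for the generated helpers above (illustrative): a caller
 * dirtying one block works the same on regular and subpage setups:
 *
 *	btrfs_folio_set_dirty(fs_info, folio, start, fs_info->sectorsize);
 *
 * With sectorsize == PAGE_SIZE this is just folio_mark_dirty(folio); on
 * subpage it also sets the covered bits in the subpage dirty bitmap.
 */
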
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bits are cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	subpage = folio_get_private(folio);
	ASSERT(subpage);
	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * This is for a folio already locked by plain lock_page()/folio_lock(), which
 * has no subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
		return;

	subpage = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	spin_lock_irqsave(&subpage->lock, flags);
	/* Target range should not yet be locked. */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->nr_locked);
	ASSERT(ret <= fs_info->sectors_per_page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

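/*
 * Lifecycle sketch (illustrative): a folio locked by plain folio_lock()
 * starts with subpage::nr_locked == 0.  btrfs_folio_set_lock() then marks
 * the covered sectors in the locked bitmap and raises nr_locked, so a later
 * btrfs_folio_end_lock() on the same range clears those bits and only calls
 * folio_unlock() once the last locked sector is gone.
 */
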
#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
{									\
	const int sectors_per_page = fs_info->sectors_per_page;	\
									\
	ASSERT(sectors_per_page < BITS_PER_LONG);			\
	*dst = bitmap_read(subpage->bitmaps,				\
			   sectors_per_page * btrfs_bitmap_nr_##name,	\
			   sectors_per_page);				\
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	const u32 sectors_per_page = fs_info->sectors_per_page;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long locked_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		    start, len, folio_pos(folio),
		    sectors_per_page, &uptodate_bitmap,
		    sectors_per_page, &dirty_bitmap,
		    sectors_per_page, &locked_bitmap,
		    sectors_per_page, &writeback_bitmap,
		    sectors_per_page, &ordered_bitmap,
		    sectors_per_page, &checked_bitmap);
}

void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(fs_info->sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
760