xref: /linux/fs/btrfs/subpage.c (revision 257ca10c7317d4a424e48bb95d14ca53a1f1dd6f)
// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This is to make metadata handling easier, as a 64K page ensures
 *   every nodesize fits inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross a 64K page boundary
 *   btrfs-progs and the kernel have ensured that for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (holding one tree lock while trying to lock another tree block
 *   in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage,
	 * as we have already ruled out the sectorsize >= PAGE_SIZE case.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, which only goes through
	 * the subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}
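
/*
 * A worked example of the layout built above (illustrative only, assuming
 * a 4K sectorsize on a 64K page; the numbers are not from this file):
 *
 *	nr_bits = 64K / 4K = 16 bits per sub-bitmap, so inside
 *	btrfs_subpage::bitmaps the sub-bitmaps are packed as:
 *
 *	uptodate_offset  = 0
 *	dirty_offset     = 16
 *	writeback_offset = 32
 *	ordered_offset   = 48
 *	checked_offset   = 64
 *	total_nr_bits    = 80
 *
 *	BITS_TO_LONGS(80) == 2, so the whole packed bitmap occupies two
 *	longs on a 64-bit machine.
 */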

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio doesn't have private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}
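
/*
 * Minimal usage sketch for the attach/detach pair (hypothetical caller,
 * assuming an already locked data folio; this is an illustration, not a
 * real call site in this file):
 *
 *	int ret;
 *
 *	ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 *	if (ret < 0)
 *		return ret;
 *
 *	(use the btrfs_subpage_*() / btrfs_folio_*() helpers here)
 *
 *	btrfs_detach_subpage(fs_info, folio);
 */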

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
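
/*
 * With the example layout above (4K sectorsize, 64K page, 80 total bits;
 * illustrative numbers only), the allocation size works out to:
 *
 *	real_size = struct_size(ret, bitmaps, BITS_TO_LONGS(80))
 *		  = sizeof(struct btrfs_subpage) + 2 * sizeof(unsigned long)
 *
 * struct_size() also guards the multiplication against overflow, which a
 * hand-rolled "sizeof(*ret) + n * sizeof(long)" would not.
 */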

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the freeing
 * of the last eb in the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
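
/*
 * Sketch of the expected pairing (hypothetical caller; both helpers demand
 * mapping->i_private_lock held, as the lockdep assertions above enforce):
 *
 *	spin_lock(&folio->mapping->i_private_lock);
 *	btrfs_folio_inc_eb_refs(fs_info, folio);
 *	spin_unlock(&folio->mapping->i_private_lock);
 *
 *	(insert the eb into the radix tree, use it, ...)
 *
 *	spin_lock(&folio->mapping->i_private_lock);
 *	btrfs_folio_dec_eb_refs(fs_info, folio);
 *	spin_unlock(&folio->mapping->i_private_lock);
 */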

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, folio, start, len);

	atomic_add(nbits, &subpage->readers);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * Please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we always want the
	 * atomic_sub_and_test() to be executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
}
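
/*
 * Worked example of the reader count (illustrative, assuming a 4K
 * sectorsize): an 8K read has nbits = 8K >> 12 = 2, so
 * btrfs_subpage_start_reader() adds 2 to subpage->readers, and the folio
 * is only unlocked once btrfs_subpage_end_reader() drops the count back
 * to zero, i.e. after every outstanding sector read on the page has
 * finished.
 */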

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0; the
	 * subpage helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}
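
/*
 * Clamping example (illustrative numbers): for a folio covering
 * [64K, 128K) and an input range of start = 60K, len = 16K:
 *
 *	*start = max(64K, 60K)        = 64K
 *	*len   = min(128K, 76K) - 64K = 12K
 *
 * i.e. the range is trimmed to the part that overlaps this folio.
 */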

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, folio, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}
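
/*
 * Usage sketch for the writer lock pair (hypothetical caller, error
 * handling trimmed):
 *
 *	ret = btrfs_folio_start_writer_lock(fs_info, folio, start, len);
 *	if (ret == -EAGAIN)
 *		(the folio was invalidated under us, skip it)
 *
 *	(write back the delalloc range)
 *
 *	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
 *
 * On a regular (non-subpage) filesystem this degrades to a plain
 * folio_lock()/folio_unlock() pair.
 */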

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)
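
/*
 * Worked example for subpage_calc_start_bit() (illustrative, using the 4K
 * sectorsize / 64K page layout from above): for the dirty bitmap and a
 * range starting at page offset 8K:
 *
 *	start_bit = (8K >> 12) + dirty_offset = 2 + 16 = 18
 *
 * so bit 18 of btrfs_subpage::bitmaps is the dirty bit of the third
 * sector in the page.
 */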

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last set bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page status, for test operations
 * all bits are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
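
/*
 * Since the generated names don't appear literally above, each
 * instantiation expands to a function like (shown for @uptodate):
 *
 *	bool btrfs_subpage_test_uptodate(const struct btrfs_fs_info *fs_info,
 *					 struct folio *folio,
 *					 u64 start, u32 len);
 *
 * which returns true only if every sector bit in the given range is set.
 */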

/*
 * Note that in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we
 * can fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);
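
/*
 * Usage sketch (hypothetical, with the 4K sectorsize / 64K page setup
 * used in the earlier examples):
 *
 *	btrfs_folio_set_dirty(fs_info, folio, start, SZ_4K);
 *
 * sets one sector bit in the dirty bitmap and marks the whole folio
 * dirty; on a sectorsize == PAGE_SIZE filesystem (or with a NULL fs_info
 * from the selftests) the same call degrades to plain
 * folio_mark_dirty(folio).
 */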

/*
 * Make sure that not only the page dirty bit is cleared, but also the
 * subpage dirty bits.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!folio_test_dirty(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(folio_test_locked(folio));
	/* For the non-subpage case, we just unlock the page. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked pages: with or
	 * without a writers count.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without a spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_info);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		    start, len, folio_pos(folio),
		    subpage_info->bitmap_nr_bits, &uptodate_bitmap,
		    subpage_info->bitmap_nr_bits, &dirty_bitmap,
		    subpage_info->bitmap_nr_bits, &writeback_bitmap,
		    subpage_info->bitmap_nr_bits, &ordered_bitmap,
		    subpage_info->bitmap_nr_bits, &checked_bitmap);
}
767