// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (block size < folio size) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   every nodesize fits inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have enforced that for a while, thus only ancient
 *   filesystems could have such a problem.  For such a case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (holding one tree lock while trying to lock another tree block
 *   in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

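/*
 * Worked example of the bitmap layout (illustrative, with an assumed 64K
 * page and 4K sectorsize, i.e. 16 blocks per folio): the per-folio status
 * bitmaps are packed back to back into the single subpage->bitmaps array,
 * one blocks_per_folio-sized window per btrfs_bitmap_nr_* value, e.g.:
 *
 *   bits  0..15  uptodate
 *   bits 16..31  dirty
 *   bits 32..47  writeback
 *   ...          (ordered, checked and locked follow the same pattern)
 *
 * The bit recording status @name for the block at file offset @start is:
 *
 *   (offset_in_page(start) >> sectorsize_bits) +
 *	blocks_per_folio * btrfs_bitmap_nr_##name
 *
 * which is what subpage_calc_start_bit() below computes.  The exact order
 * of the btrfs_bitmap_nr_* values is defined in subpage.h; the order shown
 * here is only for illustration.
 */
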
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/* For metadata we don't support large folio yet. */
	ASSERT(!folio_test_large(folio));

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (folio_test_private(folio))
		return 0;
	if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
		return 0;
	if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, folio_size(folio), type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio,
			  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!folio_test_private(folio))
		return;
	if (type == BTRFS_SUBPAGE_METADATA && !btrfs_meta_is_subpage(fs_info))
		return;
	if (type == BTRFS_SUBPAGE_DATA && !btrfs_is_subpage(fs_info, folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
				size_t fsize, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < fsize);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(btrfs_bitmap_nr_max *
				      (fsize >> fs_info->sectorsize_bits)));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&ret->eb_refs, 0);
	else
		atomic_set(&ret->nr_locked, 0);
	return ret;
}

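/*
 * Sanity check of the size computed above, with assumed illustrative
 * numbers: for a 64K folio and 4K sectorsize there are 16 blocks per
 * folio, and with the six bitmaps used in this file (uptodate, dirty,
 * writeback, ordered, checked, locked) that is btrfs_bitmap_nr_max * 16 =
 * 96 bits, which BITS_TO_LONGS() rounds up to two unsigned longs appended
 * to struct btrfs_subpage on a 64-bit machine.
 */
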
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent a race with the freeing
 * of the last eb in the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_meta_is_subpage(fs_info))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_meta_is_subpage(fs_info))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + folio_size(folio));
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int __start_bit;					\
	const unsigned int blocks_per_folio =				\
			   btrfs_blocks_per_folio(fs_info, folio);	\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
	__start_bit += blocks_per_folio * btrfs_bitmap_nr_##name;	\
	__start_bit;							\
})

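/*
 * Example with assumed illustrative values: for a 64K folio with 4K blocks
 * (blocks_per_folio == 16), the "dirty" bit of the block starting 4K into
 * the folio is:
 *
 *   __start_bit = (0x1000 >> 12) + 16 * btrfs_bitmap_nr_dirty
 *               = 1 + 16 * btrfs_bitmap_nr_dirty
 *
 * i.e. the second bit inside the dirty window of subpage->bitmaps.
 */
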
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + folio_size(folio),
			     orig_start + orig_len) - *start;
}

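/*
 * Example of the clamping above (assumed illustrative values): for a folio
 * covering [64K, 128K) and a range with start=60K len=16K, the range is
 * clamped to start=64K len=12K.  A range ending at or before folio_pos()
 * degenerates to len=0, which the subpage helpers treat as a no-op.
 */
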
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
					    struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * Such @locked_page is locked by plain lock_page(), thus its
	 * subpage::locked is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
		clear_bit(bit, subpage->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - folio locked but without any subpage range locked
 *   This happens either before writepage_delalloc(), or when the delalloc
 *   range has already been handled by a previous folio.
 *   We can simply unlock it.
 *
 * - folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their locked
 *   bits in the bitmap, decrease nr_locked accordingly, and unlock the
 *   folio if that was the last locked range.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * For the subpage case, there are two types of locked folios: with or
	 * without a subpage locked count.
	 *
	 * Since we own the folio lock, no one else could touch subpage::locked
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
		folio_unlock(folio);
}

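/*
 * A sketch of how the locking helpers pair up (not a verbatim call site):
 * a writeback path locks the folio, records the delalloc blocks as locked,
 * then releases them range by range:
 *
 *	folio_lock(folio);
 *	btrfs_folio_set_lock(fs_info, folio, start, len);
 *	// ... submit IO for sub-ranges ...
 *	btrfs_folio_end_lock(fs_info, folio, start, len);
 *
 * with the btrfs_folio_end_lock() call on the last locked range also
 * unlocking the folio itself.
 */
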
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
	const int start_bit = blocks_per_folio * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (!btrfs_is_subpage(fs_info, folio)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&subpage->lock, flags);
	for_each_set_bit(bit, &bitmap, blocks_per_folio) {
		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	if (last)
		folio_unlock(folio);
}

#define subpage_test_bitmap_all_set(fs_info, folio, name)		\
({									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	const unsigned int blocks_per_folio =				\
				btrfs_blocks_per_folio(fs_info, folio); \
									\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			blocks_per_folio * btrfs_bitmap_nr_##name,	\
			blocks_per_folio);				\
})

#define subpage_test_bitmap_all_zero(fs_info, folio, name)		\
({									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	const unsigned int blocks_per_folio =				\
				btrfs_blocks_per_folio(fs_info, folio); \
									\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			blocks_per_folio * btrfs_bitmap_nr_##name,	\
			blocks_per_folio);				\
})

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, folio, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last dirty bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the folio dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, folio, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, folio, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, folio, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, folio, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page status, for test operations
 * all bits are tested the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

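/*
 * The macro above expands to btrfs_subpage_test_uptodate(),
 * btrfs_subpage_test_dirty(), etc.  A minimal usage sketch (hypothetical
 * caller):
 *
 *	if (btrfs_subpage_test_dirty(fs_info, folio, start, len))
 *		;	// every block in [start, start + len) is dirty
 *
 * Note the semantics are "all set": a single clean block in the range
 * makes the helper return false.
 */
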
/*
 * Note that, in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio)) {			\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio)) {			\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio))				\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio)) {			\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio)) {			\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio))				\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb) \
{									\
	if (!btrfs_meta_is_subpage(eb->fs_info)) {			\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(eb->fs_info, folio, eb->start, eb->len); \
}									\
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb) \
{									\
	if (!btrfs_meta_is_subpage(eb->fs_info)) {			\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(eb->fs_info, folio, eb->start, eb->len); \
}									\
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb) \
{									\
	if (!btrfs_meta_is_subpage(eb->fs_info))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(eb->fs_info, folio, eb->start, eb->len); \
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

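/*
 * For each flag above, IMPLEMENT_BTRFS_PAGE_OPS() generates nine helpers:
 * plain, clamp_ and meta_ variants of set/clear/test.  As a sketch
 * (hypothetical call sites), a data path whose range may cross the folio
 * boundary uses the clamping variant:
 *
 *	btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
 *
 * while metadata paths derive the range from the extent buffer itself:
 *
 *	btrfs_meta_folio_set_dirty(folio, eb);
 */
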
#define GET_SUBPAGE_BITMAP(fs_info, folio, name, dst)			\
{									\
	const unsigned int blocks_per_folio =				\
				btrfs_blocks_per_folio(fs_info, folio);	\
	const struct btrfs_subpage *subpage = folio_get_private(folio);	\
									\
	ASSERT(blocks_per_folio < BITS_PER_LONG);			\
	*dst = bitmap_read(subpage->bitmaps,				\
			   blocks_per_folio * btrfs_bitmap_nr_##name,	\
			   blocks_per_folio);				\
}

#define SUBPAGE_DUMP_BITMAP(fs_info, folio, name, start, len)		\
{									\
	unsigned long bitmap;						\
	const unsigned int blocks_per_folio =				\
				btrfs_blocks_per_folio(fs_info, folio);	\
									\
	GET_SUBPAGE_BITMAP(fs_info, folio, name, &bitmap);		\
	btrfs_warn(fs_info,						\
	"dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
		   start, len, folio_pos(folio),			\
		   blocks_per_folio, &bitmap);				\
}

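/*
 * The "%*pbl" specifier above prints the bitmap as a range list, so a
 * dump line ends in something like "dirty_bitmap=0-3,8" (hypothetical
 * values), meaning blocks 0 through 3 and block 8 of the folio have the
 * bit set.
 */
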
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	subpage = folio_get_private(folio);
	ASSERT(subpage);
	spin_lock_irqsave(&subpage->lock, flags);
	if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
		SUBPAGE_DUMP_BITMAP(fs_info, folio, dirty, start, len);
		ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * This is for folios already locked by plain lock_page()/folio_lock(), which
 * doesn't have any subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio))
		return;

	subpage = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	spin_lock_irqsave(&subpage->lock, flags);
	/* The target range should not yet be locked. */
	if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
		SUBPAGE_DUMP_BITMAP(fs_info, folio, locked, start, len);
		ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	}
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->nr_locked);
	ASSERT(ret <= btrfs_blocks_per_folio(fs_info, folio));
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Clear the dirty flag for the folio.
 *
 * If the affected folio is no longer dirty, return true. Otherwise return false.
 */
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb)
{
	bool last;

	if (!btrfs_meta_is_subpage(eb->fs_info)) {
		folio_clear_dirty_for_io(folio);
		return true;
	}

	last = btrfs_subpage_clear_and_test_dirty(eb->fs_info, folio, eb->start, eb->len);
	if (last) {
		folio_clear_dirty_for_io(folio);
		return true;
	}
	return false;
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(blocks_per_folio > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(fs_info, folio, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(fs_info, folio, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		    start, len, folio_pos(folio),
		    blocks_per_folio, &uptodate_bitmap,
		    blocks_per_folio, &dirty_bitmap,
		    blocks_per_folio, &locked_bitmap,
		    blocks_per_folio, &writeback_bitmap,
		    blocks_per_folio, &ordered_bitmap,
		    blocks_per_folio, &checked_bitmap);
}

void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(btrfs_blocks_per_folio(fs_info, folio) > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(fs_info, folio, dirty, ret_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);
}