xref: /linux/fs/btrfs/subpage.h (revision fd71def6d9abc5ae362fb9995d46049b7b0ed391)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #ifndef BTRFS_SUBPAGE_H
4 #define BTRFS_SUBPAGE_H
5 
6 #include <linux/spinlock.h>
7 #include <linux/atomic.h>
8 #include <linux/sizes.h>
9 #include "btrfs_inode.h"
10 #include "fs.h"
11 
12 struct address_space;
13 struct folio;
14 
15 /*
 * Extra info for subpage bitmap.
17  *
18  * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
19  * one larger bitmap.
20  *
21  * This structure records how they are organized in the bitmap:
22  *
23  * /- uptodate          /- dirty        /- ordered
24  * |			|		|
25  * v			v		v
26  * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
27  * |< sectors_per_page >|
28  *
29  * Unlike regular macro-like enums, here we do not go upper-case names, as
30  * these names will be utilized in various macros to define function names.
31  */
enum {
	/* Index of each per-sector status sub-bitmap inside btrfs_subpage::bitmaps */
	btrfs_bitmap_nr_uptodate = 0,
	btrfs_bitmap_nr_dirty,
	btrfs_bitmap_nr_writeback,
	btrfs_bitmap_nr_ordered,
	btrfs_bitmap_nr_checked,
	btrfs_bitmap_nr_locked,
	/* Not a bitmap; total number of sub-bitmaps packed into btrfs_subpage::bitmaps */
	btrfs_bitmap_nr_max
};
41 
/*
 * Structure to trace status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_subpage {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	union {
		/*
		 * Structures only used by metadata
		 *
		 * @eb_refs should only be operated under private_lock, as it
		 * manages whether the subpage can be detached.
		 */
		atomic_t eb_refs;

		/*
		 * Structures only used by data,
		 *
		 * How many sectors inside the page are locked.
		 */
		atomic_t nr_locked;
	};
	/*
	 * The packed per-sector status bitmaps (uptodate/dirty/...), laid
	 * out back-to-back in btrfs_bitmap_nr_* order (see the enum above).
	 */
	unsigned long bitmaps[];
};
67 
/* What kind of owner the subpage structure is being attached for. */
enum btrfs_subpage_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};
72 
73 #if PAGE_SIZE > BTRFS_MIN_BLOCKSIZE
74 /*
75  * Subpage support for metadata is more complex, as we can have dummy extent
76  * buffers, where folios have no mapping to determine the owning inode.
77  *
78  * Thankfully we only need to check if node size is smaller than page size.
79  * Even with larger folio support, we will only allocate a folio as large as
80  * node size.
 * Thus if nodesize < PAGE_SIZE, we know metadata needs to go through the
 * subpage routines.
82  */
btrfs_meta_is_subpage(const struct btrfs_fs_info * fs_info)83 static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
84 {
85 	return fs_info->nodesize < PAGE_SIZE;
86 }
btrfs_is_subpage(const struct btrfs_fs_info * fs_info,struct folio * folio)87 static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
88 				    struct folio *folio)
89 {
90 	if (folio->mapping && folio->mapping->host)
91 		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
92 	return fs_info->sectorsize < folio_size(folio);
93 }
94 #else
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	/* PAGE_SIZE <= BTRFS_MIN_BLOCKSIZE: metadata can never need subpage handling. */
	return false;
}
btrfs_is_subpage(const struct btrfs_fs_info * fs_info,struct folio * folio)99 static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
100 				    struct folio *folio)
101 {
102 	if (folio->mapping && folio->mapping->host)
103 		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
104 	return false;
105 }
106 #endif
107 
/* Attach/detach the per-folio btrfs_subpage (kept in the folio's private data). */
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio,
			  enum btrfs_subpage_type type);

/* Allocate additional data where page represents more than one sector */
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
				size_t fsize, enum btrfs_subpage_type type);
void btrfs_free_subpage(struct btrfs_subpage *subpage);

/* Maintain btrfs_subpage::eb_refs (metadata only, see the struct comment). */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);

/*
 * Helpers for the locked bitmap / nr_locked tracking of data folios.
 * NOTE(review): the bitmap variant presumably ends the lock for each sector
 * set in @bitmap — confirm against subpage.c.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap);
127 /*
128  * Template for subpage related operations.
129  *
130  * btrfs_subpage_*() are for call sites where the folio has subpage attached and
131  * the range is ensured to be inside the folio's single page.
132  *
133  * btrfs_folio_*() are for call sites where the page can either be subpage
134  * specific or regular folios. The function will handle both cases.
135  * But the range still needs to be inside one single page.
136  *
137  * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range doesn't
138  * need to be inside the page. Those functions will truncate the range
139  * automatically.
140  *
141  * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
142  *
143  * For metadata, one should use btrfs_meta_folio_*() helpers instead, and there
144  * is no clamp version for metadata helpers, as we either go subpage
145  * (nodesize < PAGE_SIZE) or go regular folio helpers (nodesize >= PAGE_SIZE,
146  * and our folio is never larger than nodesize).
147  */
#define DECLARE_BTRFS_SUBPAGE_OPS(name)					\
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);

/*
 * The status bits getting the full set/clear/test helper families.
 * (The locked bitmap has its own dedicated helpers above instead.)
 */
DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
176 
/*
 * Helper for error cleanup, where a folio will have its dirty flag cleared,
 * with writeback started and finished, as if the IO had completed.
 */
static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
					       struct folio *locked_folio,
					       u64 start, u32 len)
{
	/* Order matters: drop dirty first, then cycle writeback on and off. */
	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
}
189 
/*
 * Clear dirty for the range and return a combined result.
 * NOTE(review): presumably returns whether the folio has no dirty sectors
 * left — confirm against the definition in subpage.c.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len);

/* Debug check that no sector in the given range is still dirty. */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len);
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
/* Snapshot the dirty sub-bitmap of @folio into @ret_bitmap. */
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap);
/* Debugging dump of the subpage bitmaps for the given range (cold path). */
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len);
201 
202 #endif
203