/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
#include "btrfs_inode.h"
#include "fs.h"

struct address_space;
struct folio;

/*
 * Extra info for subpage bitmap.
 *
 * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
 * one larger bitmap.
 *
 * This structure records how they are organized in the bitmap:
 *
 * /- uptodate           /- dirty            /- ordered
 * |                     |                   |
 * v                     v                   v
 * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
 * |< sectors_per_page >|
 *
 * Unlike regular macro-like enums, here we do not use upper-case names, as
 * these names will be utilized in various macros to define function names.
 */
enum {
	btrfs_bitmap_nr_uptodate = 0,
	btrfs_bitmap_nr_dirty,

	/*
	 * This can be changed to atomic eventually, but that change will
	 * rely on the async delalloc range rework for the locked bitmap,
	 * as async delalloc can unlock its range and mark blocks writeback
	 * at any time.
	 */
	btrfs_bitmap_nr_writeback,

	/*
	 * The ordered and checked flags are for COW fixup, already marked
	 * deprecated, and will be removed eventually.
	 */
	btrfs_bitmap_nr_ordered,
	btrfs_bitmap_nr_checked,

	/*
	 * The locked bit is for the async delalloc range (compression).
	 * Currently an async extent is queued with its range locked, until
	 * the compression is done.
	 * So an async extent can unlock the range at any time.
	 *
	 * This will need a rework on the async extent lifespan (mark
	 * writeback and do compression) before deprecating this flag.
	 */
	btrfs_bitmap_nr_locked,
	btrfs_bitmap_nr_max
};
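
/*
 * Illustrative sketch (not the actual kernel helper; the local names
 * below are assumptions): with the packed layout above, the bit for
 * sector @i of e.g. the dirty flag would be located along the lines of:
 *
 *	const unsigned int sectors_per_page = PAGE_SIZE >> fs_info->sectorsize_bits;
 *	const unsigned int bit = btrfs_bitmap_nr_dirty * sectors_per_page + i;
 *
 *	set_bit(bit, bfs->bitmaps);
 *
 * where @bfs is the btrfs_folio_state attached to the folio's private
 * pointer.
 */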

/*
 * Structure to trace the status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_folio_state {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	union {
		/*
		 * Structures only used by metadata.
		 *
		 * @eb_refs should only be operated under private_lock, as it
		 * manages whether the btrfs_folio_state can be detached.
		 */
		atomic_t eb_refs;

		/*
		 * Structures only used by data.
		 *
		 * How many sectors inside the page are locked.
		 */
		atomic_t nr_locked;
	};
	unsigned long bitmaps[];
};

enum btrfs_folio_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};

/*
 * Subpage support for metadata is more complex, as we can have dummy extent
 * buffers, where folios have no mapping to determine the owning inode.
 *
 * Thankfully we only need to check if the node size is smaller than the
 * page size.
 * Even with larger folio support, we will only allocate a folio as large as
 * the node size.
 * Thus if nodesize < PAGE_SIZE, we know the metadata needs to go through the
 * subpage routines.
 */
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	return fs_info->nodesize < PAGE_SIZE;
}

static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
				    struct folio *folio)
{
	if (folio->mapping && folio->mapping->host)
		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
	return fs_info->sectorsize < folio_size(folio);
}

int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, enum btrfs_folio_type type);
void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
			      enum btrfs_folio_type type);
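
/*
 * Illustrative usage sketch (the surrounding context and error handling
 * are assumptions): a call site that just added a data folio to the page
 * cache would attach the state before doing any per-sector bookkeeping:
 *
 *	ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
 *	if (ret < 0)
 *		return ret;
 *
 * The matching btrfs_detach_folio_state() call is made when the folio is
 * released from the address space.
 */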

/* Allocate additional data where page represents more than one sector */
struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
						  size_t fsize, enum btrfs_folio_type type);

static inline void btrfs_free_folio_state(struct btrfs_folio_state *bfs)
{
	kfree(bfs);
}

void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);

void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap);
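
/*
 * Illustrative pairing sketch for the locked bitmap described in the enum
 * above (the surrounding delalloc context is an assumption): an async
 * delalloc range is marked locked before the async extent is queued, and
 * the lock is ended once the range finishes:
 *
 *	btrfs_folio_set_lock(fs_info, folio, start, len);
 *	... queue the async extent, which may finish at any time ...
 *	btrfs_folio_end_lock(fs_info, folio, start, len);
 */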

/*
 * Template for subpage related operations.
 *
 * btrfs_subpage_*() are for call sites where the folio has the subpage
 * state attached and the range is ensured to be inside the folio's single
 * page.
 *
 * btrfs_folio_*() are for call sites where the folio can be either subpage
 * specific or regular. The helpers will handle both cases, but the range
 * still needs to be inside one single page.
 *
 * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range
 * doesn't need to be inside the page. Those functions will truncate the
 * range automatically.
 *
 * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
 *
 * For metadata, one should use the btrfs_meta_folio_*() helpers instead,
 * and there is no clamp version for the metadata helpers, as we either go
 * subpage (nodesize < PAGE_SIZE) or go regular folio helpers (nodesize >=
 * PAGE_SIZE, and our folio is never larger than nodesize).
 */
#define DECLARE_BTRFS_SUBPAGE_OPS(name)					\
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len);	\
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info,	\
				struct folio *folio, u64 start, u32 len); \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len); \
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len);	\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len);	\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len);	\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len); \
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len); \
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len); \
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);

DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
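
/*
 * As a concrete example, DECLARE_BTRFS_SUBPAGE_OPS(uptodate) above
 * declares, among others:
 *
 *	void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
 *					struct folio *folio, u64 start, u32 len);
 *	bool btrfs_folio_test_uptodate(const struct btrfs_fs_info *fs_info,
 *				       struct folio *folio, u64 start, u32 len);
 *	void btrfs_meta_folio_set_uptodate(struct folio *folio,
 *					   const struct extent_buffer *eb);
 */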

/*
 * Helper for error cleanup, where a folio will have its dirty flag cleared,
 * with writeback started and finished.
 */
static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
					       struct folio *locked_folio,
					       u64 start, u32 len)
{
	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
}
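
/*
 * Illustrative error-path sketch (the surrounding submission context is
 * an assumption): after a failed submission, running the range through
 * btrfs_folio_clamp_finish_io() lets any waiters see a completed state:
 *
 *	if (ret < 0)
 *		btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len);
 */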

bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len);

void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len);
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap);
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len);

#endif