/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include "misc.h"

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range.  Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * as that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked
	 * as a new delalloc range, use this flag when clearing that range to
	 * indicate that the VFS inode's byte count should be incremented and
	 * the inode's new delalloc bytes decremented, in an atomic way that
	 * prevents races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),
};

#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/*
 * Aliases for bits defined above, used only in the device allocation tree.
 * Don't reuse EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV /
 * EXTENT_CLEAR_DATA_RESV here because they have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)
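
/*
 * Illustrative sketch (not part of the original header): marking a chunk
 * range as allocated in a device's allocation state io tree, assuming the
 * tree is the one created with owner IO_TREE_DEVICE_ALLOC_STATE (called
 * 'alloc_state' here purely for illustration):
 *
 *	set_extent_bits(&device->alloc_state, physical_start,
 *			physical_start + num_bytes - 1, CHUNK_ALLOCATED);
 */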

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	/* Inode associated with this tree, or NULL. */
	struct btrfs_inode *inode;

	/* Who owns this io tree; should be one of IO_TREE_*. */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
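
/*
 * Usage sketch (illustrative, not taken from the kernel sources): a tree is
 * initialized once with the fs_info it belongs to and an IO_TREE_* owner id,
 * and released when none of its ranges are needed anymore:
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST);
 *	... set, test and clear bits on byte ranges ...
 *	extent_io_tree_release(&tree);
 */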

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    struct extent_state **cached);
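
/*
 * Illustrative sketch, assuming try_lock_extent() returns non-zero when it
 * managed to lock the whole range and 0 on contention (in which case the
 * caller typically backs off and retries or falls back to lock_extent()):
 *
 *	struct extent_state *cached = NULL;
 *
 *	if (!try_lock_extent(tree, start, end, &cached))
 *		return -EAGAIN;
 *	... operate on the locked range ...
 *	unlock_extent(tree, start, end, &cached);
 */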

int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);
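
/*
 * Illustrative sketch, assuming count_range_bits() returns the number of
 * bytes in [*start, search_end] that have the given bits set (capped at
 * max_bytes) and updates *start to the start of the first matching range;
 * here we count delalloc bytes with no cap and no contiguity requirement:
 *
 *	u64 cur = range_start;
 *	u64 delalloc_bytes;
 *
 *	delalloc_bytes = count_range_bits(tree, &cur, range_end, (u64)-1,
 *					  EXTENT_DELALLOC, 0, NULL);
 */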

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
				  GFP_NOFS, NULL);
}
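
/*
 * Typical lock/unlock pattern (illustrative sketch): range ends are
 * inclusive, and passing a cached extent state lets the unlock reuse the
 * state found while locking instead of searching the tree again:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(tree, start, start + len - 1, &cached);
 *	... operate on [start, start + len - 1] ...
 *	unlock_extent(tree, start, start + len - 1, &cached);
 */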

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start,
					 u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOWAIT);
}

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, u32 extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | extra_bits,
			      cached_state, GFP_NOFS);
}
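
/*
 * Illustrative sketch (assumes 'inode' is a struct btrfs_inode and that its
 * io tree is the 'io_tree' member, which this header does not define):
 * marking a buffered-write range as delalloc, passing EXTENT_DELALLOC_NEW as
 * an extra bit for a range that was not delalloc before:
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_delalloc(&inode->io_tree, start, end,
 *			    EXTENT_DELALLOC_NEW, &cached);
 */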

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DEFRAG,
			      cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, GFP_NOFS);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		     struct extent_state **cached_state);
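
/*
 * Illustrative sketch, assuming find_first_extent_bit() returns 0 when a
 * range with the requested bits was found and fills [*start_ret, *end_ret]
 * (inclusive); walking every dirty range from offset 0 onwards:
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */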

#endif /* BTRFS_EXTENT_IO_TREE_H */