/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/wait.h>
#include "misc.h"

struct extent_changeset;
struct btrfs_fs_info;
struct btrfs_inode;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range.  Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight);
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked
	 * as a new delalloc range, use this flag when clearing the range to
	 * indicate that the VFS inode's number of bytes should be incremented
	 * and the inode's new delalloc bytes decremented, in an atomic way to
	 * prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),

	/*
	 * This must be last.
	 *
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};
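
/*
 * Example (illustrative): EXTENT_NOWAIT is not a state bit of its own, it is
 * OR'ed into the bit mask passed to the set/clear helpers to request NOWAIT
 * semantics for memory allocations, e.g.:
 *
 *	set_extent_bit(io_tree, start, end, EXTENT_DELALLOC | EXTENT_NOWAIT,
 *		       &cached_state);
 */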

#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)

/*
 * Bits from above redefined for use only in the device allocation tree.
 * They must not reuse EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because those have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)
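
/*
 * Example (illustrative, assuming a struct btrfs_device whose alloc_state
 * member is an io tree with owner IO_TREE_DEVICE_ALLOC_STATE): chunk
 * allocation state is tracked with the CHUNK_* aliases, e.g.:
 *
 *	set_extent_bit(&device->alloc_state, start, start + num_bytes - 1,
 *		       CHUNK_ALLOCATED, NULL);
 */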

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points; a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 *                             accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree);
const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree);
const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
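
/*
 * Example (illustrative): a tree is set up and torn down with the
 * init/release pair, passing one of the IO_TREE_* owners, e.g.:
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST);
 *	...
 *	extent_io_tree_release(&tree);
 */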

int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		  struct extent_state **cached);
bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		       struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
			      struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
				   u64 end, struct extent_state **cached)
{
	return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}
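
/*
 * Example (illustrative, assuming a struct btrfs_inode with an io_tree of
 * owner IO_TREE_INODE_IO): the typical pattern for working on an inclusive
 * file range [start, end]:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(&inode->io_tree, start, end, &cached);
 *	... operate on the range ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 *
 * The cached state is optional but avoids a second tree search on unlock.
 */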

int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);

void free_extent_state(struct extent_state *state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
		    struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state);

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			   u64 *start_ret, u64 *end_ret, u32 bits,
			   struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
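
/*
 * Example (illustrative): iterating over all ranges with a given bit set,
 * using the inclusive end returned by find_first_extent_bit():
 *
 *	u64 cur = 0, found_start, found_end;
 *
 *	while (find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				     EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */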

static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
				  u64 end, struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
}
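
/*
 * Example (illustrative, same io_tree assumption as above): the direct IO
 * variants follow the same pattern as lock_extent()/unlock_extent() but use
 * the EXTENT_DIO_LOCKED bit instead of EXTENT_LOCKED:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_dio_extent(&inode->io_tree, start, end, &cached);
 *	... do direct IO work on the range ...
 *	unlock_dio_extent(&inode->io_tree, start, end, &cached);
 */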

#endif /* BTRFS_EXTENT_IO_TREE_H */