/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Bits redefined from the extent state bits above, used only in the device
 * allocation tree.  They must not alias EXTENT_LOCKED, EXTENT_BOUNDARY,
 * EXTENT_CLEAR_META_RESV or EXTENT_CLEAR_DATA_RESV, because those bits have
 * special meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
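
/*
 * Illustrative sketch (an assumption, not a copy of an existing caller): a
 * device allocation tree tracks allocated chunk ranges with the aliased bits
 * above.  Assuming a device with an embedded io tree at @device->alloc_state,
 * the generic helpers declared later in this header could be used as:
 *
 *	set_extent_bits(&device->alloc_state, start, start + num_bytes - 1,
 *			CHUNK_ALLOCATED);
 *	...
 *	clear_extent_bits(&device->alloc_state, start, start + num_bytes - 1,
 *			  CHUNK_ALLOCATED);
 */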

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
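
/*
 * Illustrative sketch (assumptions, not an existing caller): an embedded io
 * tree is initialized once with its owner id and an optional private pointer,
 * and released when its container is torn down.  Assuming a hypothetical
 * per-inode tree at @inode->io_tree:
 *
 *	extent_io_tree_init(fs_info, &inode->io_tree, IO_TREE_INODE_IO,
 *			    &inode->vfs_inode);
 *	...
 *	extent_io_tree_release(&inode->io_tree);
 */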

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);

int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}
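
/*
 * Illustrative sketch (assumptions, not an existing caller): the usual
 * pattern is to lock a byte range, operate on it, then unlock it.  Passing
 * the cached extent_state back to the unlock variant lets it skip a second
 * tree search:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(&inode->io_tree, start, end, &cached);
 *	... work on the byte range [start, end] ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 *
 * try_lock_extent() is the non-blocking variant for callers that cannot wait
 * on a conflicting locked range.
 */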

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}
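
/*
 * Illustrative sketch (assumptions, not an existing caller): set a bit on a
 * range and later check whether the whole range still carries it.  With
 * filled == 1, test_range_bit() is expected to return 1 only when the bit
 * covers every byte of [start, end] rather than just part of it:
 *
 *	set_extent_bits(tree, start, end, EXTENT_DIRTY);
 *	...
 *	if (test_range_bit(tree, start, end, EXTENT_DIRTY, 1, NULL))
 *		... the whole range is dirty ...
 */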

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}
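
/*
 * Illustrative sketch (assumptions, not an existing caller): marking a
 * locked range as delalloc, e.g. during a buffered write, while reusing the
 * cached extent_state from the lock so the unlock does not search the tree
 * again.  @inode->io_tree is assumed, and no extra bits are requested:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(&inode->io_tree, start, end, &cached);
 *	set_extent_delalloc(&inode->io_tree, start, end, 0, &cached);
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */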

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
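
/*
 * Illustrative sketch (assumptions, not an existing caller): walking every
 * range in a tree that has a given bit set.  find_first_extent_bit() is
 * expected to return 0 and fill in *start_ret/*end_ret when a matching range
 * is found, so iteration typically looks like:
 *
 *	u64 cur = 0;
 *	u64 found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */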

/* This should be reworked in the future and put elsewhere. */
int get_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record **failrec);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);

#endif /* BTRFS_EXTENT_IO_TREE_H */