/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>
#include <linux/bio.h>

/*
 * Convenience macros to define a pointer with the __free(kfree) and
 * __free(kvfree) cleanup attributes and initialized to NULL.
 */
#define AUTO_KFREE(name)	*name __free(kfree) = NULL
#define AUTO_KVFREE(name)	*name __free(kvfree) = NULL

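/*
 * A minimal usage sketch (the function and variable names are hypothetical):
 * the pointer is kfree()d automatically when it goes out of scope, unless
 * ownership is transferred with no_free_ptr().
 *
 *	int example(void)
 *	{
 *		u32 AUTO_KFREE(val);
 *
 *		val = kmalloc(sizeof(*val), GFP_KERNEL);
 *		if (!val)
 *			return -ENOMEM;
 *		*val = 42;
 *		return 0;	// val is kfree()d on return
 *	}
 */
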
/*
 * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
 */
#define ENUM_BIT(name)                                  \
	__ ## name ## _BIT,                             \
	name = (1U << __ ## name ## _BIT),              \
	__ ## name ## _SEQ = __ ## name ## _BIT

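/*
 * For illustration (hypothetical flag names), each use expands to a
 * power-of-two value while keeping the enum's auto-increment counter in
 * sync with the bit number:
 *
 *	enum {
 *		ENUM_BIT(FLAG_A),	// FLAG_A == (1U << 0)
 *		ENUM_BIT(FLAG_B),	// FLAG_B == (1U << 1)
 *	};
 */
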
static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
{
	struct bio_vec bv = bio_iter_iovec(bio, *iter);

	return bvec_phys(&bv);
}

/*
 * Iterate a bio using the btrfs block size.
 *
 * This handles large folios and highmem.
 *
 * @paddr:	Physical memory address of each iteration
 * @bio:	The bio to iterate
 * @iter:	The bvec_iter (pointer) to use
 * @blocksize:	The block size to iterate with
 *
 * This requires all folios in the bio to cover at least one block.
 */
#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize)		\
	for (; (iter)->bi_size &&					\
	     (paddr = bio_iter_phys((bio), (iter)), 1);			\
	     bio_advance_iter_single((bio), (iter), (blocksize)))

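/*
 * A minimal sketch of the intended usage (the checksum helper is
 * hypothetical); the caller provides and advances its own iterator:
 *
 *	struct bvec_iter iter = bio->bi_iter;
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block(paddr, bio, &iter, blocksize)
 *		csum_one_block(paddr, blocksize);
 */
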
/* Initialize a bvec_iter to the size of the specified bio. */
static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	u32 bio_size = 0;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	return (struct bvec_iter) {
		.bi_sector = 0,
		.bi_size = bio_size,
		.bi_idx = 0,
		.bi_bvec_done = 0,
	};
}

/*
 * Same as btrfs_bio_for_each_block() but iterates the whole bio from the
 * beginning, using an internally declared iterator.
 */
#define btrfs_bio_for_each_block_all(paddr, bio, blocksize)		\
	for (struct bvec_iter iter = init_bvec_iter_for_bio(bio);	\
	     (iter).bi_size &&						\
	     (paddr = bio_iter_phys((bio), &(iter)), 1);		\
	     bio_advance_iter_single((bio), &(iter), (blocksize)))

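/*
 * Usage sketch (the helper is hypothetical); unlike
 * btrfs_bio_for_each_block() no caller-side iterator is needed, and
 * bio->bi_iter is left untouched:
 *
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block_all(paddr, bio, blocksize)
 *		zero_one_block(paddr, blocksize);
 */
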
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments for
	 * waitqueue_active() for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

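/*
 * Typical pairing (sketch, the wait queue and condition are hypothetical):
 * the waiter sleeps via wait_event(), the waker updates the condition and
 * only takes the wake_up() slow path when somebody is actually queued:
 *
 *	// waiter
 *	wait_event(wq, condition);
 *
 *	// waker
 *	condition = true;
 *	cond_wake_up(&wq);
 */
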
static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active() is implied by some of the preceding code, e.g.
	 * a value-returning atomic operation (atomic_dec_return(), ...), or
	 * an unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}

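/*
 * Sketch of the intended pattern (the counter is hypothetical): the
 * value-returning atomic already implies a full barrier, so the plain
 * waitqueue_active() check is safe:
 *
 *	if (atomic_dec_return(&counter) == 0)
 *		cond_wake_up_nomb(&wq);
 */
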
static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
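
/*
 * E.g. mult_perc(SZ_1G, 10) == 107374182, i.e. 10% of 1GiB rounded down.
 */
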
/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

/*
 * Simple bytenr-based rb_tree related structures.
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

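/*
 * Embedding sketch (the structure is hypothetical): the rb_simple_*()
 * helpers below cast via rb_entry(), so the two members must come first
 * and in this order:
 *
 *	struct my_block_entry {
 *		struct rb_node rb_node;
 *		u64 bytenr;
 *		// ... entry specific members ...
 *	};
 */
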
static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr.  If there is no entry
 * at or after @bytenr, return NULL.
 */
static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}

			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}

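/*
 * E.g. for a tree holding entries at bytenr 4096 and 8192,
 * rb_simple_search_first(root, 5000) returns the node at 8192, while
 * rb_simple_search_first(root, 8193) returns NULL.
 */
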
static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
{
	struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
	struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);

	if (new_entry->bytenr < existing_entry->bytenr)
		return -1;
	else if (new_entry->bytenr > existing_entry->bytenr)
		return 1;

	return 0;
}

static inline struct rb_node *rb_simple_insert(struct rb_root *root,
					       struct rb_simple_node *simple_node)
{
	return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
}

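/*
 * Putting it together (sketch, "my_block_entry" from the embedding example
 * above is hypothetical); rb_find_add() returns the existing node on
 * collision and NULL on successful insertion:
 *
 *	entry->bytenr = bytenr;
 *	node = rb_simple_insert(&root, (struct rb_simple_node *)entry);
 *	if (node)
 *		return -EEXIST;
 *
 *	node = rb_simple_search(&root, bytenr);
 *	if (node)
 *		entry = rb_entry(node, struct my_block_entry, rb_node);
 */
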
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}

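/*
 * Worked example (hypothetical bitmap): with only bits 2-5 set in @map,
 * bitmap_test_range_all_set(map, 2, 4) is true,
 * bitmap_test_range_all_set(map, 2, 5) is false, and
 * bitmap_test_range_all_zero(map, 6, 3) is true.
 */
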
#endif