xref: /linux/fs/btrfs/misc.h (revision f3827213abae9291b7525b05e6fd29b1f0536ce6)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>
#include <linux/bio.h>

/*
 * Enumerate bits using enum autoincrement.  Define @name as the n-th bit.
 */
#define ENUM_BIT(name)                                  \
	__ ## name ## _BIT,                             \
	name = (1U << __ ## name ## _BIT),              \
	__ ## name ## _SEQ = __ ## name ## _BIT

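/*
 * Illustrative sketch, not part of this header (the EXAMPLE_* names are
 * hypothetical).  The trailing _SEQ enumerator resets the implicit counter,
 * so consecutive ENUM_BIT() entries occupy consecutive bits:
 *
 *	enum {
 *		ENUM_BIT(EXAMPLE_FLAG_A),
 *		ENUM_BIT(EXAMPLE_FLAG_B),
 *		ENUM_BIT(EXAMPLE_FLAG_C),
 *	};
 *
 * yields EXAMPLE_FLAG_A == 0x1, EXAMPLE_FLAG_B == 0x2 and
 * EXAMPLE_FLAG_C == 0x4.
 */
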
/* Return the physical address of the current position of @iter inside @bio. */
static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
{
	struct bio_vec bv = bio_iter_iovec(bio, *iter);

	return bvec_phys(&bv);
}

/*
 * Iterate a bio using the btrfs block size.
 *
 * This handles large folios and highmem.
 *
 * @paddr:	Physical memory address of each iteration
 * @bio:	The bio to iterate
 * @iter:	The bvec_iter (pointer) to use
 * @blocksize:	The block size to iterate with
 *
 * This requires all folios in the bio to cover at least one block.
 */
#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize)		\
	for (; (iter)->bi_size &&					\
	     (paddr = bio_iter_phys((bio), (iter)), 1);			\
	     bio_advance_iter_single((bio), (iter), (blocksize)))

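/*
 * Illustrative sketch, not part of this header.  fs_info->sectorsize is the
 * btrfs block size; process_block() is a hypothetical callback.  Copying
 * bio->bi_iter keeps the bio's own iterator untouched:
 *
 *	struct bvec_iter iter = bio->bi_iter;
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block(paddr, bio, &iter, fs_info->sectorsize)
 *		process_block(paddr);
 */
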
/* Initialize a bvec_iter to the size of the specified bio. */
static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	u32 bio_size = 0;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	return (struct bvec_iter) {
		.bi_sector = 0,
		.bi_size = bio_size,
		.bi_idx = 0,
		.bi_bvec_done = 0,
	};
}

#define btrfs_bio_for_each_block_all(paddr, bio, blocksize)		\
	for (struct bvec_iter iter = init_bvec_iter_for_bio(bio);	\
	     (iter).bi_size &&						\
	     (paddr = bio_iter_phys((bio), &(iter)), 1);		\
	     bio_advance_iter_single((bio), &(iter), (blocksize)))

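/*
 * Illustrative sketch, not part of this header: the _all variant builds its
 * own iterator via init_bvec_iter_for_bio(), so it always walks the bio from
 * the first bvec regardless of bio->bi_iter (process_block() is again
 * hypothetical):
 *
 *	phys_addr_t paddr;
 *
 *	btrfs_bio_for_each_block_all(paddr, bio, fs_info->sectorsize)
 *		process_block(paddr);
 */
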
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments for
	 * waitqueue_active on why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active is implied by some of the preceding code, e.g. one
	 * of the atomic operations with a full barrier (atomic_dec_return,
	 * ...), or an unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}

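/*
 * Illustrative sketch, not part of this header; @pending and @waiters_wq are
 * hypothetical.  Publish the state change first, then wake conditionally:
 *
 *	atomic_dec(&pending);		(no implied barrier)
 *	cond_wake_up(&waiters_wq);	(wq_has_sleeper() adds the smp_mb())
 *
 * When the preceding code already implies a full barrier, e.g. an
 * atomic_dec_return() or an unlock/lock sequence, cond_wake_up_nomb() can
 * skip the extra barrier.
 */
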
static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
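
/*
 * E.g. mult_perc(200, 75) == 150.  Note the multiplication happens first,
 * so @num * @percent must fit in a u64, and div_u64() truncates the result.
 */
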
/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

/*
 * Simple bytenr-based rb_tree helper structure.
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

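/*
 * Illustrative sketch, not part of this header; struct my_block_entry and
 * its @refs member are hypothetical:
 *
 *	struct my_block_entry {
 *		struct rb_simple_node simple_node;
 *		u32 refs;
 *	};
 *
 * Embedding struct rb_simple_node as the first member satisfies the layout
 * requirement above, so the helpers below can be used on such an entry.
 */
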
static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr.  If there is no entry
 * at or after @bytenr return NULL.
 */
static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}

			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}
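
/*
 * E.g. with entries at bytenr 4096 and 16384, rb_simple_search_first(root,
 * 8192) returns the node of the 16384 entry, while
 * rb_simple_search_first(root, 32768) returns NULL.
 */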

static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
{
	struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
	struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);

	if (new_entry->bytenr < existing_entry->bytenr)
		return -1;
	else if (new_entry->bytenr > existing_entry->bytenr)
		return 1;

	return 0;
}

static inline struct rb_node *rb_simple_insert(struct rb_root *root,
					       struct rb_simple_node *simple_node)
{
	return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
}

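/*
 * Illustrative sketch, not part of this header, reusing the hypothetical
 * struct my_block_entry from above.  rb_simple_insert() returns NULL on
 * success, or the node that already occupies the same bytenr (see
 * rb_find_add()):
 *
 *	struct rb_node *node;
 *
 *	node = rb_simple_insert(&root, &entry->simple_node);
 *	if (node)
 *		entry = rb_entry(node, struct my_block_entry,
 *				 simple_node.rb_node);
 */
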
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}

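/*
 * E.g. with bits 8-11 set:
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	bitmap_zero(map, 64);
 *	bitmap_set(map, 8, 4);
 *
 * then bitmap_test_range_all_set(map, 8, 4) and
 * bitmap_test_range_all_zero(map, 0, 8) are both true, while
 * bitmap_test_range_all_set(map, 8, 5) is false.
 */
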
/* Return the file offset (in bytes) of the first byte after @folio. */
static inline u64 folio_end(struct folio *folio)
{
	return folio_pos(folio) + folio_size(folio);
}

#endif