/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/math64.h>
#include <linux/rbtree.h>

/*
 * Enumerate bits using enum autoincrement. Define @name as the n-th bit.
 */
#define ENUM_BIT(name)                                  \
	__ ## name ## _BIT,                             \
	name = (1U << __ ## name ## _BIT),              \
	__ ## name ## _SEQ = __ ## name ## _BIT

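/*
 * Usage sketch (illustrative, not from the original header; the flag names
 * are hypothetical).  The __*_SEQ member resets the implicit enum counter
 * back to the bit position, so the next ENUM_BIT() continues at n + 1
 * instead of continuing from the (power-of-two) flag value:
 *
 *	enum {
 *		ENUM_BIT(FLAG_A),	// __FLAG_A_BIT = 0, FLAG_A = 0x1
 *		ENUM_BIT(FLAG_B),	// __FLAG_B_BIT = 1, FLAG_B = 0x2
 *		ENUM_BIT(FLAG_C),	// __FLAG_C_BIT = 2, FLAG_C = 0x4
 *	};
 */
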
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments for
	 * waitqueue_active for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

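/*
 * Typical use (a minimal sketch with hypothetical names, not from the
 * original header): the waker publishes the condition first, then calls
 * cond_wake_up(); the barrier supplied by wq_has_sleeper() orders the
 * condition store before the sleeper check:
 *
 *	// waker
 *	atomic_set(&ctx->done, 1);
 *	cond_wake_up(&ctx->wait);
 *
 *	// waiter
 *	wait_event(ctx->wait, atomic_read(&ctx->done));
 */
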
static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active is implied by some of the preceding code, e.g. one
	 * of the fully ordered atomic operations (atomic_dec_return, ...), or
	 * an unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}

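/*
 * Example of a caller that already has the required ordering (a sketch with
 * hypothetical names, not from the original header): atomic_dec_and_test()
 * is fully ordered, so the _nomb variant can skip the extra barrier:
 *
 *	if (atomic_dec_and_test(&ctx->pending))
 *		cond_wake_up_nomb(&ctx->wait);
 */
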
/* Compute @percent percent of @num, i.e. num * percent / 100. */
static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}

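/*
 * Worked example (illustrative): mult_perc(200, 75) == 150.  Note that
 * num * percent is computed in 64 bits, so the product must fit in a u64.
 */
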
/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

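/*
 * How the bit trick works (worked example): for n = 8 (0b1000),
 * n - 1 = 0b0111 and (n & (n - 1)) == 0, so the check is true.  For
 * n = 6 (0b0110), n - 1 = 0b0101 and (n & (n - 1)) == 0b0100 != 0,
 * so the check is false.
 */
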
/*
 * Simple bytenr-based rb_tree related structures
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

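/*
 * One possible layout (a sketch; struct and member names are hypothetical):
 * embedding struct rb_simple_node as the first member gives the required
 * layout, since the rb_simple_* helpers cast the rb_node back to it:
 *
 *	struct my_block_entry {
 *		struct rb_simple_node simple_node;	// must be first
 *		u32 refs;				// private data follows
 *	};
 */
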
/* Search @root for an entry with exactly @bytenr, return NULL if not found. */
static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

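/*
 * Lookup sketch (hypothetical names, building on the example container
 * above): convert the returned rb_node back to the container with
 * rb_entry():
 *
 *	struct rb_node *node = rb_simple_search(&tree, bytenr);
 *
 *	if (node) {
 *		struct my_block_entry *entry;
 *
 *		entry = rb_entry(node, struct my_block_entry,
 *				 simple_node.rb_node);
 *		// use entry
 *	}
 */
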
/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr.  If there is no entry
 * at or after @bytenr return NULL.
 */
static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}

			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}

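/*
 * Worked example (illustrative): for a tree holding bytenrs {10, 30, 50},
 * rb_simple_search_first(&tree, 30) returns the node for 30 (exact match),
 * rb_simple_search_first(&tree, 20) returns the node for 30 (smallest
 * bytenr >= 20), and rb_simple_search_first(&tree, 60) returns NULL.
 */
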
static inline int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
{
	struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
	struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);

	if (new_entry->bytenr < existing_entry->bytenr)
		return -1;
	else if (new_entry->bytenr > existing_entry->bytenr)
		return 1;

	return 0;
}

/*
 * Insert @simple_node into @root keyed by bytenr.  Return NULL on success,
 * or the existing node if an entry with the same bytenr is already present.
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root,
					       struct rb_simple_node *simple_node)
{
	return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
}

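/*
 * Insertion sketch (hypothetical names, continuing the example container):
 *
 *	struct rb_node *exist;
 *
 *	entry->simple_node.bytenr = bytenr;
 *	exist = rb_simple_insert(&tree, &entry->simple_node);
 *	if (exist) {
 *		// duplicate: an entry with this bytenr already exists
 *	}
 */
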
/* Return true if all bits in the range [start, start + nbits) are set. */
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

/* Return true if all bits in the range [start, start + nbits) are zero. */
static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}

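/*
 * Usage sketch (illustrative, not from the original header):
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	bitmap_zero(map, 64);
 *	bitmap_set(map, 8, 4);				// set bits 8..11
 *	bitmap_test_range_all_set(map, 8, 4);		// true
 *	bitmap_test_range_all_set(map, 8, 5);		// false, bit 12 clear
 *	bitmap_test_range_all_zero(map, 0, 8);		// true
 */
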
/* Return the file offset of the first byte after @folio (exclusive end). */
static inline u64 folio_end(struct folio *folio)
{
	return folio_pos(folio) + folio_size(folio);
}
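
/*
 * Worked example (illustrative): for a single-page folio at file position
 * 4096 with a 4K page size, folio_pos() == 4096, folio_size() == 4096 and
 * folio_end() == 8192, i.e. the exclusive end offset.
 */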

#endif