xref: /linux/fs/btrfs/ulist.c (revision f92b71ffca8c7e45e194aecc85e31bd11582f4d2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 */

#include <linux/slab.h>
#include "messages.h"
#include "ulist.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

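/*
 * A more concrete sketch of the same loop with error handling (illustration
 * only; the names 'bytenr' and the child lookup are placeholders, not code
 * from this file):
 *
 * ulist = ulist_alloc(GFP_NOFS);
 * if (!ulist)
 * 	return -ENOMEM;
 * ret = ulist_add(ulist, root_bytenr, 0, GFP_NOFS);
 * if (ret < 0)
 * 	goto out;
 * ULIST_ITER_INIT(&uiter);
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (each child bytenr of elem->val) {
 * 		ret = ulist_add(ulist, bytenr, 0, GFP_NOFS);
 * 		if (ret < 0)
 * 			goto out;
 * 	}
 * }
 * out:
 * 	ulist_free(ulist);
 */
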
/*
 * Freshly initialize a ulist.
 *
 * @ulist:	the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
	ulist->prealloc = NULL;
}

/*
 * Free up additionally allocated memory for the ulist.
 *
 * @ulist:	the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	kfree(ulist->prealloc);
	ulist->prealloc = NULL;
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}

/*
 * Prepare a ulist for reuse.
 *
 * @ulist:	ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

/*
 * Dynamically allocate a ulist.
 *
 * @gfp_mask:	allocation flags for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

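/*
 * Preallocate a node for a following ulist_add().
 *
 * @ulist:	ulist to attach the preallocated node to
 * @gfp_mask:	allocation flags for the node allocation
 *
 * The preallocated node is consumed by the next ulist_add() or
 * ulist_add_merge() call that has to insert a new value, so a caller can do
 * the allocation up front, e.g. before entering a section where the add
 * should not allocate.
 */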
void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)
{
	if (!ulist->prealloc)
		ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);
}

/*
 * Free dynamically allocated ulist.
 *
 * @ulist:	ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

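/*
 * Key comparator for rb_find()/rb_find_add(): returns <0, 0 or >0 when the
 * key sorts before, equal to or after the node's value.
 */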
static int ulist_node_val_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 *val = key;
	const struct ulist_node *unode = rb_entry(node, struct ulist_node, rb_node);

	if (unode->val < *val)
		return 1;
	else if (unode->val > *val)
		return -1;

	return 0;
}

static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *node;

	node = rb_find(&val, &ulist->root, ulist_node_val_key_cmp);
	return rb_entry_safe(node, struct ulist_node, rb_node);
}

static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

static int ulist_node_val_cmp(struct rb_node *new, const struct rb_node *existing)
{
	const struct ulist_node *unode = rb_entry(new, struct ulist_node, rb_node);

	return ulist_node_val_key_cmp(&unode->val, existing);
}

static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node *node;

	node = rb_find_add(&ins->rb_node, &ulist->root, ulist_node_val_cmp);
	if (node)
		return -EEXIST;
	return 0;
}

/*
 * Add an element to the ulist.
 *
 * @ulist:	ulist to add the element to
 * @val:	value to add to ulist
 * @aux:	auxiliary value to store along with val
 * @gfp_mask:	flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks, write
 *       locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}

	if (ulist->prealloc) {
		node = ulist->prealloc;
		ulist->prealloc = NULL;
	} else {
		node = kmalloc(sizeof(*node), gfp_mask);
		if (!node)
			return -ENOMEM;
	}

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}

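/*
 * A sketch of acting on the return value of the merge variant (illustrative
 * names, not call sites from this file); @old_aux receives the aux value that
 * is already stored when @val is found:
 *
 * ret = ulist_add_merge(ulist, val, new_aux, &old_aux, GFP_NOFS);
 * if (ret < 0)
 * 	return ret;		(-ENOMEM, the ulist is unaltered)
 * else if (ret == 0)
 * 	val existed, old_aux holds the stored aux, new_aux was ignored;
 * else
 * 	val was inserted together with new_aux;
 */
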
/*
 * Delete one node from ulist.
 *
 * @ulist:	ulist to remove node from
 * @val:	value to delete
 * @aux:	aux to delete
 *
 * The deletion will only be done when *BOTH* val and aux match.
 * Return 0 for successful delete.
 * Return > 0 for not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found, delete it */
	ulist_rbtree_erase(ulist, node);
	return 0;
}

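/*
 * A deletion sketch (illustration only): the node is removed only when both
 * the value and the auxiliary value match what was stored.
 *
 * if (ulist_del(ulist, val, aux))
 * 	not found or aux mismatch, the ulist is unchanged;
 */
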
/*
 * Iterate ulist.
 *
 * @ulist:	ulist to iterate
 * @uiter:	iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks only read
 *       locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(const struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}