xref: /freebsd/sys/compat/linuxkpi/common/src/linux_idr.c (revision 895f86f15fbf6540071feb9328c3c50ed1f027b8)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/sysctl.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 
41 #include <machine/stdarg.h>
42 
43 #include <linux/bitops.h>
44 #include <linux/kobject.h>
45 #include <linux/slab.h>
46 #include <linux/idr.h>
47 #include <linux/err.h>
48 
49 /*
50  * IDR Implementation.
51  *
52  * This is quick and dirty and not as re-entrant as the linux version
53  * however it should be fairly fast.  It is basically a radix tree with
54  * a builtin bitmap for allocation.
55  */
/* Allocation tag for all idr tree layers and cached free-list layers. */
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");
57 
58 static inline int
59 idr_max(struct idr *idr)
60 {
61 	return (1 << (idr->layers * IDR_BITS)) - 1;
62 }
63 
64 static inline int
65 idr_pos(int id, int layer)
66 {
67 	return (id >> (IDR_BITS * layer)) & IDR_MASK;
68 }
69 
70 void
71 idr_init(struct idr *idr)
72 {
73 	bzero(idr, sizeof(*idr));
74 	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
75 }
76 
77 /* Only frees cached pages. */
78 void
79 idr_destroy(struct idr *idr)
80 {
81 	struct idr_layer *il, *iln;
82 
83 	idr_remove_all(idr);
84 	mtx_lock(&idr->lock);
85 	for (il = idr->free; il != NULL; il = iln) {
86 		iln = il->ary[0];
87 		free(il, M_IDR);
88 	}
89 	mtx_unlock(&idr->lock);
90 }
91 
92 static void
93 idr_remove_layer(struct idr_layer *il, int layer)
94 {
95 	int i;
96 
97 	if (il == NULL)
98 		return;
99 	if (layer == 0) {
100 		free(il, M_IDR);
101 		return;
102 	}
103 	for (i = 0; i < IDR_SIZE; i++)
104 		if (il->ary[i])
105 			idr_remove_layer(il->ary[i], layer - 1);
106 }
107 
/*
 * Drop every entry and free the whole tree.  Stored pointers are
 * discarded without notifying their owners.  Layers cached on the
 * free list are NOT released here; see idr_destroy().
 */
void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	/* idr->layers - 1 is the height of the root layer. */
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}
118 
119 void
120 idr_remove(struct idr *idr, int id)
121 {
122 	struct idr_layer *il;
123 	int layer;
124 	int idx;
125 
126 	id &= MAX_ID_MASK;
127 	mtx_lock(&idr->lock);
128 	il = idr->top;
129 	layer = idr->layers - 1;
130 	if (il == NULL || id > idr_max(idr)) {
131 		mtx_unlock(&idr->lock);
132 		return;
133 	}
134 	/*
135 	 * Walk down the tree to this item setting bitmaps along the way
136 	 * as we know at least one item will be free along this path.
137 	 */
138 	while (layer && il) {
139 		idx = idr_pos(id, layer);
140 		il->bitmap |= 1 << idx;
141 		il = il->ary[idx];
142 		layer--;
143 	}
144 	idx = id & IDR_MASK;
145 	/*
146 	 * At this point we've set free space bitmaps up the whole tree.
147 	 * We could make this non-fatal and unwind but linux dumps a stack
148 	 * and a warning so I don't think it's necessary.
149 	 */
150 	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
151 		panic("idr_remove: Item %d not allocated (%p, %p)\n",
152 		    id, idr, il);
153 	il->ary[idx] = NULL;
154 	il->bitmap |= 1 << idx;
155 	mtx_unlock(&idr->lock);
156 	return;
157 }
158 
159 void *
160 idr_replace(struct idr *idr, void *ptr, int id)
161 {
162 	struct idr_layer *il;
163 	void *res;
164 	int layer;
165 	int idx;
166 
167 	res = ERR_PTR(-EINVAL);
168 	id &= MAX_ID_MASK;
169 	mtx_lock(&idr->lock);
170 	il = idr->top;
171 	layer = idr->layers - 1;
172 	if (il == NULL || id > idr_max(idr))
173 		goto out;
174 	while (layer && il) {
175 		il = il->ary[idr_pos(id, layer)];
176 		layer--;
177 	}
178 	idx = id & IDR_MASK;
179 	/*
180 	 * Replace still returns an error if the item was not allocated.
181 	 */
182 	if (il != NULL && (il->bitmap & (1 << idx)) != 0) {
183 		res = il->ary[idx];
184 		il->ary[idx] = ptr;
185 	}
186 out:
187 	mtx_unlock(&idr->lock);
188 	return (res);
189 }
190 
191 static inline void *
192 idr_find_locked(struct idr *idr, int id)
193 {
194 	struct idr_layer *il;
195 	void *res;
196 	int layer;
197 
198 	mtx_assert(&idr->lock, MA_OWNED);
199 
200 	id &= MAX_ID_MASK;
201 	res = NULL;
202 	il = idr->top;
203 	layer = idr->layers - 1;
204 	if (il == NULL || id > idr_max(idr))
205 		return (NULL);
206 	while (layer && il) {
207 		il = il->ary[idr_pos(id, layer)];
208 		layer--;
209 	}
210 	if (il != NULL)
211 		res = il->ary[id & IDR_MASK];
212 	return (res);
213 }
214 
215 void *
216 idr_find(struct idr *idr, int id)
217 {
218 	void *res;
219 
220 	mtx_lock(&idr->lock);
221 	res = idr_find_locked(idr, id);
222 	mtx_unlock(&idr->lock);
223 	return (res);
224 }
225 
/*
 * Pre-allocate layers on the free list so that a later idr_get_new()
 * can succeed without allocating: one allocation needs at most one
 * layer per tree level plus one for expansion (idr->layers + 1).
 *
 * Returns 1 on success, 0 if a layer allocation failed (Linux
 * convention).  May sleep depending on 'gfp_mask' — presumably mapped
 * to M_WAITOK/M_NOWAIT by the compat headers.
 */
int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		/* Count how many more layers the free list must hold. */
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		/* Drop the lock across malloc(); it may sleep. */
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		/* Build a chain of 'need' fresh layers linked via ary[0]. */
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			/* Every slot of a fresh layer starts out free. */
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		/*
		 * Prepend the chain and loop to re-check the count under
		 * the lock; another thread may have consumed layers while
		 * we were unlocked.
		 */
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}
260 
261 static inline struct idr_layer *
262 idr_get(struct idr *idr)
263 {
264 	struct idr_layer *il;
265 
266 	il = idr->free;
267 	if (il) {
268 		idr->free = il->ary[0];
269 		il->ary[0] = NULL;
270 		return (il);
271 	}
272 	il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT);
273 	bitmap_fill(&il->bitmap, IDR_SIZE);
274 	return (il);
275 }
276 
/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
 * first for simplicity sake.
 *
 * Allocate the lowest free id, bind it to 'ptr', and return it in
 * '*idp'.  Returns 0 on success, -EAGAIN if a layer allocation failed
 * (call idr_pre_get() and retry), or -ENOSPC when the id space is
 * exhausted.
 */
int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	error = -EAGAIN;
	mtx_lock(&idr->lock);
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		/* The old root becomes child 0 of the new root. */
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;	/* Old root is full: mark slot 0 in use. */
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 * A set bit marks a free slot; ffsl() picks the lowest one.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		/* Accumulate the id from the per-level slot indices. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		/* Materialize missing interior layers on the way down. */
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	mtx_unlock(&idr->lock);
	return (error);
}
356 
/*
 * Allocate the lowest free id that is >= 'starting_id', bind it to
 * 'ptr', and return it in '*idp'.  Returns 0 on success, -EAGAIN if a
 * layer allocation failed (call idr_pre_get() and retry), or -ENOSPC
 * when no id >= 'starting_id' fits in the id space.
 */
int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	error = -EAGAIN;
	mtx_lock(&idr->lock);
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 * bitmap < (1 << pos) means every free slot is below the slot that
	 * starting_id needs, so another level is required.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		/* The old root becomes child 0 of the new root. */
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;	/* Old root is full: mark slot 0 in use. */
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns index numbered from 0 or size if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		/* Materialize missing interior layers on the way down. */
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	mtx_unlock(&idr->lock);
	return (error);
}
463