/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitops.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in bitmap for allocation.
 */
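
/*
 * A minimal usage sketch (illustrative only; "myidr" and "obj" are
 * hypothetical, and error handling is elided):
 *
 *	struct idr myidr;
 *	int id;
 *
 *	idr_init(&myidr);
 *	if (idr_get_new(&myidr, obj, &id) == 0)
 *		MPASS(idr_find(&myidr, id) == obj);
 *	idr_remove(&myidr, id);
 *	idr_destroy(&myidr);
 */
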
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}
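
/*
 * For illustration (assuming IDR_BITS were 5, so IDR_SIZE is 32): a
 * two-layer tree covers ids 0..1023, since idr_max() yields
 * (1 << (2 * 5)) - 1 = 1023, and idr_pos(id, 1) extracts bits 5..9 of
 * the id, i.e. which layer-1 slot the search for "id" descends into.
 */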

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees the idr's cached layers; consumer pointers are never freed. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
	/* Free the interior layer itself, not just its leaves. */
	free(il, M_IDR);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return;
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * since we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * trace and a warning here, so it does not seem necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;
}

void
idr_remove(struct idr *idr, int id)
{
	mtx_lock(&idr->lock);
	idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);
}

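/*
 * On failure idr_replace() returns an encoded errno rather than NULL,
 * so callers follow the Linux ERR_PTR convention; a sketch ("old" and
 * "newobj" are hypothetical):
 *
 *	old = idr_replace(&myidr, newobj, id);
 *	if (IS_ERR(old))
 *		return (PTR_ERR(old));
 */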
void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	res = ERR_PTR(-EINVAL);
	id &= MAX_ID_MASK;
	mtx_lock(&idr->lock);
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		goto out;
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * Replace returns an error if the item was never allocated.
	 * A clear bitmap bit means the slot is in use.
	 */
	if (il != NULL && (il->bitmap & (1 << idx)) == 0) {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
out:
	mtx_unlock(&idr->lock);
	return (res);
}

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;

	mtx_assert(&idr->lock, MA_OWNED);

	id &= MAX_ID_MASK;
	res = NULL;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

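/*
 * Preload the free list with enough layers for one allocation.
 * Returns 1 on success and 0 on allocation failure.  In Linux this
 * pairs with idr_get_new() in a retry loop; a sketch of that pattern
 * ("myidr" and "obj" are hypothetical):
 *
 *	do {
 *		if (idr_pre_get(&myidr, GFP_KERNEL) == 0)
 *			return (-ENOMEM);
 *	} while (idr_get_new(&myidr, obj, &id) == -EAGAIN);
 */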
int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static inline struct idr_layer *
idr_get(struct idr *idr)
{
	struct idr_layer *il;

	il = idr->free;
	if (il) {
		idr->free = il->ary[0];
		il->ary[0] = NULL;
		return (il);
	}
	il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT);
	/* M_NOWAIT allocations can fail; don't touch the bitmap if so. */
	if (il != NULL)
		bitmap_fill(&il->bitmap, IDR_SIZE);
	return (il);
}

/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
 * first for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, recording our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, recording our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns an index numbered from 0, or IDR_SIZE if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new_above: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}
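/*
 * Allocates the lowest free id at or above starting_id.  For example
 * (a sketch; "myidr" and "obj" are hypothetical), with ids 0..2
 * already in use, the call below stores 3 in "id":
 *
 *	idr_get_new_above(&myidr, obj, 2, &id);
 */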
int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

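/*
 * Unlike idr_get_new*(), idr_alloc() returns the allocated id itself
 * (or a negative errno) and constrains it to [start, end), where
 * end <= 0 means "no upper bound".  A sketch ("myidr" and "obj" are
 * hypothetical):
 *
 *	id = idr_alloc(&myidr, obj, 1, 100, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 *
 * On success the id is within [1, 99].
 */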
int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}

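/*
 * Like idr_alloc(), but the search resumes at the id following the
 * most recent allocation, wrapping back to "start" once the upper end
 * of the range is exhausted.  For example, after handing out ids 1..5
 * from [1, 100), the next call returns 6 even if 3 has since been
 * freed.
 */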
int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}