xref: /freebsd/sys/kern/subr_unit.c (revision cca48a59de682fe40c6ac3b2bb4356d0e42f21dd)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  *
28  *
29  * Unit number allocation functions.
30  *
31  * These functions implement a mixed run-length/bitmap management of unit
32  * number spaces in a very memory efficient manner.
33  *
34  * Allocation policy is always lowest free number first.
35  *
36  * A return value of -1 signals that no more unit numbers are available.
37  *
38  * There is no cost associated with the range of unitnumbers, so unless
39  * the resource really is finite, specify INT_MAX to new_unrhdr() and
40  * forget about checking the return value.
41  *
42  * If a mutex is not provided when the unit number space is created, a
43  * default global mutex is used.  The advantage to passing a mutex in, is
44  * that the alloc_unrl() function can be called with the mutex already
45  * held (it will not be released by alloc_unrl()).
46  *
47  * The allocation function alloc_unr{l}() never sleeps (but it may block on
48  * the mutex of course).
49  *
50  * Freeing a unit number may require allocating memory, and can therefore
51  * sleep so the free_unr() function does not come in a pre-locked variant.
52  *
53  * A userland test program is included.
54  *
55  * Memory usage is a very complex function of the exact allocation
56  * pattern, but always very compact:
57  *    * For the very typical case where a single unbroken run of unit
 *      numbers is allocated, 44 bytes are used on i386.
59  *    * For a unit number space of 1000 units and the random pattern
60  *      in the usermode test program included, the worst case usage
61  *	was 252 bytes on i386 for 500 allocated and 500 free units.
62  *    * For a unit number space of 10000 units and the random pattern
63  *      in the usermode test program included, the worst case usage
64  *	was 798 bytes on i386 for 5000 allocated and 5000 free units.
65  *    * The worst case is where every other unit number is allocated and
66  *	the rest are free.  In that case 44 + N/4 bytes are used where
67  *	N is the number of the highest unit allocated.
68  */
69 
70 #include <sys/types.h>
71 #include <sys/_unrhdr.h>
72 
73 #ifdef _KERNEL
74 
75 #include <sys/bitstring.h>
76 #include <sys/param.h>
77 #include <sys/malloc.h>
78 #include <sys/kernel.h>
79 #include <sys/systm.h>
80 #include <sys/limits.h>
81 #include <sys/lock.h>
82 #include <sys/mutex.h>
83 
84 /*
85  * In theory it would be smarter to allocate the individual blocks
86  * with the zone allocator, but at this time the expectation is that
87  * there will typically not even be enough allocations to fill a single
88  * page, so we stick with malloc for now.
89  */
/* Kernel malloc(9) type used for all unr bookkeeping allocations. */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

/* All unr memory is zeroed and may sleep (M_WAITOK). */
#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

/* Shared default mutex for spaces created without a caller-supplied one. */
static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
98 
99 #else /* ...USERLAND */
100 
101 #include <bitstring.h>
102 #include <err.h>
103 #include <errno.h>
104 #include <getopt.h>
105 #include <stdbool.h>
106 #include <stdio.h>
107 #include <stdlib.h>
108 #include <string.h>
109 
/* Userland stand-in for the kernel KASSERT(): print the message and abort. */
#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

/*
 * While non-zero, any allocation is a test failure: used to prove that
 * alloc_unr() never allocates memory.
 */
static int no_alloc;
#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	/* calloc() mirrors the kernel's M_ZERO behavior. */
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)
128 
struct unrhdr;


/* Minimal single-threaded mutex emulation for the userland test driver. */
struct mtx {
	int	state;		/* 0 = unlocked, 1 = locked */
} unitmtx;
135 
/* Acquire the fake mutex; re-locking is a test failure (no recursion). */
static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}
142 
/* Release the fake mutex; unlocking an unheld mutex is a test failure. */
static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}
149 
150 #define MA_OWNED	9
151 
152 static void
153 mtx_assert(struct mtx *mp, int flag)
154 {
155 	if (flag == MA_OWNED) {
156 		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
157 	}
158 }
159 
160 #define CTASSERT(foo)
161 #define WITNESS_WARN(flags, lock, fmt, ...)	(void)0
162 
163 #endif /* USERLAND */
164 
165 /*
166  * This is our basic building block.
167  *
168  * It can be used in three different ways depending on the value of the ptr
169  * element:
170  *     If ptr is NULL, it represents a run of free items.
171  *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
173  *
174  * For runs the len field is the length of the run.
175  * For bitmaps the len field represents the number of allocated items.
176  *
177  * The bitmap is the same size as struct unr to optimize memory management.
178  */
struct unr {
	TAILQ_ENTRY(unr)	list;	/* Linkage on the unrhdr chunk list */
	u_int			len;	/* Run length, or set bits in bitmap */
	void			*ptr;	/* NULL, the unrhdr, or a bitmap */
};

struct unrb {
	u_char			busy;	/* Number of set (allocated) bits */
	bitstr_t		map[sizeof(struct unr) - 1];
};

/* A struct unr must be reusable in place as bitmap storage. */
CTASSERT(sizeof(struct unr) == sizeof(struct unrb));

/* Number of bits in the bitmap */
#define NBITS	((int)sizeof(((struct unrb *)NULL)->map) * 8)
194 
195 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
196 /*
197  * Consistency check function.
198  *
199  * Checks the internal consistency as well as we can.
200  *
201  * Called at all boundaries of this API.
202  */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	u_int x, y, z, w;

	y = uh->first;	/* allocated units, seeded with the implicit run */
	z = 0;		/* struct unr allocations accounted for */
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			/* Bitmap chunk: its map lives in a second unr. */
			ub = up->ptr;
			KASSERT (up->len <= NBITS,
			    ("UNR inconsistency: len %u max %d (line %d)\n",
			    up->len, NBITS, line));
			z++;
			/* Recount set bits; must match the cached busy. */
			w = 0;
			for (x = 0; x < up->len; x++)
				if (bit_test(ub->map, x))
					w++;
			KASSERT (w == ub->busy,
			    ("UNR inconsistency: busy %u found %u (line %d)\n",
			    ub->busy, w, line));
			y += w;
		} else if (up->ptr != NULL)
			/* Allocated run (ptr == uh); free runs add nothing. */
			y += up->len;
	}
	/* Totals must agree with the header's cached counters. */
	KASSERT (y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT (z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}
238 
239 #else
240 
/* DIAGNOSTIC disabled in-kernel: consistency checking compiles to nothing. */
static __inline void
check_unrhdr(struct unrhdr *uh, int line)
{

}
246 
247 #endif
248 
249 
250 /*
251  * Userland memory management.  Just use calloc and keep track of how
252  * many elements we have allocated for check_unrhdr().
253  */
254 
255 static __inline void *
256 new_unr(struct unrhdr *uh, void **p1, void **p2)
257 {
258 	void *p;
259 
260 	uh->alloc++;
261 	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
262 	if (*p1 != NULL) {
263 		p = *p1;
264 		*p1 = NULL;
265 		return (p);
266 	} else {
267 		p = *p2;
268 		*p2 = NULL;
269 		return (p);
270 	}
271 }
272 
273 static __inline void
274 delete_unr(struct unrhdr *uh, void *ptr)
275 {
276 	struct unr *up;
277 
278 	uh->alloc--;
279 	up = ptr;
280 	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
281 }
282 
/*
 * Free all struct unr whose release was postponed by delete_unr().
 * Must be entered with the space's mutex held; the mutex is dropped
 * around each Free() call (the item is already unlinked, so this is
 * safe) and re-acquired before looking at the list again.
 */
void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		/* Unlink under the lock, free outside it. */
		TAILQ_REMOVE(&uh->ppfree, up, list);
		mtx_unlock(uh->mtx);
		Free(up);
		mtx_lock(uh->mtx);
	}

}
297 
/* Locked wrapper around clean_unrhdrl(). */
void
clean_unrhdr(struct unrhdr *uh)
{

	mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
}
306 
307 void
308 init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
309 {
310 
311 	KASSERT(low >= 0 && low <= high,
312 	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
313 	if (mutex != NULL)
314 		uh->mtx = mutex;
315 	else
316 		uh->mtx = &unitmtx;
317 	TAILQ_INIT(&uh->head);
318 	TAILQ_INIT(&uh->ppfree);
319 	uh->low = low;
320 	uh->high = high;
321 	uh->first = 0;
322 	uh->last = 1 + (high - low);
323 	check_unrhdr(uh, __LINE__);
324 }
325 
326 /*
327  * Allocate a new unrheader set.
328  *
329  * Highest and lowest valid values given as parameters.
330  */
331 
332 struct unrhdr *
333 new_unrhdr(int low, int high, struct mtx *mutex)
334 {
335 	struct unrhdr *uh;
336 
337 	uh = Malloc(sizeof *uh);
338 	init_unrhdr(uh, low, high, mutex);
339 	return (uh);
340 }
341 
/*
 * Destroy a space created by new_unrhdr().  The space must be idle:
 * no allocated units, no chunk memory and no postponed frees left.
 */
void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}
353 
354 static __inline int
355 is_bitmap(struct unrhdr *uh, struct unr *up)
356 {
357 	return (up->ptr != uh && up->ptr != NULL);
358 }
359 
360 /*
361  * Look for sequence of items which can be combined into a bitmap, if
362  * multiple are present, take the one which saves most memory.
363  *
364  * Return (1) if a sequence was found to indicate that another call
365  * might be able to do more.  Return (0) if we found no suitable sequence.
366  *
367  * NB: called from alloc_unr(), no new memory allocation allowed.
368  */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;	/* best starting chunk found so far */
	ba = 0;		/* number of struct unr that collapse would recycle */
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;	/* a bitmap chunk also owns its map unr */
		l = uf->len;
		up = uf;
		/* Extend the candidate run while it still fits in NBITS. */
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	/* A bitmap costs two unrs, so merging fewer than 3 saves nothing. */
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;	/* 1 = allocated run */
		/* Recycle the run's own struct unr as the bitmap storage. */
		ub = (void *)us;
		ub->busy = 0;
		if (l) {
			/*
			 * NB: this also touches bit 'a' (one past the run),
			 * but the next chunk's bits, written below, always
			 * start at 'a' and overwrite it.
			 */
			bit_nset(ub->map, 0, a);
			ub->busy += a;
		} else {
			bit_nclear(ub->map, 0, a);
		}
		if (!is_bitmap(uh, uf)) {
			/* Fold the following run into the new bitmap. */
			if (uf->ptr == NULL) {
				bit_nclear(ub->map, a, a + uf->len - 1);
			} else {
				bit_nset(ub->map, a, a + uf->len - 1);
				ub->busy += uf->len;
			}
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			/* Copy the following bitmap's bits, then retire it. */
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l)) {
					bit_set(ub->map, a);
					ub->busy++;
				} else {
					bit_clear(ub->map, a);
				}
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	/* Absorb following chunks into the bitmap while they fit. */
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			/* Free run: append clear bits. */
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			/* Allocated run: append set bits. */
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			ub->busy += uf->len;
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			/*
			 * Another bitmap: copy bit by bit, then retire
			 * both its map storage and its list element.
			 */
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l)) {
					bit_set(ub->map, us->len);
					ub->busy++;
				} else {
					bit_clear(ub->map, us->len);
				}
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}
487 
488 /*
489  * See if a given unr should be collapsed with a neighbor.
490  *
491  * NB: called from alloc_unr(), no new memory allocation allowed.
492  */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub->busy == up->len) {
			/* Fully allocated: becomes an allocated run. */
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub->busy == 0) {
			/* Fully free: becomes a free run. */
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		/* Continue collapsing at a neighbor of the deleted chunk. */
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we have "hot-spot" still, merge with neighbor if possible */
	if (up != NULL) {
		/* Identical ptr values mean the same kind of run. */
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		/* A leading allocated run is represented implicitly. */
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		/* A trailing free run is represented implicitly. */
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}
561 
562 /*
563  * Allocate a free unr.
564  */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	/* Lowest-free policy: the candidate is always low + first. */
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	/* collapse_unr() folds a leading allocated run into ->first. */
	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		KASSERT(ub->busy < up->len, ("UNR bitmap confusion"));
		/* Take the lowest clear bit. */
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		ub->busy++;
		x += y;
	}
	uh->busy++;
	/* The head chunk may now be empty or mergeable. */
	collapse_unr(uh, up);
	return (x);
}
614 
615 int
616 alloc_unr(struct unrhdr *uh)
617 {
618 	int i;
619 
620 	mtx_lock(uh->mtx);
621 	i = alloc_unrl(uh);
622 	clean_unrhdrl(uh);
623 	mtx_unlock(uh->mtx);
624 	return (i);
625 }
626 
/*
 * Allocate a specific unit number, if still free, using the two
 * pre-allocated struct unr in *p1/*p2 for any splits required.
 * Returns the item on success or -1 if it is out of range or taken.
 */
static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	mtx_assert(uh->mtx, MA_OWNED);

	/* Anything below low + first is allocated; above high is invalid. */
	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	/* Offset of the wanted unit past the implicit leading run. */
	i = item - uh->low - uh->first;

	if (up == NULL) {
		/*
		 * Empty list: split the implicit free space into a free
		 * run of length i followed by a 1-unit allocated run.
		 */
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		/*
		 * Past the last chunk: extend the list into the implicit
		 * trailing free space.
		 */
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			ub->busy++;
			goto done;
		} else
			return (-1);	/* already allocated in the bitmap */
	} else if (up->ptr == uh)
		return (-1);		/* inside an allocated run */

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	/* Shrink the implicit trailing free run if we allocated into it. */
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}
729 
730 int
731 alloc_unr_specific(struct unrhdr *uh, u_int item)
732 {
733 	void *p1, *p2;
734 	int i;
735 
736 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
737 
738 	p1 = Malloc(sizeof(struct unr));
739 	p2 = Malloc(sizeof(struct unr));
740 
741 	mtx_lock(uh->mtx);
742 	i = alloc_unr_specificl(uh, item, &p1, &p2);
743 	mtx_unlock(uh->mtx);
744 
745 	if (p1 != NULL)
746 		Free(p1);
747 	if (p2 != NULL)
748 		Free(p2);
749 
750 	return (i);
751 }
752 
753 /*
754  * Free a unr.
755  *
756  * If we can save unrs by using a bitmap, do so.
757  */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	     item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	/* Work in offsets from the bottom of the space. */
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	/* From here on, offsets are relative to the start of the list. */
	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		ub->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run))\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	/* 'up' itself becomes the 1-unit free run in the middle. */
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}
866 
867 void
868 free_unr(struct unrhdr *uh, u_int item)
869 {
870 	void *p1, *p2;
871 
872 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
873 	p1 = Malloc(sizeof(struct unr));
874 	p2 = Malloc(sizeof(struct unr));
875 	mtx_lock(uh->mtx);
876 	free_unrl(uh, item, &p1, &p2);
877 	clean_unrhdrl(uh);
878 	mtx_unlock(uh->mtx);
879 	if (p1 != NULL)
880 		Free(p1);
881 	if (p2 != NULL)
882 		Free(p2);
883 }
884 
885 #ifndef _KERNEL	/* USERLAND test driver */
886 
887 /*
888  * Simple stochastic test driver for the above functions.  The code resides
889  * here so that it can access static functions and structures.
890  */
891 
/* Test-driver verbosity flag, set by -v. */
static bool verbose;
#define VPRINTF(...)	{if (verbose) printf(__VA_ARGS__);}
894 
895 static void
896 print_unr(struct unrhdr *uh, struct unr *up)
897 {
898 	u_int x;
899 	struct unrb *ub;
900 
901 	printf("  %p len = %5u ", up, up->len);
902 	if (up->ptr == NULL)
903 		printf("free\n");
904 	else if (up->ptr == uh)
905 		printf("alloc\n");
906 	else {
907 		ub = up->ptr;
908 		printf("bitmap(%d) [", ub->busy);
909 		for (x = 0; x < up->len; x++) {
910 			if (bit_test(ub->map, x))
911 				printf("#");
912 			else
913 				printf(" ");
914 		}
915 		printf("]\n");
916 	}
917 }
918 
919 static void
920 print_unrhdr(struct unrhdr *uh)
921 {
922 	struct unr *up;
923 	u_int x;
924 
925 	printf(
926 	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
927 	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
928 	x = uh->low + uh->first;
929 	TAILQ_FOREACH(up, &uh->head, list) {
930 		printf("  from = %5u", x);
931 		print_unr(uh, up);
932 		if (up->ptr == NULL || up->ptr == uh)
933 			x += up->len;
934 		else
935 			x += NBITS;
936 	}
937 }
938 
939 static void
940 test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
941 {
942 	int j;
943 
944 	if (a[i]) {
945 		VPRINTF("F %u\n", i);
946 		free_unr(uh, i);
947 		a[i] = 0;
948 	} else {
949 		no_alloc = 1;
950 		j = alloc_unr(uh);
951 		if (j != -1) {
952 			a[j] = 1;
953 			VPRINTF("A %d\n", j);
954 		}
955 		no_alloc = 0;
956 	}
957 }
958 
959 static void
960 test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
961 {
962 	int j;
963 
964 	j = alloc_unr_specific(uh, i);
965 	if (j == -1) {
966 		VPRINTF("F %u\n", i);
967 		a[i] = 0;
968 		free_unr(uh, i);
969 	} else {
970 		a[i] = 1;
971 		VPRINTF("A %d\n", j);
972 	}
973 }
974 
/* Print a one-line synopsis of the accepted options. */
static void
usage(char** argv)
{
	char *prog;

	prog = argv[0];
	printf("%s [-h] [-r REPETITIONS] [-v]\n", prog);
}
980 
/*
 * Stochastic test driver: hammer a 10000-unit space with random
 * alloc/free operations, mirroring the allocator's state in a shadow
 * map and running the consistency checker after every step.
 */
int
main(int argc, char **argv)
{
	struct unrhdr *uh;
	char *a;		/* shadow map: a[i] != 0 iff unit i allocated */
	long count = 10000;	/* Number of unrs to test */
	long reps = 1;
	int ch;
	u_int i, x, m, j;

	verbose = false;

	while ((ch = getopt(argc, argv, "hr:v")) != -1) {
		switch (ch) {
		case 'r':
			errno = 0;
			reps = strtol(optarg, NULL, 0);
			if (errno == ERANGE || errno == EINVAL) {
				usage(argv);
				exit(2);
			}

			break;
		case 'v':
			verbose = true;
			break;
		case 'h':
		default:
			usage(argv);
			exit(2);
		}


	}

	/* Unbuffered output so progress survives an abort(). */
	setbuf(stdout, NULL);
	uh = new_unrhdr(0, count - 1, NULL);
	print_unrhdr(uh);

	a = calloc(count, sizeof(char));
	if (a == NULL)
		err(1, "calloc failed");
	srandomdev();

	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %d\n", NBITS);
	/* NOTE(review): x is never read after this — apparent leftover. */
	x = 1;
	for (m = 0; m < count * reps; m++) {
		j = random();
		i = (j >> 1) % count;
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		/* Randomly exercise the sequential or specific allocator. */
		if ((random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);

		if (verbose)
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	/* Cleanup pass: return every still-allocated unit. */
	for (i = 0; i < count; i++) {
		if (a[i]) {
			if (verbose) {
				printf("C %u\n", i);
				print_unrhdr(uh);
			}
			free_unr(uh, i);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	free(a);
	return (0);
}
1060 #endif
1061