/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used.  The advantage to passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation functions alloc_unr{l}() never sleep (but they may block
 * on the mutex, of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but always very compact:
 *    * For the very typical case where a single unbroken run of unit
 *      numbers is allocated, 44 bytes are used on i386.
 *    * For a unit number space of 1000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *      was 252 bytes on i386 for 500 allocated and 500 free units.
 *    * For a unit number space of 10000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *      was 798 bytes on i386 for 5000 allocated and 5000 free units.
 *    * The worst case is where every other unit number is allocated and
 *      the rest are free.  In that case 44 + N/4 bytes are used where
 *      N is the number of the highest unit allocated.
 */
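
/*
 * Example usage, as a sketch only (the "foo" prefix, the unit variable
 * and the ENOSPC error handling are hypothetical, not part of this API):
 *
 *	static struct unrhdr *foo_units;
 *
 *	foo_units = new_unrhdr(0, INT_MAX, NULL);  (effectively unbounded)
 *	unit = alloc_unr(foo_units);		   (lowest free number)
 *	if (unit == -1)
 *		return (ENOSPC);
 *	...
 *	free_unr(foo_units, unit);		   (may sleep)
 *	delete_unrhdr(foo_units);		   (all units must be free)
 */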

#include <sys/types.h>
#include <sys/bitstring.h>
#include <sys/_unrhdr.h>

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);

#else /* ...USERLAND */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;
#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place (line %d)", line));
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)

struct unrhdr;

struct mtx {
	int	state;
} unitmtx;

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

#define MA_OWNED	9

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)
#define WITNESS_WARN(flags, lock, fmt, ...)	(void)0

#endif /* USERLAND */

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *     If ptr is NULL, it represents a run of free items.
 *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	u_char			busy;
	bitstr_t		map[sizeof(struct unr) - 1];
};

CTASSERT(sizeof(struct unr) == sizeof(struct unrb));

/* Number of bits in the bitmap */
#define NBITS	((int)sizeof(((struct unrb *)NULL)->map) * 8)
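
/*
 * To illustrate the encoding (the numbers here are arbitrary): with
 * units 0-999 allocated, 1000-1499 free and unit 1500 allocated, the
 * list holds three run chunks:
 *
 *	{ len = 1000, ptr = uh   }	allocated run
 *	{ len =  500, ptr = NULL }	free run
 *	{ len =    1, ptr = uh   }	allocated run
 *
 * When short runs fragment the space, optimize_unr() may fold several
 * of them into one chunk whose ptr points to a struct unrb bitmap
 * covering up to NBITS units.
 */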

#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	u_int x, y, z, w;

	y = uh->first;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			ub = up->ptr;
			KASSERT(up->len <= NBITS,
			    ("UNR inconsistency: len %u max %d (line %d)\n",
			    up->len, NBITS, line));
			z++;
			w = 0;
			for (x = 0; x < up->len; x++)
				if (bit_test(ub->map, x))
					w++;
			KASSERT(w == ub->busy,
			    ("UNR inconsistency: busy %u found %u (line %d)\n",
			    ub->busy, w, line));
			y += w;
		} else if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT(y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT(z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh, int line)
{

}

#endif

/*
 * Memory management for the list chunks.  new_unr() never allocates;
 * it consumes one of the two pointers the caller pre-allocated, so it
 * is safe to call with the unrhdr mutex held.  delete_unr() only
 * queues the chunk on the ppfree list; clean_unrhdr{l}() frees the
 * memory once the mutex can be dropped.  The alloc counter tracks how
 * many chunks are live for check_unrhdr().
 */

static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
		return (p);
	} else {
		p = *p2;
		*p2 = NULL;
		return (p);
	}
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}

void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		mtx_unlock(uh->mtx);
		Free(up);
		mtx_lock(uh->mtx);
	}
}

void
clean_unrhdr(struct unrhdr *uh)
{

	mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
}

void
init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
{

	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	check_unrhdr(uh, __LINE__);
}

/*
 * Allocate a new unrhdr set.
 *
 * The lowest and highest valid unit numbers are given as parameters.
 */
struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}

static __inline int
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/*
 * Look for a sequence of chunks which can be combined into a bitmap; if
 * multiple are present, take the one which saves the most memory.
 *
 * Return (1) if a sequence was found, to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of chunks (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates
	 * things a bit.
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;
		ub = (void *)us;
		ub->busy = 0;
		if (l) {
			bit_nset(ub->map, 0, a);
			ub->busy += a;
		} else {
			bit_nclear(ub->map, 0, a);
		}
		if (!is_bitmap(uh, uf)) {
			if (uf->ptr == NULL) {
				bit_nclear(ub->map, a, a + uf->len - 1);
			} else {
				bit_nset(ub->map, a, a + uf->len - 1);
				ub->busy += uf->len;
			}
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l)) {
					bit_set(ub->map, a);
					ub->busy++;
				} else {
					bit_clear(ub->map, a);
				}
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			ub->busy += uf->len;
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l)) {
					bit_set(ub->map, us->len);
					ub->busy++;
				} else {
					bit_clear(ub->map, us->len);
				}
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If the bitmap is all set or all clear, change it to a runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub->busy == up->len) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub->busy == 0) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing is left in the runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we still have a "hot-spot", merge with a neighbor if possible */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}

/*
 * Allocate a free unr.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		KASSERT(ub->busy < up->len, ("UNR bitmap confusion"));
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		ub->busy++;
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}
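
/*
 * A minimal sketch of the pre-locked variant (the private "foo_mtx"
 * mutex is hypothetical): if the unrhdr was created with its own
 * mutex, the caller can combine the allocation with other bookkeeping
 * under one lock and release it itself, since alloc_unrl() leaves the
 * mutex held:
 *
 *	mtx_lock(&foo_mtx);
 *	unit = alloc_unrl(uh);
 *	... update caller state under the same lock ...
 *	mtx_unlock(&foo_mtx);
 */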

int
alloc_unr(struct unrhdr *uh)
{
	int i;

	mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	return (i);
}

static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	mtx_assert(uh->mtx, MA_OWNED);

	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	i = item - uh->low - uh->first;

	if (up == NULL) {
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the chunk which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			ub->busy++;
			goto done;
		} else
			return (-1);
	} else if (up->ptr == uh)
		return (-1);

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off the head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}

int
alloc_unr_specific(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;
	int i;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));

	mtx_lock(uh->mtx);
	i = alloc_unr_specificl(uh, item, &p1, &p2);
	mtx_unlock(uh->mtx);

	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);

	return (i);
}
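
/*
 * For example (a sketch; unit number 42 is arbitrary), a driver that
 * must reserve a fixed, well-known unit could do:
 *
 *	if (alloc_unr_specific(uh, 42) == -1)
 *		(unit 42 was already allocated, or out of range)
 *
 * Note that alloc_unr_specific() pre-allocates the two chunks it might
 * need and so may sleep; it has no pre-locked variant.
 */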

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	     item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	item -= uh->first;

	/* Find the chunk which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap chunks */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %u (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		ub->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR: Freeing free item %u (run)\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off the head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}

void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}

#ifndef _KERNEL	/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions
 */

static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	printf("  %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		ub = up->ptr;
		printf("bitmap(%d) [", ub->busy);
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				printf("#");
			else
				printf(" ");
		}
		printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf("  from = %5u", x);
		print_unr(uh, up);
		if (up->ptr == NULL || up->ptr == uh)
			x += up->len;
		else
			x += NBITS;
	}
}

static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		printf("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			printf("A %d\n", j);
		}
		no_alloc = 0;
	}
}

static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		printf("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		printf("A %d\n", j);
	}
}

/* Number of unrs to test */
#define NN	10000

int
main(int argc __unused, const char **argv __unused)
{
	struct unrhdr *uh;
	u_int i, x, m, j;
	char a[NN];

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, NN - 1, NULL);
	print_unrhdr(uh);

	memset(a, 0, sizeof a);
	srandomdev();

	fprintf(stderr, "sizeof(struct unr) %zu\n", sizeof(struct unr));
	fprintf(stderr, "sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	fprintf(stderr, "sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	fprintf(stderr, "NBITS %d\n", NBITS);
	x = 1;
	for (m = 0; m < NN * 100; m++) {
		j = random();
		i = (j >> 1) % NN;
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if ((random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);

		if (1)	/* XXX: change this for detailed debug printout */
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	for (i = 0; i < NN; i++) {
		if (a[i]) {
			printf("C %u\n", i);
			free_unr(uh, i);
			print_unrhdr(uh);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	return (0);
}
#endif