xref: /illumos-gate/usr/src/lib/libuutil/common/uu_list.c (revision bb0ade0978a02d3fe0b0165cd4725fdcb593fbfb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include "libuutil_common.h"
29 
30 #include <stdlib.h>
31 #include <string.h>
32 #include <unistd.h>
33 #include <sys/time.h>
34 
35 #define	ELEM_TO_NODE(lp, e) \
36 	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))
37 
38 #define	NODE_TO_ELEM(lp, n) \
39 	((void *)((uintptr_t)(n) - (lp)->ul_offset))
40 
41 /*
42  * uu_list_index_ts define a location for insertion.  They are simply a
43  * pointer to the object after the insertion point.  We store a mark
44  * in the low-bits of the index, to help prevent mistakes.
45  *
46  * When debugging, the index mark changes on every insert and delete, to
47  * catch stale references.
48  */
49 #define	INDEX_MAX		(sizeof (uintptr_t) - 1)
50 #define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)
51 
52 #define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
53 #define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
54 #define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
55 #define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
56 
57 #define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
58 
59 static uu_list_pool_t	uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
60 static pthread_mutex_t	uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;
61 
/*
 * Creates a list pool describing objects of objsize bytes that embed a
 * uu_list_node_t at byte offset nodeoffset.  compare_func may be NULL if
 * sorted lists and uu_list_find() are never used with this pool.  flags
 * may include UU_LIST_POOL_DEBUG.  Returns NULL with uu_error set on
 * failure.
 */
uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_list_pool_t *pp, *next, *prev;

	/* the embedded node must lie entirely within the object */
	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_LIST_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_list_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
	pp->ulp_nodeoffset = nodeoffset;
	pp->ulp_objsize = objsize;
	pp->ulp_cmp = compare_func;
	if (flags & UU_LIST_POOL_DEBUG)
		pp->ulp_debug = 1;
	pp->ulp_last_index = 0;

	(void) pthread_mutex_init(&pp->ulp_lock, NULL);

	/* the pool's list of lists starts out empty (self-linked) */
	pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
	pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list);

	/* link the new pool onto the tail of the global pool list */
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next = next = &uu_null_lpool;
	pp->ulp_prev = prev = next->ulp_prev;
	next->ulp_prev = pp;
	prev->ulp_next = pp;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);

	return (pp);
}
108 
/*
 * Destroys a list pool.  In debug pools, panics if any lists created from
 * the pool are still outstanding.
 */
void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		/* a non-self-linked null list means live lists remain */
		if (pp->ulp_null_list.ul_next_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list) ||
		    pp->ulp_null_list.ul_prev_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list)) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	/* unlink from the global pool list, then free */
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}
131 
132 void
133 uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
134 {
135 	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
136 
137 	if (pp->ulp_debug) {
138 		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
139 		if (offset + sizeof (*np) > pp->ulp_objsize) {
140 			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
141 			    "offset %ld doesn't fit in object (size %ld)\n",
142 			    base, (void *)np, (void *)pp, pp->ulp_name,
143 			    (long)offset, (long)pp->ulp_objsize);
144 		}
145 		if (offset != pp->ulp_nodeoffset) {
146 			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
147 			    "offset %ld doesn't match pool's offset (%ld)\n",
148 			    base, (void *)np, (void *)pp, pp->ulp_name,
149 			    (long)offset, (long)pp->ulp_objsize);
150 		}
151 	}
152 	np->uln_next = POOL_TO_MARKER(pp);
153 	np->uln_prev = NULL;
154 }
155 
156 void
157 uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
158 {
159 	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
160 
161 	if (pp->ulp_debug) {
162 		if (np->uln_next == NULL &&
163 		    np->uln_prev == NULL) {
164 			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
165 			    "node already finied\n",
166 			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
167 		}
168 		if (np->uln_next != POOL_TO_MARKER(pp) ||
169 		    np->uln_prev != NULL) {
170 			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
171 			    "node corrupt or on list\n",
172 			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
173 		}
174 	}
175 	np->uln_next = NULL;
176 	np->uln_prev = NULL;
177 }
178 
/*
 * Creates a new list from pool pp.  parent is an arbitrary pointer stored
 * (encoded) for debugging.  flags may include UU_LIST_DEBUG and
 * UU_LIST_SORTED; the latter requires the pool to have a comparison
 * function.  Returns NULL with uu_error set on failure.
 */
uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
	uu_list_t *lp, *next, *prev;

	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
		if (pp->ulp_debug)
			uu_panic("uu_list_create(%p, ...): requested "
			    "UU_LIST_SORTED, but pool has no comparison func\n",
			    (void *)pp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}

	lp = uu_zalloc(sizeof (*lp));
	if (lp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	lp->ul_pool = pp;
	lp->ul_parent_enc = UU_PTR_ENCODE(parent);
	lp->ul_offset = pp->ulp_nodeoffset;
	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
	lp->ul_sorted = (flags & UU_LIST_SORTED);
	lp->ul_numnodes = 0;
	/* give each list a fresh index mark (see INDEX_* macros above) */
	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

	/* the element list and the walker list both start empty */
	lp->ul_null_node.uln_next = &lp->ul_null_node;
	lp->ul_null_node.uln_prev = &lp->ul_null_node;

	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

	/* link the new list onto the pool's list of lists */
	(void) pthread_mutex_lock(&pp->ulp_lock);
	next = &pp->ulp_null_list;
	prev = UU_PTR_DECODE(next->ul_prev_enc);
	lp->ul_next_enc = UU_PTR_ENCODE(next);
	lp->ul_prev_enc = UU_PTR_ENCODE(prev);
	next->ul_prev_enc = UU_PTR_ENCODE(lp);
	prev->ul_next_enc = UU_PTR_ENCODE(lp);
	(void) pthread_mutex_unlock(&pp->ulp_lock);

	return (lp);
}
229 
/*
 * Destroys a list.  In debug mode, panics if the list still contains
 * elements, if its element count disagrees with its contents, or if it
 * has outstanding walkers.
 */
void
uu_list_destroy(uu_list_t *lp)
{
	uu_list_pool_t *pp = lp->ul_pool;

	if (lp->ul_debug) {
		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
			uu_panic("uu_list_destroy(%p):  list not empty\n",
			    (void *)lp);
		}
		if (lp->ul_numnodes != 0) {
			uu_panic("uu_list_destroy(%p):  numnodes is nonzero, "
			    "but list is empty\n", (void *)lp);
		}
		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
			uu_panic("uu_list_destroy(%p):  outstanding walkers\n",
			    (void *)lp);
		}
	}

	/* unlink from the pool's list of lists, then free */
	(void) pthread_mutex_lock(&pp->ulp_lock);
	UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc;
	UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc;
	(void) pthread_mutex_unlock(&pp->ulp_lock);
	lp->ul_prev_enc = UU_PTR_ENCODE(NULL);
	lp->ul_next_enc = UU_PTR_ENCODE(NULL);
	lp->ul_pool = NULL;
	uu_free(lp);
}
261 
/*
 * Links np into lp between the (adjacent) nodes prev and next and bumps
 * the element count.  In debug mode, first verifies that prev and next
 * really are neighbors and that np is initialized but unlinked.
 */
static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
	if (lp->ul_debug) {
		if (next->uln_prev != prev || prev->uln_next != next)
			uu_panic("insert(%p): internal error: %p and %p not "
			    "neighbors\n", (void *)lp, (void *)next,
			    (void *)prev);

		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
		    np->uln_prev != NULL) {
			uu_panic("insert(%p): elem %p node %p corrupt, "
			    "not initialized, or already in a list.\n",
			    (void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
		}
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}
	np->uln_next = next;
	np->uln_prev = prev;
	next->uln_prev = np;
	prev->uln_next = np;

	lp->ul_numnodes++;
}
290 
291 void
292 uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
293 {
294 	uu_list_node_impl_t *np;
295 
296 	np = INDEX_TO_NODE(idx);
297 	if (np == NULL)
298 		np = &lp->ul_null_node;
299 
300 	if (lp->ul_debug) {
301 		if (!INDEX_VALID(lp, idx))
302 			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
303 			    (void *)lp, elem, (void *)idx,
304 			    INDEX_CHECK(idx)? "outdated index" :
305 			    "invalid index");
306 		if (np->uln_prev == NULL)
307 			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
308 			    "index\n", (void *)lp, elem, (void *)idx);
309 	}
310 
311 	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
312 }
313 
314 void *
315 uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
316 {
317 	int sorted = lp->ul_sorted;
318 	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
319 	uu_list_node_impl_t *np;
320 
321 	if (func == NULL) {
322 		if (out != NULL)
323 			*out = 0;
324 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
325 		return (NULL);
326 	}
327 	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
328 	    np = np->uln_next) {
329 		void *ep = NODE_TO_ELEM(lp, np);
330 		int cmp = func(ep, elem, private);
331 		if (cmp == 0) {
332 			if (out != NULL)
333 				*out = NODE_TO_INDEX(lp, np);
334 			return (ep);
335 		}
336 		if (sorted && cmp > 0) {
337 			if (out != NULL)
338 				*out = NODE_TO_INDEX(lp, np);
339 			return (NULL);
340 		}
341 	}
342 	if (out != NULL)
343 		*out = NODE_TO_INDEX(lp, 0);
344 	return (NULL);
345 }
346 
347 void *
348 uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
349 {
350 	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
351 
352 	if (np == NULL)
353 		np = &lp->ul_null_node;
354 
355 	if (lp->ul_debug) {
356 		if (!INDEX_VALID(lp, idx))
357 			uu_panic("uu_list_nearest_next(%p, %p): %s\n",
358 			    (void *)lp, (void *)idx,
359 			    INDEX_CHECK(idx)? "outdated index" :
360 			    "invalid index");
361 		if (np->uln_prev == NULL)
362 			uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
363 			    "index\n", (void *)lp, (void *)idx);
364 	}
365 
366 	if (np == &lp->ul_null_node)
367 		return (NULL);
368 	else
369 		return (NODE_TO_ELEM(lp, np));
370 }
371 
372 void *
373 uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
374 {
375 	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
376 
377 	if (np == NULL)
378 		np = &lp->ul_null_node;
379 
380 	if (lp->ul_debug) {
381 		if (!INDEX_VALID(lp, idx))
382 			uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
383 			    (void *)lp, (void *)idx, INDEX_CHECK(idx)?
384 			    "outdated index" : "invalid index");
385 		if (np->uln_prev == NULL)
386 			uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
387 			    "index\n", (void *)lp, (void *)idx);
388 	}
389 
390 	if ((np = np->uln_prev) == &lp->ul_null_node)
391 		return (NULL);
392 	else
393 		return (NODE_TO_ELEM(lp, np));
394 }
395 
/*
 * Initializes walker wp over list lp.  Robust and debug-mode walkers are
 * additionally registered on the list's walker list so that removals can
 * advance (robust) or detect (debug, non-robust) them.
 */
static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->ulw_list = lp;
	wp->ulw_robust = robust;
	wp->ulw_dir = direction;
	/* first result depends on walk direction */
	if (direction > 0)
		wp->ulw_next_result = lp->ul_null_node.uln_next;
	else
		wp->ulw_next_result = lp->ul_null_node.uln_prev;

	if (lp->ul_debug || robust) {
		/* register the walker on the list's walker list */
		wp->ulw_next = next = &lp->ul_null_walk;
		wp->ulw_prev = prev = next->ulw_prev;
		next->ulw_prev = wp;
		prev->ulw_next = wp;
	}
}
420 
421 static uu_list_node_impl_t *
422 list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
423 {
424 	uu_list_node_impl_t *np = wp->ulw_next_result;
425 	uu_list_node_impl_t *next;
426 
427 	if (np == &lp->ul_null_node)
428 		return (NULL);
429 
430 	next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;
431 
432 	wp->ulw_next_result = next;
433 	return (np);
434 }
435 
436 static void
437 list_walk_fini(uu_list_walk_t *wp)
438 {
439 	/* GLXXX debugging? */
440 	if (wp->ulw_next != NULL) {
441 		wp->ulw_next->ulw_prev = wp->ulw_prev;
442 		wp->ulw_prev->ulw_next = wp->ulw_next;
443 		wp->ulw_next = NULL;
444 		wp->ulw_prev = NULL;
445 	}
446 	wp->ulw_list = NULL;
447 	wp->ulw_next_result = NULL;
448 }
449 
450 uu_list_walk_t *
451 uu_list_walk_start(uu_list_t *lp, uint32_t flags)
452 {
453 	uu_list_walk_t *wp;
454 
455 	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
456 		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
457 		return (NULL);
458 	}
459 
460 	wp = uu_zalloc(sizeof (*wp));
461 	if (wp == NULL) {
462 		uu_set_error(UU_ERROR_NO_MEMORY);
463 		return (NULL);
464 	}
465 
466 	list_walk_init(wp, lp, flags);
467 	return (wp);
468 }
469 
470 void *
471 uu_list_walk_next(uu_list_walk_t *wp)
472 {
473 	uu_list_t *lp = wp->ulw_list;
474 	uu_list_node_impl_t *np = list_walk_advance(wp, lp);
475 
476 	if (np == NULL)
477 		return (NULL);
478 
479 	return (NODE_TO_ELEM(lp, np));
480 }
481 
482 void
483 uu_list_walk_end(uu_list_walk_t *wp)
484 {
485 	list_walk_fini(wp);
486 	uu_free(wp);
487 }
488 
/*
 * Walks lp, invoking func(elem, private) on each element until func
 * returns something other than UU_WALK_NEXT or the list is exhausted.
 * flags may include UU_WALK_ROBUST and UU_WALK_REVERSE.  Returns 0 on
 * success, or -1 with uu_error set on a bad flag or callback failure
 * (func returned a negative status).
 */
int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	uu_list_node_impl_t *np;

	int status = UU_WALK_NEXT;

	int robust = (flags & UU_WALK_ROBUST);
	int reverse = (flags & UU_WALK_REVERSE);

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	if (lp->ul_debug || robust) {
		/* registered walker: safe against removal during the walk */
		uu_list_walk_t my_walk;
		void *e;

		list_walk_init(&my_walk, lp, flags);
		while (status == UU_WALK_NEXT &&
		    (e = uu_list_walk_next(&my_walk)) != NULL)
			status = (*func)(e, private);
		list_walk_fini(&my_walk);
	} else {
		/* fast path: follow the node pointers directly */
		if (!reverse) {
			for (np = lp->ul_null_node.uln_next;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_next) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		} else {
			for (np = lp->ul_null_node.uln_prev;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_prev) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		}
	}
	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
533 
/*
 * Removes elem from lp.  Robust walkers whose next result is elem are
 * advanced past it.  In debug mode, panics if elem is not on the list or
 * if a non-robust walker is active.
 */
void
uu_list_remove(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
	uu_list_walk_t *wp;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
			    (void *)lp, elem);
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}

	/*
	 * robust walkers must be advanced.  In debug mode, non-robust
	 * walkers are also on the list.  If there are any, it's an error.
	 */
	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
	    wp = wp->ulw_next) {
		if (wp->ulw_robust) {
			if (np == wp->ulw_next_result)
				(void) list_walk_advance(wp, lp);
		} else if (wp->ulw_next_result != NULL) {
			uu_panic("uu_list_remove(%p, %p): active non-robust "
			    "walker\n", (void *)lp, elem);
		}
	}

	/* unlink and restore the initialized-but-unlinked marker */
	np->uln_next->uln_prev = np->uln_prev;
	np->uln_prev->uln_next = np->uln_next;

	lp->ul_numnodes--;

	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
	np->uln_prev = NULL;
}
573 
574 void *
575 uu_list_teardown(uu_list_t *lp, void **cookie)
576 {
577 	void *ep;
578 
579 	/*
580 	 * XXX: disable list modification until list is empty
581 	 */
582 	if (lp->ul_debug && *cookie != NULL)
583 		uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
584 		    (void *)lp, (void *)cookie);
585 
586 	ep = uu_list_first(lp);
587 	if (ep)
588 		uu_list_remove(lp, ep);
589 	return (ep);
590 }
591 
592 int
593 uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
594 {
595 	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
596 
597 	if (target == NULL)
598 		np = &lp->ul_null_node;
599 
600 	if (lp->ul_debug) {
601 		if (np->uln_prev == NULL)
602 			uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
603 			    "not currently on a list\n",
604 			    (void *)lp, target, elem, target);
605 	}
606 	if (lp->ul_sorted) {
607 		if (lp->ul_debug)
608 			uu_panic("uu_list_insert_before(%p, ...): list is "
609 			    "UU_LIST_SORTED\n", (void *)lp);
610 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
611 		return (-1);
612 	}
613 
614 	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
615 	return (0);
616 }
617 
618 int
619 uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
620 {
621 	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
622 
623 	if (target == NULL)
624 		np = &lp->ul_null_node;
625 
626 	if (lp->ul_debug) {
627 		if (np->uln_prev == NULL)
628 			uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
629 			    "not currently on a list\n",
630 			    (void *)lp, target, elem, target);
631 	}
632 	if (lp->ul_sorted) {
633 		if (lp->ul_debug)
634 			uu_panic("uu_list_insert_after(%p, ...): list is "
635 			    "UU_LIST_SORTED\n", (void *)lp);
636 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
637 		return (-1);
638 	}
639 
640 	list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
641 	return (0);
642 }
643 
644 size_t
645 uu_list_numnodes(uu_list_t *lp)
646 {
647 	return (lp->ul_numnodes);
648 }
649 
650 void *
651 uu_list_first(uu_list_t *lp)
652 {
653 	uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
654 	if (n == &lp->ul_null_node)
655 		return (NULL);
656 	return (NODE_TO_ELEM(lp, n));
657 }
658 
659 void *
660 uu_list_last(uu_list_t *lp)
661 {
662 	uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
663 	if (n == &lp->ul_null_node)
664 		return (NULL);
665 	return (NODE_TO_ELEM(lp, n));
666 }
667 
668 void *
669 uu_list_next(uu_list_t *lp, void *elem)
670 {
671 	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
672 
673 	n = n->uln_next;
674 	if (n == &lp->ul_null_node)
675 		return (NULL);
676 	return (NODE_TO_ELEM(lp, n));
677 }
678 
679 void *
680 uu_list_prev(uu_list_t *lp, void *elem)
681 {
682 	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
683 
684 	n = n->uln_prev;
685 	if (n == &lp->ul_null_node)
686 		return (NULL);
687 	return (NODE_TO_ELEM(lp, n));
688 }
689 
690 /*
691  * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
692  */
693 void
694 uu_list_lockup(void)
695 {
696 	uu_list_pool_t *pp;
697 
698 	(void) pthread_mutex_lock(&uu_lpool_list_lock);
699 	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
700 	    pp = pp->ulp_next)
701 		(void) pthread_mutex_lock(&pp->ulp_lock);
702 }
703 
704 void
705 uu_list_release(void)
706 {
707 	uu_list_pool_t *pp;
708 
709 	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
710 	    pp = pp->ulp_next)
711 		(void) pthread_mutex_unlock(&pp->ulp_lock);
712 	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
713 }
714