1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 * 26 * Copyright 2011 Jason King. All rights reserved. 27 */ 28 29 #include "libuutil_common.h" 30 31 #include <stdlib.h> 32 #include <string.h> 33 #include <unistd.h> 34 #include <sys/time.h> 35 36 #define ELEM_TO_NODE(lp, e) \ 37 ((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset)) 38 39 #define NODE_TO_ELEM(lp, n) \ 40 ((void *)((uintptr_t)(n) - (lp)->ul_offset)) 41 42 /* 43 * uu_list_index_ts define a location for insertion. They are simply a 44 * pointer to the object after the insertion point. We store a mark 45 * in the low-bits of the index, to help prevent mistakes. 46 * 47 * When debugging, the index mark changes on every insert and delete, to 48 * catch stale references. 49 */ 50 #define INDEX_MAX (sizeof (uintptr_t) - 1) 51 #define INDEX_NEXT(m) (((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX) 52 53 #define INDEX_TO_NODE(i) ((uu_list_node_impl_t *)((i) & ~INDEX_MAX)) 54 #define NODE_TO_INDEX(p, n) (((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index) 55 #define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ul_index) 56 #define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0) 57 58 #define POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1)) 59 60 static uu_list_pool_t uu_null_lpool = { &uu_null_lpool, &uu_null_lpool }; 61 static pthread_mutex_t uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER; 62 63 uu_list_pool_t * 64 uu_list_pool_create(const char *name, size_t objsize, 65 size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags) 66 { 67 uu_list_pool_t *pp, *next, *prev; 68 69 if (name == NULL || 70 uu_check_name(name, UU_NAME_DOMAIN) == -1 || 71 nodeoffset + sizeof (uu_list_node_t) > objsize) { 72 uu_set_error(UU_ERROR_INVALID_ARGUMENT); 73 return (NULL); 74 } 75 76 if (flags & ~UU_LIST_POOL_DEBUG) { 77 uu_set_error(UU_ERROR_UNKNOWN_FLAG); 78 return (NULL); 79 } 80 81 pp = uu_zalloc(sizeof (uu_list_pool_t)); 82 if (pp == NULL) { 83 uu_set_error(UU_ERROR_NO_MEMORY); 84 return (NULL); 85 } 86 87 (void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name)); 88 pp->ulp_nodeoffset = nodeoffset; 89 pp->ulp_objsize = objsize; 90 pp->ulp_cmp = compare_func; 91 if (flags & UU_LIST_POOL_DEBUG) 92 pp->ulp_debug = 1; 93 pp->ulp_last_index = 0; 94 95 (void) pthread_mutex_init(&pp->ulp_lock, NULL); 96 97 pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list); 98 pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list); 99 100 (void) pthread_mutex_lock(&uu_lpool_list_lock); 101 pp->ulp_next = next = &uu_null_lpool; 102 pp->ulp_prev = prev = next->ulp_prev; 103 next->ulp_prev = pp; 104 prev->ulp_next = pp; 105 (void) pthread_mutex_unlock(&uu_lpool_list_lock); 106 107 return (pp); 108 } 109 110 void 111 uu_list_pool_destroy(uu_list_pool_t *pp) 112 { 113 if (pp->ulp_debug) { 114 if (pp->ulp_null_list.ul_next_enc != 115 UU_PTR_ENCODE(&pp->ulp_null_list) || 116 pp->ulp_null_list.ul_prev_enc != 117 UU_PTR_ENCODE(&pp->ulp_null_list)) { 118 uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has " 119 "outstanding lists, or is corrupt.\n", 120 (int)sizeof (pp->ulp_name), pp->ulp_name, 121 (void *)pp); 122 } 123 } 124 (void) pthread_mutex_lock(&uu_lpool_list_lock); 125 pp->ulp_next->ulp_prev = pp->ulp_prev; 126 pp->ulp_prev->ulp_next = pp->ulp_next; 127 (void) pthread_mutex_unlock(&uu_lpool_list_lock); 128 pp->ulp_prev = NULL; 129 pp->ulp_next = NULL; 130 uu_free(pp); 131 } 132 133 void 134 uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp) 135 { 136 uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg; 137 138 if (pp->ulp_debug) { 139 uintptr_t offset = (uintptr_t)np - (uintptr_t)base; 140 if (offset + sizeof (*np) > pp->ulp_objsize) { 141 uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): " 142 "offset %ld doesn't fit in object (size %ld)\n", 143 base, (void *)np, (void *)pp, pp->ulp_name, 144 (long)offset, (long)pp->ulp_objsize); 145 } 146 if (offset != pp->ulp_nodeoffset) { 147 uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): " 148 "offset %ld doesn't match pool's offset (%ld)\n", 149 base, (void *)np, (void *)pp, pp->ulp_name, 150 (long)offset, (long)pp->ulp_objsize); 151 } 152 } 153 np->uln_next = POOL_TO_MARKER(pp); 154 np->uln_prev = NULL; 155 } 156 157 void 158 uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp) 159 { 160 uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg; 161 162 if (pp->ulp_debug) { 163 if (np->uln_next == NULL && 164 np->uln_prev == NULL) { 165 uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): " 166 "node already finied\n", 167 base, (void *)np_arg, (void *)pp, pp->ulp_name); 168 } 169 if (np->uln_next != POOL_TO_MARKER(pp) || 170 np->uln_prev != NULL) { 171 uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): " 172 "node corrupt or on list\n", 173 base, (void *)np_arg, (void *)pp, pp->ulp_name); 174 } 175 } 176 np->uln_next = NULL; 177 np->uln_prev = NULL; 178 } 179 180 uu_list_t * 181 uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags) 182 { 183 uu_list_t *lp, *next, *prev; 184 185 if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) { 186 uu_set_error(UU_ERROR_UNKNOWN_FLAG); 187 return (NULL); 188 } 189 190 if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) { 191 if (pp->ulp_debug) 192 uu_panic("uu_list_create(%p, ...): requested " 193 "UU_LIST_SORTED, but pool has no comparison func\n", 194 (void *)pp); 195 uu_set_error(UU_ERROR_NOT_SUPPORTED); 196 return (NULL); 197 } 198 199 lp = uu_zalloc(sizeof (*lp)); 200 if (lp == NULL) { 201 uu_set_error(UU_ERROR_NO_MEMORY); 202 return (NULL); 203 } 204 205 lp->ul_pool = pp; 206 lp->ul_parent_enc = UU_PTR_ENCODE(parent); 207 lp->ul_offset = pp->ulp_nodeoffset; 208 lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG); 209 lp->ul_sorted = (flags & UU_LIST_SORTED); 210 lp->ul_numnodes = 0; 211 lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index)); 212 213 lp->ul_null_node.uln_next = &lp->ul_null_node; 214 lp->ul_null_node.uln_prev = &lp->ul_null_node; 215 216 lp->ul_null_walk.ulw_next = &lp->ul_null_walk; 217 lp->ul_null_walk.ulw_prev = &lp->ul_null_walk; 218 219 (void) pthread_mutex_lock(&pp->ulp_lock); 220 next = &pp->ulp_null_list; 221 prev = UU_PTR_DECODE(next->ul_prev_enc); 222 lp->ul_next_enc = UU_PTR_ENCODE(next); 223 lp->ul_prev_enc = UU_PTR_ENCODE(prev); 224 next->ul_prev_enc = UU_PTR_ENCODE(lp); 225 prev->ul_next_enc = UU_PTR_ENCODE(lp); 226 (void) pthread_mutex_unlock(&pp->ulp_lock); 227 228 return (lp); 229 } 230 231 void 232 uu_list_destroy(uu_list_t *lp) 233 { 234 uu_list_pool_t *pp = lp->ul_pool; 235 236 if (lp->ul_debug) { 237 if (lp->ul_null_node.uln_next != &lp->ul_null_node || 238 lp->ul_null_node.uln_prev != &lp->ul_null_node) { 239 uu_panic("uu_list_destroy(%p): list not empty\n", 240 (void *)lp); 241 } 242 if (lp->ul_numnodes != 0) { 243 uu_panic("uu_list_destroy(%p): numnodes is nonzero, " 244 "but list is empty\n", (void *)lp); 245 } 246 if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk || 247 lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) { 248 uu_panic("uu_list_destroy(%p): outstanding walkers\n", 249 (void *)lp); 250 } 251 } 252 253 (void) pthread_mutex_lock(&pp->ulp_lock); 254 UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc; 255 UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc; 256 (void) pthread_mutex_unlock(&pp->ulp_lock); 257 lp->ul_prev_enc = UU_PTR_ENCODE(NULL); 258 lp->ul_next_enc = UU_PTR_ENCODE(NULL); 259 lp->ul_pool = NULL; 260 uu_free(lp); 261 } 262 263 static void 264 list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev, 265 uu_list_node_impl_t *next) 266 { 267 if (lp->ul_debug) { 268 if (next->uln_prev != prev || prev->uln_next != next) 269 uu_panic("insert(%p): internal error: %p and %p not " 270 "neighbors\n", (void *)lp, (void *)next, 271 (void *)prev); 272 273 if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) || 274 np->uln_prev != NULL) { 275 uu_panic("insert(%p): elem %p node %p corrupt, " 276 "not initialized, or already in a list.\n", 277 (void *)lp, NODE_TO_ELEM(lp, np), (void *)np); 278 } 279 /* 280 * invalidate outstanding uu_list_index_ts. 281 */ 282 lp->ul_index = INDEX_NEXT(lp->ul_index); 283 } 284 np->uln_next = next; 285 np->uln_prev = prev; 286 next->uln_prev = np; 287 prev->uln_next = np; 288 289 lp->ul_numnodes++; 290 } 291 292 void 293 uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx) 294 { 295 uu_list_node_impl_t *np; 296 297 np = INDEX_TO_NODE(idx); 298 if (np == NULL) 299 np = &lp->ul_null_node; 300 301 if (lp->ul_debug) { 302 if (!INDEX_VALID(lp, idx)) 303 uu_panic("uu_list_insert(%p, %p, %p): %s\n", 304 (void *)lp, elem, (void *)idx, 305 INDEX_CHECK(idx)? "outdated index" : 306 "invalid index"); 307 if (np->uln_prev == NULL) 308 uu_panic("uu_list_insert(%p, %p, %p): out-of-date " 309 "index\n", (void *)lp, elem, (void *)idx); 310 } 311 312 list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np); 313 } 314 315 void * 316 uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out) 317 { 318 int sorted = lp->ul_sorted; 319 uu_compare_fn_t *func = lp->ul_pool->ulp_cmp; 320 uu_list_node_impl_t *np; 321 322 uu_set_error(UU_ERROR_NONE); 323 324 if (func == NULL) { 325 if (out != NULL) 326 *out = 0; 327 uu_set_error(UU_ERROR_NOT_SUPPORTED); 328 return (NULL); 329 } 330 for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node; 331 np = np->uln_next) { 332 void *ep = NODE_TO_ELEM(lp, np); 333 int cmp = func(ep, elem, private); 334 if (cmp == 0) { 335 if (out != NULL) 336 *out = NODE_TO_INDEX(lp, np); 337 return (ep); 338 } 339 if (sorted && cmp > 0) { 340 if (out != NULL) 341 *out = NODE_TO_INDEX(lp, np); 342 return (NULL); 343 } 344 } 345 if (out != NULL) 346 *out = NODE_TO_INDEX(lp, 0); 347 return (NULL); 348 } 349 350 void * 351 uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx) 352 { 353 uu_list_node_impl_t *np = INDEX_TO_NODE(idx); 354 355 if (np == NULL) 356 np = &lp->ul_null_node; 357 358 if (lp->ul_debug) { 359 if (!INDEX_VALID(lp, idx)) 360 uu_panic("uu_list_nearest_next(%p, %p): %s\n", 361 (void *)lp, (void *)idx, 362 INDEX_CHECK(idx)? "outdated index" : 363 "invalid index"); 364 if (np->uln_prev == NULL) 365 uu_panic("uu_list_nearest_next(%p, %p): out-of-date " 366 "index\n", (void *)lp, (void *)idx); 367 } 368 369 if (np == &lp->ul_null_node) 370 return (NULL); 371 else 372 return (NODE_TO_ELEM(lp, np)); 373 } 374 375 void * 376 uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx) 377 { 378 uu_list_node_impl_t *np = INDEX_TO_NODE(idx); 379 380 if (np == NULL) 381 np = &lp->ul_null_node; 382 383 if (lp->ul_debug) { 384 if (!INDEX_VALID(lp, idx)) 385 uu_panic("uu_list_nearest_prev(%p, %p): %s\n", 386 (void *)lp, (void *)idx, INDEX_CHECK(idx)? 387 "outdated index" : "invalid index"); 388 if (np->uln_prev == NULL) 389 uu_panic("uu_list_nearest_prev(%p, %p): out-of-date " 390 "index\n", (void *)lp, (void *)idx); 391 } 392 393 if ((np = np->uln_prev) == &lp->ul_null_node) 394 return (NULL); 395 else 396 return (NODE_TO_ELEM(lp, np)); 397 } 398 399 static void 400 list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags) 401 { 402 uu_list_walk_t *next, *prev; 403 404 int robust = (flags & UU_WALK_ROBUST); 405 int direction = (flags & UU_WALK_REVERSE)? -1 : 1; 406 407 (void) memset(wp, 0, sizeof (*wp)); 408 wp->ulw_list = lp; 409 wp->ulw_robust = robust; 410 wp->ulw_dir = direction; 411 if (direction > 0) 412 wp->ulw_next_result = lp->ul_null_node.uln_next; 413 else 414 wp->ulw_next_result = lp->ul_null_node.uln_prev; 415 416 if (lp->ul_debug || robust) { 417 /* 418 * Add this walker to the list's list of walkers so 419 * uu_list_remove() can advance us if somebody tries to 420 * remove ulw_next_result. 421 */ 422 wp->ulw_next = next = &lp->ul_null_walk; 423 wp->ulw_prev = prev = next->ulw_prev; 424 next->ulw_prev = wp; 425 prev->ulw_next = wp; 426 } 427 } 428 429 static uu_list_node_impl_t * 430 list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp) 431 { 432 uu_list_node_impl_t *np = wp->ulw_next_result; 433 uu_list_node_impl_t *next; 434 435 if (np == &lp->ul_null_node) 436 return (NULL); 437 438 next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev; 439 440 wp->ulw_next_result = next; 441 return (np); 442 } 443 444 static void 445 list_walk_fini(uu_list_walk_t *wp) 446 { 447 /* GLXXX debugging? */ 448 if (wp->ulw_next != NULL) { 449 wp->ulw_next->ulw_prev = wp->ulw_prev; 450 wp->ulw_prev->ulw_next = wp->ulw_next; 451 wp->ulw_next = NULL; 452 wp->ulw_prev = NULL; 453 } 454 wp->ulw_list = NULL; 455 wp->ulw_next_result = NULL; 456 } 457 458 uu_list_walk_t * 459 uu_list_walk_start(uu_list_t *lp, uint32_t flags) 460 { 461 uu_list_walk_t *wp; 462 463 if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) { 464 uu_set_error(UU_ERROR_UNKNOWN_FLAG); 465 return (NULL); 466 } 467 468 wp = uu_zalloc(sizeof (*wp)); 469 if (wp == NULL) { 470 uu_set_error(UU_ERROR_NO_MEMORY); 471 return (NULL); 472 } 473 474 list_walk_init(wp, lp, flags); 475 return (wp); 476 } 477 478 void * 479 uu_list_walk_next(uu_list_walk_t *wp) 480 { 481 uu_list_t *lp = wp->ulw_list; 482 uu_list_node_impl_t *np = list_walk_advance(wp, lp); 483 484 if (np == NULL) 485 return (NULL); 486 487 return (NODE_TO_ELEM(lp, np)); 488 } 489 490 void 491 uu_list_walk_end(uu_list_walk_t *wp) 492 { 493 list_walk_fini(wp); 494 uu_free(wp); 495 } 496 497 int 498 uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags) 499 { 500 uu_list_node_impl_t *np; 501 502 int status = UU_WALK_NEXT; 503 504 int robust = (flags & UU_WALK_ROBUST); 505 int reverse = (flags & UU_WALK_REVERSE); 506 507 if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) { 508 uu_set_error(UU_ERROR_UNKNOWN_FLAG); 509 return (-1); 510 } 511 512 if (lp->ul_debug || robust) { 513 uu_list_walk_t my_walk; 514 void *e; 515 516 list_walk_init(&my_walk, lp, flags); 517 while (status == UU_WALK_NEXT && 518 (e = uu_list_walk_next(&my_walk)) != NULL) 519 status = (*func)(e, private); 520 list_walk_fini(&my_walk); 521 } else { 522 if (!reverse) { 523 for (np = lp->ul_null_node.uln_next; 524 status == UU_WALK_NEXT && np != &lp->ul_null_node; 525 np = np->uln_next) { 526 status = (*func)(NODE_TO_ELEM(lp, np), private); 527 } 528 } else { 529 for (np = lp->ul_null_node.uln_prev; 530 status == UU_WALK_NEXT && np != &lp->ul_null_node; 531 np = np->uln_prev) { 532 status = (*func)(NODE_TO_ELEM(lp, np), private); 533 } 534 } 535 } 536 if (status >= 0) 537 return (0); 538 uu_set_error(UU_ERROR_CALLBACK_FAILED); 539 return (-1); 540 } 541 542 void 543 uu_list_remove(uu_list_t *lp, void *elem) 544 { 545 uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem); 546 uu_list_walk_t *wp; 547 548 if (lp->ul_debug) { 549 if (np->uln_prev == NULL) 550 uu_panic("uu_list_remove(%p, %p): elem not on list\n", 551 (void *)lp, elem); 552 /* 553 * invalidate outstanding uu_list_index_ts. 554 */ 555 lp->ul_index = INDEX_NEXT(lp->ul_index); 556 } 557 558 /* 559 * robust walkers must be advanced. In debug mode, non-robust 560 * walkers are also on the list. If there are any, it's an error. 561 */ 562 for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk; 563 wp = wp->ulw_next) { 564 if (wp->ulw_robust) { 565 if (np == wp->ulw_next_result) 566 (void) list_walk_advance(wp, lp); 567 } else if (wp->ulw_next_result != NULL) { 568 uu_panic("uu_list_remove(%p, %p): active non-robust " 569 "walker\n", (void *)lp, elem); 570 } 571 } 572 573 np->uln_next->uln_prev = np->uln_prev; 574 np->uln_prev->uln_next = np->uln_next; 575 576 lp->ul_numnodes--; 577 578 np->uln_next = POOL_TO_MARKER(lp->ul_pool); 579 np->uln_prev = NULL; 580 } 581 582 void * 583 uu_list_teardown(uu_list_t *lp, void **cookie) 584 { 585 void *ep; 586 587 /* 588 * XXX: disable list modification until list is empty 589 */ 590 if (lp->ul_debug && *cookie != NULL) 591 uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n", 592 (void *)lp, (void *)cookie); 593 594 ep = uu_list_first(lp); 595 if (ep) 596 uu_list_remove(lp, ep); 597 return (ep); 598 } 599 600 int 601 uu_list_insert_before(uu_list_t *lp, void *target, void *elem) 602 { 603 uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target); 604 605 if (target == NULL) 606 np = &lp->ul_null_node; 607 608 if (lp->ul_debug) { 609 if (np->uln_prev == NULL) 610 uu_panic("uu_list_insert_before(%p, %p, %p): %p is " 611 "not currently on a list\n", 612 (void *)lp, target, elem, target); 613 } 614 if (lp->ul_sorted) { 615 if (lp->ul_debug) 616 uu_panic("uu_list_insert_before(%p, ...): list is " 617 "UU_LIST_SORTED\n", (void *)lp); 618 uu_set_error(UU_ERROR_NOT_SUPPORTED); 619 return (-1); 620 } 621 622 list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np); 623 return (0); 624 } 625 626 int 627 uu_list_insert_after(uu_list_t *lp, void *target, void *elem) 628 { 629 uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target); 630 631 if (target == NULL) 632 np = &lp->ul_null_node; 633 634 if (lp->ul_debug) { 635 if (np->uln_prev == NULL) 636 uu_panic("uu_list_insert_after(%p, %p, %p): %p is " 637 "not currently on a list\n", 638 (void *)lp, target, elem, target); 639 } 640 if (lp->ul_sorted) { 641 if (lp->ul_debug) 642 uu_panic("uu_list_insert_after(%p, ...): list is " 643 "UU_LIST_SORTED\n", (void *)lp); 644 uu_set_error(UU_ERROR_NOT_SUPPORTED); 645 return (-1); 646 } 647 648 list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next); 649 return (0); 650 } 651 652 size_t 653 uu_list_numnodes(uu_list_t *lp) 654 { 655 return (lp->ul_numnodes); 656 } 657 658 void * 659 uu_list_first(uu_list_t *lp) 660 { 661 uu_list_node_impl_t *n = lp->ul_null_node.uln_next; 662 if (n == &lp->ul_null_node) 663 return (NULL); 664 return (NODE_TO_ELEM(lp, n)); 665 } 666 667 void * 668 uu_list_last(uu_list_t *lp) 669 { 670 uu_list_node_impl_t *n = lp->ul_null_node.uln_prev; 671 if (n == &lp->ul_null_node) 672 return (NULL); 673 return (NODE_TO_ELEM(lp, n)); 674 } 675 676 void * 677 uu_list_next(uu_list_t *lp, void *elem) 678 { 679 uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem); 680 681 n = n->uln_next; 682 if (n == &lp->ul_null_node) 683 return (NULL); 684 return (NODE_TO_ELEM(lp, n)); 685 } 686 687 void * 688 uu_list_prev(uu_list_t *lp, void *elem) 689 { 690 uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem); 691 692 n = n->uln_prev; 693 if (n == &lp->ul_null_node) 694 return (NULL); 695 return (NODE_TO_ELEM(lp, n)); 696 } 697 698 /* 699 * called from uu_lockup() and uu_release(), as part of our fork1()-safety. 700 */ 701 void 702 uu_list_lockup(void) 703 { 704 uu_list_pool_t *pp; 705 706 (void) pthread_mutex_lock(&uu_lpool_list_lock); 707 for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool; 708 pp = pp->ulp_next) 709 (void) pthread_mutex_lock(&pp->ulp_lock); 710 } 711 712 void 713 uu_list_release(void) 714 { 715 uu_list_pool_t *pp; 716 717 for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool; 718 pp = pp->ulp_next) 719 (void) pthread_mutex_unlock(&pp->ulp_lock); 720 (void) pthread_mutex_unlock(&uu_lpool_list_lock); 721 } 722