/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/avl.h>

static uu_avl_pool_t	uu_null_apool = { &uu_null_apool, &uu_null_apool };
static pthread_mutex_t	uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The index mark changes on every insert and delete, to catch stale
 * references.
 *
 * We leave the low bit alone, since the avl code uses it.
 */
#define	INDEX_MAX	(sizeof (uintptr_t) - 2)
#define	INDEX_NEXT(m)	(((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)

#define	INDEX_DECODE(i)		((i) & ~INDEX_MAX)
#define	INDEX_ENCODE(p, n)	(((n) & ~INDEX_MAX) | (p)->ua_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ua_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)

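/*
 * Sketch of the index life cycle (illustrative only; concrete mark
 * values depend on the pointer width):
 *
 *	uu_avl_index_t idx;
 *	(void) uu_avl_find(ap, elem, NULL, &idx);	ORs in ap->ua_index
 *	...
 *	uu_avl_insert(ap, elem, idx);		debug mode checks
 *						INDEX_VALID() first
 *
 * In debug mode, every insert and remove advances ua_index via
 * INDEX_NEXT(), so a stale idx fails INDEX_VALID() instead of silently
 * corrupting the tree.
 */
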
/*
 * When an element is inactive (not in a tree), we keep a marked pointer to
 * its containing pool in its first word, and a NULL pointer in its second.
 *
 * On insert, we use these to verify that it comes from the correct pool.
 */
#define	NODE_ARRAY(p, n)	((uintptr_t *)((uintptr_t)(n) + \
    (p)->uap_nodeoffset))

#define	POOL_TO_MARKER(pp)	(((uintptr_t)(pp) | 1))

#define	DEAD_MARKER		0xc4

uu_avl_pool_t *
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
    uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_avl_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_avl_node_t) > objsize ||
	    compare_func == NULL) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_AVL_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_avl_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
	pp->uap_nodeoffset = nodeoffset;
	pp->uap_objsize = objsize;
	pp->uap_cmp = compare_func;
	if (flags & UU_AVL_POOL_DEBUG)
		pp->uap_debug = 1;
	pp->uap_last_index = 0;

	(void) pthread_mutex_init(&pp->uap_lock, NULL);

	pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
	pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl);

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next = next = &uu_null_apool;
	pp->uap_prev = prev = next->uap_prev;
	next->uap_prev = pp;
	prev->uap_next = pp;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);

	return (pp);
}

void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
	if (pp->uap_debug) {
		if (pp->uap_null_avl.ua_next_enc !=
		    UU_PTR_ENCODE(&pp->uap_null_avl) ||
		    pp->uap_null_avl.ua_prev_enc !=
		    UU_PTR_ENCODE(&pp->uap_null_avl)) {
			uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding avls, or is corrupt.\n",
			    sizeof (pp->uap_name), pp->uap_name, pp);
		}
	}
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next->uap_prev = pp->uap_prev;
	pp->uap_prev->uap_next = pp->uap_next;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
	pp->uap_prev = NULL;
	pp->uap_next = NULL;
	uu_free(pp);
}

void
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->uap_objsize) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, np, pp, pp->uap_name, offset,
			    pp->uap_objsize);
		}
		if (offset != pp->uap_nodeoffset) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, np, pp, pp->uap_name, offset,
			    pp->uap_nodeoffset);
		}
	}

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}

void
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node already fini'd\n",
			    base, np, pp, pp->uap_name);
		}
		if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt, in tree, or in different pool\n",
			    base, np, pp, pp->uap_name);
		}
	}

	na[0] = DEAD_MARKER;
	na[1] = DEAD_MARKER;
	na[2] = DEAD_MARKER;
}

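/*
 * Typical pool setup (a sketch; "struct node_s", "node_compare", and
 * the field names are hypothetical):
 *
 *	struct node_s {
 *		int		n_key;
 *		uu_avl_node_t	n_avlnode;
 *	};
 *
 *	pool = uu_avl_pool_create("example_pool", sizeof (struct node_s),
 *	    offsetof(struct node_s, n_avlnode), node_compare,
 *	    UU_AVL_POOL_DEBUG);
 *
 * Each element must have uu_avl_node_init() called on its node before
 * insertion, and uu_avl_node_fini() before the element is freed.
 */
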
struct uu_avl_node_compare_info {
	uu_compare_fn_t	*ac_compare;
	void		*ac_private;
	void		*ac_right;
	void		*ac_found;
};

static int
uu_avl_node_compare(const void *l, const void *r)
{
	struct uu_avl_node_compare_info *info =
	    (struct uu_avl_node_compare_info *)l;

	int res = info->ac_compare(r, info->ac_right, info->ac_private);

	if (res == 0) {
		if (info->ac_found == NULL)
			info->ac_found = (void *)r;
		return (-1);
	}
	if (res < 0)
		return (1);
	return (-1);
}

uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
	uu_avl_t *ap, *next, *prev;

	if (flags & ~UU_AVL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	ap = uu_zalloc(sizeof (*ap));
	if (ap == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	ap->ua_pool = pp;
	ap->ua_parent_enc = UU_PTR_ENCODE(parent);
	ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
	ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

	avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
	    pp->uap_nodeoffset);

	ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
	ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;

	(void) pthread_mutex_lock(&pp->uap_lock);
	next = &pp->uap_null_avl;
	prev = UU_PTR_DECODE(next->ua_prev_enc);
	ap->ua_next_enc = UU_PTR_ENCODE(next);
	ap->ua_prev_enc = UU_PTR_ENCODE(prev);
	next->ua_prev_enc = UU_PTR_ENCODE(ap);
	prev->ua_next_enc = UU_PTR_ENCODE(ap);
	(void) pthread_mutex_unlock(&pp->uap_lock);

	return (ap);
}

void
uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pp = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n", ap);
		}
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    ap);
		}
	}
	(void) pthread_mutex_lock(&pp->uap_lock);
	UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
	UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
	(void) pthread_mutex_unlock(&pp->uap_lock);
	ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
	ap->ua_next_enc = UU_PTR_ENCODE(NULL);

	ap->ua_pool = NULL;
	avl_destroy(&ap->ua_tree);

	uu_free(ap);
}

size_t
uu_avl_numnodes(uu_avl_t *ap)
{
	return (avl_numnodes(&ap->ua_tree));
}

void *
uu_avl_first(uu_avl_t *ap)
{
	return (avl_first(&ap->ua_tree));
}

void *
uu_avl_last(uu_avl_t *ap)
{
	return (avl_last(&ap->ua_tree));
}

void *
uu_avl_next(uu_avl_t *ap, void *node)
{
	return (AVL_NEXT(&ap->ua_tree, node));
}

void *
uu_avl_prev(uu_avl_t *ap, void *node)
{
	return (AVL_PREV(&ap->ua_tree, node));
}

static void
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->uaw_avl = ap;
	wp->uaw_robust = robust;
	wp->uaw_dir = direction;

	if (direction > 0)
		wp->uaw_next_result = avl_first(&ap->ua_tree);
	else
		wp->uaw_next_result = avl_last(&ap->ua_tree);

	if (ap->ua_debug || robust) {
		wp->uaw_next = next = &ap->ua_null_walk;
		wp->uaw_prev = prev = next->uaw_prev;
		next->uaw_prev = wp;
		prev->uaw_next = wp;
	}
}

static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *np = wp->uaw_next_result;

	avl_tree_t *t = &ap->ua_tree;

	if (np == NULL)
		return (NULL);

	wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
	    AVL_PREV(t, np);

	return (np);
}

static void
_avl_walk_fini(uu_avl_walk_t *wp)
{
	if (wp->uaw_next != NULL) {
		wp->uaw_next->uaw_prev = wp->uaw_prev;
		wp->uaw_prev->uaw_next = wp->uaw_next;
		wp->uaw_next = NULL;
		wp->uaw_prev = NULL;
	}
	wp->uaw_avl = NULL;
	wp->uaw_next_result = NULL;
}

uu_avl_walk_t *
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	_avl_walk_init(wp, ap, flags);
	return (wp);
}

void *
uu_avl_walk_next(uu_avl_walk_t *wp)
{
	return (_avl_walk_advance(wp, wp->uaw_avl));
}

void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
	_avl_walk_fini(wp);
	uu_free(wp);
}

int
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	void *e;
	uu_avl_walk_t my_walk;

	int status = UU_WALK_NEXT;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	_avl_walk_init(&my_walk, ap, flags);
	while (status == UU_WALK_NEXT &&
	    (e = _avl_walk_advance(&my_walk, ap)) != NULL)
		status = (*func)(e, private);
	_avl_walk_fini(&my_walk);

	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}

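/*
 * Walker usage sketch (the element type and "process" are
 * hypothetical):
 *
 *	uu_avl_walk_t *wp;
 *	void *elem;
 *
 *	wp = uu_avl_walk_start(ap, UU_WALK_ROBUST);
 *	if (wp != NULL) {
 *		while ((elem = uu_avl_walk_next(wp)) != NULL)
 *			process(elem);		may uu_avl_remove(ap, elem)
 *		uu_avl_walk_end(wp);
 *	}
 *
 * UU_WALK_ROBUST registers the walker on ua_null_walk, so that
 * uu_avl_remove() can advance it past a node being deleted.  A plain
 * uu_avl_first()/uu_avl_next() loop is only safe if the tree is not
 * modified during the traversal.
 */
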
void
uu_avl_remove(uu_avl_t *ap, void *elem)
{
	uu_avl_walk_t *wp;
	uu_avl_pool_t *pp = ap->ua_pool;
	uintptr_t *na = NODE_ARRAY(pp, elem);

	if (ap->ua_debug) {
		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}

	/*
	 * Robust walkers must be advanced if we are removing the node
	 * they are currently using.  In debug mode, non-robust walkers
	 * are also on the walker list.
	 */
	for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
	    wp = wp->uaw_next) {
		if (wp->uaw_robust) {
			if (elem == wp->uaw_next_result)
				(void) _avl_walk_advance(wp, ap);
		} else if (wp->uaw_next_result != NULL) {
			uu_panic("uu_avl_remove(%p, %p): active non-robust "
			    "walker\n", ap, elem);
		}
	}

	avl_remove(&ap->ua_tree, elem);

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}

void *
uu_avl_teardown(uu_avl_t *ap, void **cookie)
{
	void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);

	if (elem != NULL) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		na[0] = POOL_TO_MARKER(pp);
		na[1] = 0;
	}
	return (elem);
}

void *
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
{
	struct uu_avl_node_compare_info info;
	void *result;

	info.ac_compare = ap->ua_pool->uap_cmp;
	info.ac_private = private;
	info.ac_right = elem;
	info.ac_found = NULL;

	result = avl_find(&ap->ua_tree, &info, out);
	if (out != NULL)
		*out = INDEX_ENCODE(ap, *out);

	if (ap->ua_debug && result != NULL)
		uu_panic("uu_avl_find: internal error: avl_find succeeded\n");

	return (info.ac_found);
}

void
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
{
	if (ap->ua_debug) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		if (na[1] != 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node already "
			    "in tree, or corrupt\n",
			    ap, elem, idx);
		if (na[0] == 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node not "
			    "initialized\n",
			    ap, elem, idx);
		if (na[0] != POOL_TO_MARKER(pp))
			uu_panic("uu_avl_insert(%p, %p, %p): node from "
			    "other pool, or corrupt\n",
			    ap, elem, idx);

		if (!INDEX_VALID(ap, idx))
			uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
			    ap, elem, idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");

		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}
	avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
}

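/*
 * The uu_avl_index_t filled in by uu_avl_find() supports the classic
 * find-or-insert idiom (sketch; "new_elem" is hypothetical):
 *
 *	uu_avl_index_t idx;
 *
 *	if (uu_avl_find(ap, new_elem, NULL, &idx) == NULL)
 *		uu_avl_insert(ap, new_elem, idx);
 *
 * The index must be used before the tree is next modified; in debug
 * mode, uu_avl_insert() panics on a stale index (see INDEX_VALID()).
 */
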
void *
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
		    ap, idx, INDEX_CHECK(idx)? "outdated index" :
		    "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
}

void *
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
		    ap, idx, INDEX_CHECK(idx)? "outdated index" :
		    "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
}

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_avl_lockup(void)
{
	uu_avl_pool_t *pp;

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_lock(&pp->uap_lock);
}

void
uu_avl_release(void)
{
	uu_avl_pool_t *pp;

	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_unlock(&pp->uap_lock);
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
}