/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	8.4 (Berkeley) 11/2/94
 *
 * $FreeBSD$
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */

#include "defs.h"

#ifdef __NetBSD__
__RCSID("$NetBSD$");
#elif defined(__FreeBSD__)
__RCSID("$FreeBSD$");
#else
__RCSID("$Revision: 2.23 $");
#ident "$Revision: 2.23 $"
#endif

#define log(x, msg)	syslog(x, msg)
#define panic(s)	{log(LOG_ERR, s); exit(1);}
#define min(a, b)	(((a) < (b)) ? (a) : (b))

int	max_keylen;
static struct radix_mask *rn_mkfreelist;
static struct radix_node_head *mask_rnhead;
static char *addmask_key;
static const uint8_t normal_chars[] =
    { 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff };
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)
#define Bcmp(a, b, l) (l == 0 ? 0 \
	: memcmp((caddr_t)(a), (caddr_t)(b), (size_t)l))

static int rn_satisfies_leaf(char *, struct radix_node *, int);
static struct radix_node *rn_addmask(void *n_arg, int search, int skip);
static struct radix_node *rn_addroute(void *v_arg, void *n_arg,
    struct radix_node_head *head, struct radix_node treenodes[2]);
static struct radix_node *rn_match(void *v_arg, struct radix_node_head *head);

/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask be the key.
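 * (For example, the key 10.0.0.0 with mask 255.0.0.0 satisfies this
 * requirement, while the key 10.0.0.1 with the same mask does not,
 * since 10.0.0.1 & 255.0.0.0 is 10.0.0.0, not the key itself.)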
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */

/*
 * Descend from the given node to a leaf, branching on the bits of the
 * key v_arg.  The caller must verify that the leaf actually matches.
 */
static struct radix_node *
rn_search(void *v_arg,
	  struct radix_node *head)
{
	struct radix_node *x;
	caddr_t v;

	for (x = head, v = v_arg; x->rn_b >= 0; ) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

/*
 * Like rn_search(), but branch right only on bits that are also set
 * in the mask m_arg.
 */
static struct radix_node *
rn_search_m(void *v_arg,
	    struct radix_node *head,
	    void *m_arg)
{
	struct radix_node *x;
	caddr_t v = v_arg, m = m_arg;

	for (x = head; x->rn_b >= 0; ) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

/*
 * Return true iff mask m_arg is strictly more specific than mask n_arg,
 * i.e. every bit set in n is also set in m and the two masks differ.
 */
static int
rn_refines(void *m_arg, void *n_arg)
{
	caddr_t m = m_arg, n = n_arg;
	caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}

/*
 * Look up an exact (key, mask) pair; returns 0 if the mask is unknown
 * or no route carries exactly that mask.
 */
static struct radix_node *
rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head)
{
	struct radix_node *x;
	caddr_t netmask = 0;

	if (m_arg) {
		if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
			return (0);
		netmask = x->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return x;
}

/*
 * Return true if the key `trial' agrees with the leaf's key under the
 * leaf's mask, comparing bytes from position `skip' onward.
 */
static int
rn_satisfies_leaf(char *trial,
		  struct radix_node *leaf,
		  int skip)
{
	char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	char *cplim;
	int length = min(*(u_char *)cp, *(u_char *)cp2);

	if (cp3 == 0)
		cp3 = rn_ones;
	else
		length = min(length, *(u_char *)cp3);
	cplim = cp + length;
	cp3 += skip;
	cp2 += skip;
	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return 0;
	return 1;
}
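
/*
 * Find the leaf holding the most specific route matching the key v_arg:
 * first walk down the tree to a leaf, then, on failure, consult the mask
 * lists attached to each ancestor on the way back up.  Returns 0 if no
 * route matches.
 */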
static struct radix_node *
rn_match(void *v_arg,
	 struct radix_node_head *head)
{
	caddr_t v = v_arg;
	struct radix_node *t = head->rnh_treetop, *x;
	caddr_t cp = v, cp2;
	caddr_t cplim;
	struct radix_node *saved_t, *top = t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;
	int test, b, rn_b;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt to limit how many bytes we check
	 * to the length of the mask, since if it matches we had a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if it didn't match with a shorter length it would fail
	 * with a long one.  This wins big for class B and C netmasks, which
	 * are probably the most common case...
	 */
	if (t->rn_mask)
		vlen = *(u_char *)t->rn_mask;
	cp += off;
	cp2 = t->rn_key + off;
	cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 * Or 255.255.255.255
	 *
	 * In this case, we have a complete match of the key.  Unless
	 * the node is one of the roots, we are finished.
	 * If it is the zeros root, then take what we have, preferring
	 * any real data.
	 * If it is the ones root, then pretend the target key was followed
	 * by a byte of zeros.
	 */
	if (!(t->rn_flags & RNF_ROOT))
		return t;		/* not a root */
	if (t->rn_dupedkey) {
		t = t->rn_dupedkey;
		return t;		/* have some real data */
	}
	if (*(cp-1) == 0)
		return t;		/* not the ones root */
	b = 0;				/* fake a zero after 255.255.255.255 */
	goto on2;
on1:
	test = (*cp ^ *cp2) & 0xff;	/* find first bit that differs */
	for (b = 7; (test >>= 1) > 0; )
		b--;
on2:
	matched_off = cp - v;
	b += matched_off << 3;
	rn_b = -1 - b;
	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == 0)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_b <= t->rn_b)
				return t;
		} else if (rn_satisfies_leaf(v, t, matched_off)) {
			return t;
		}
	}
	t = saved_t;
	/* start searching up the tree */
	do {
		struct radix_mask *m;

		t = t->rn_p;
		if ((m = t->rn_mklist)) {
			/*
			 * If non-contiguous masks ever become important
			 * we can restore the masking and open coding of
			 * the search and satisfaction test and put the
			 * calculation of "off" back before the "do".
			 */
			do {
				if (m->rm_flags & RNF_NORMAL) {
					if (rn_b <= m->rm_b)
						return (m->rm_leaf);
				} else {
					off = min(t->rn_off, matched_off);
					x = rn_search_m(v, t, m->rm_mask);
					while (x && x->rn_mask != m->rm_mask)
						x = x->rn_dupedkey;
					if (x && rn_satisfies_leaf(v, x, off))
						return x;
				}
			} while ((m = m->rm_mklist));
		}
	} while (t != top);
	return 0;
}
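
/*
 * When RN_DEBUG is defined, nodes are numbered as they are created and
 * linked into rn_clist, so the history of the tree can be inspected.
 */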
#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
int rn_debug = 1;
#endif

/*
 * Initialize a (leaf, interior) pair of nodes for the key v, with the
 * interior node testing bit b.  Returns the interior node.
 */
static struct radix_node *
rn_newpair(void *v, int b, struct radix_node nodes[2])
{
	struct radix_node *tt = nodes, *t = tt + 1;

	t->rn_b = b;
	t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt;
	t->rn_off = b >> 3;
	tt->rn_b = -1;
	tt->rn_key = (caddr_t)v;
	tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++;
	t->rn_info = rn_nodenum++;
	tt->rn_twin = t;
	tt->rn_ybro = rn_clist;
	rn_clist = tt;
#endif
	return t;
}

/*
 * Insert the key v_arg into the tree using the caller-supplied pair of
 * nodes.  On return *dupentry tells whether the key was already present,
 * in which case the existing leaf is returned instead.
 */
static struct radix_node *
rn_insert(void *v_arg,
	  struct radix_node_head *head,
	  int *dupentry,
	  struct radix_node nodes[2])
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_off, vlen = (int)*((u_char *)v);
	struct radix_node *t = rn_search(v_arg, top);
	caddr_t cp = v + head_off;
	int b;
	struct radix_node *tt;

	/*
	 * Find first bit at which v and t->rn_key differ
	 */
	{
		caddr_t cp2 = t->rn_key + head_off;
		int cmp_res;
		caddr_t cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto on1;
		/* handle adding 255.255.255.255 */
		if (!(t->rn_flags & RNF_ROOT) || *(cp2-1) == 0) {
			*dupentry = 1;
			return t;
		}
on1:
		*dupentry = 0;
		cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
		for (b = (cp - v) << 3; cmp_res; b--)
			cmp_res >>= 1;
	}
	{
		struct radix_node *p, *x = top;

		cp = v;
		do {
			p = x;
			if (cp[x->rn_off] & x->rn_bmask)
				x = x->rn_r;
			else
				x = x->rn_l;
		} while ((unsigned)b > (unsigned)x->rn_b);
#ifdef RN_DEBUG
		if (rn_debug)
			log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
#endif
		t = rn_newpair(v_arg, b, nodes);
		tt = t->rn_l;
		if ((cp[p->rn_off] & p->rn_bmask) == 0)
			p->rn_l = t;
		else
			p->rn_r = t;
		x->rn_p = t;
		t->rn_p = p;	/* frees x, p as temp vars below */
		if ((cp[t->rn_off] & t->rn_bmask) == 0) {
			t->rn_r = x;
		} else {
			t->rn_r = tt;
			t->rn_l = x;
		}
#ifdef RN_DEBUG
		if (rn_debug)
			log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
#endif
	}
	return (tt);
}
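
/*
 * Enter the netmask n_arg in the separate mask tree, canonicalizing it
 * (trailing zero bytes trimmed) and computing its index and normalcy.
 * With `search' set, only look the mask up.  Returns the mask leaf, or
 * 0 on failure.
 */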
static struct radix_node *
rn_addmask(void *n_arg, int search, int skip)
{
	caddr_t netmask = (caddr_t)n_arg;
	struct radix_node *x;
	caddr_t cp, cplim;
	int b = 0, mlen, j;
	int maskduplicated, m0, isnormal;
	struct radix_node *saved_x;
	static int last_zeroed = 0;

	if ((mlen = *(u_char *)netmask) > max_keylen)
		mlen = max_keylen;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnhead->rnh_nodes);
	if (skip > 1)
		Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
	if ((m0 = mlen) > skip)
		Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0; )
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		Bzero(addmask_key + m0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	x = rn_search(addmask_key, rn_masktop);
	if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
		x = 0;
	if (x || search)
		return (x);
	x = (struct radix_node *)rtmalloc(max_keylen + 2*sizeof(*x),
	    "rn_addmask");
	saved_x = x;
	Bzero(x, max_keylen + 2 * sizeof(*x));
	netmask = cp = (caddr_t)(x + 2);
	Bcopy(addmask_key, cp, mlen);
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	if (maskduplicated) {
		log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
		Free(saved_x);
		return (x);
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 */
	cplim = netmask + mlen;
	isnormal = 1;
	for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff; )
		cp++;
	if (cp != cplim) {
		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = 0;
	}
	b += (cp - netmask) << 3;
	x->rn_b = -1 - b;
	if (isnormal)
		x->rn_flags |= RNF_NORMAL;
	return (x);
}

static int	/* XXX: arbitrary ordering for non-contiguous masks */
rn_lexobetter(void *m_arg, void *n_arg)
{
	u_char *mp = m_arg, *np = n_arg, *lim;

	if (*mp > *np)
		return 1;	/* not really, but need to check longer one first */
	if (*mp == *np)
		for (lim = mp + *mp; mp < lim; )
			if (*mp++ > *np++)
				return 1;
	return 0;
}

/*
 * Allocate a radix_mask annotation for the leaf tt and link it in
 * ahead of `next' on a mask list.
 */
static struct radix_mask *
rn_new_radix_mask(struct radix_node *tt,
		  struct radix_mask *next)
{
	struct radix_mask *m;

	MKGet(m);
	if (m == 0) {
		log(LOG_ERR, "Mask for route not entered\n");
		return (0);
	}
	Bzero(m, sizeof *m);
	m->rm_b = tt->rn_b;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return m;
}
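
/*
 * Add a route with key v_arg and netmask n_arg to the tree, using the
 * caller-supplied pair of nodes.  Duplicated keys are chained through
 * rn_dupedkey, most specific mask first, and the new route's mask is
 * hoisted to the highest ancestor it can apply to.  Returns the leaf,
 * or 0 if an identical (key, mask) route already exists.
 */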
static struct radix_node *
rn_addroute(void *v_arg,
	    void *n_arg,
	    struct radix_node_head *head,
	    struct radix_node treenodes[2])
{
	caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
	struct radix_node *t, *x = 0, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf = 0;
	int keyduplicated;
	caddr_t mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
			return (0);
		b_leaf = x->rn_b;
		b = -1 - x->rn_b;
		netmask = x->rn_key;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask)
				return (0);
			if (netmask == 0 ||
			    (tt->rn_mask &&
			     ((b_leaf < tt->rn_b) || /* index(netmask) > node */
			      rn_refines(netmask, tt->rn_mask) ||
			      rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt == saved_tt) {
			struct radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			if (x->rn_l == t)
				x->rn_l = tt;
			else
				x->rn_r = tt;
			saved_tt = tt;
			x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
		}
#ifdef RN_DEBUG
		t = tt + 1;
		tt->rn_info = rn_nodenum++;
		t->rn_info = rn_nodenum++;
		tt->rn_twin = t;
		tt->rn_ybro = rn_clist;
		rn_clist = tt;
#endif
		tt->rn_key = (caddr_t)v;
		tt->rn_b = -1;
		tt->rn_flags = RNF_ACTIVE;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
		tt->rn_flags |= x->rn_flags & RNF_NORMAL;
	}
	t = saved_tt->rn_p;
	if (keyduplicated)
		goto on2;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt)
		x = t->rn_l;
	else
		x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
			if (x->rn_mask && (x->rn_b >= b_leaf) &&
			    x->rn_mklist == 0) {
				if ((*mp = m = rn_new_radix_mask(x, 0)))
					mp = &m->rm_mklist;
			}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m;
		*mp = 0;
	}
on2:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt;	/* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
				log(LOG_ERR,
				    "Non-unique normal route, mask not entered");
				return tt;
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return tt;
}
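
/*
 * Remove the route with key v_arg and netmask netmask_arg from the
 * tree, unlinking its mask annotation from any ancestor mask list and
 * collapsing the now one-way branch.  Returns the deleted pair of
 * nodes, or 0 if the route cannot be found.
 */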
static struct radix_node *
rn_delete(void *v_arg,
	  void *netmask_arg,
	  struct radix_node_head *head)
{
	struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_off;
	vlen = *(u_char *)v;
	saved_tt = tt;
	top = x;
	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 1, head_off)) == 0)
			return (0);
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			return 0;	/* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			goto on1;
		}
		if (--m->rm_refs >= 0)
			goto on1;
	}
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1;	/* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0) {
		log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
		if (tt->rn_flags & RNF_NORMAL)
			return (0);	/* Dangling ref to us */
	}
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t)
		t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_p;
	if ((dupedkey = saved_tt->rn_dupedkey)) {
		if (tt == saved_tt) {
			x = dupedkey;
			x->rn_p = t;
			if (t->rn_l == tt)
				t->rn_l = x;
			else
				t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt; )
				p = p->rn_dupedkey;
			if (p)
				p->rn_dupedkey = tt->rn_dupedkey;
			else
				log(LOG_ERR, "rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t;
			p = t->rn_p;
#else
			b = t->rn_info;
			*++x = *t;
			t->rn_info = b;
			p = t->rn_p;
#endif
			if (p->rn_l == t)
				p->rn_l = x;
			else
				p->rn_r = x;
			x->rn_l->rn_p = x;
			x->rn_r->rn_p = x;
		}
		goto out;
	}
	if (t->rn_l == tt)
		x = t->rn_r;
	else
		x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t)
		p->rn_r = x;
	else
		p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; (m = *mp); )
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/*
			 * If there are any (key, mask) pairs in a sibling
			 * duped-key chain, some subset will appear sorted
			 * in the same order attached to our mklist.
			 */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;
					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						MKFree(m);
					m = mm;
				}
			if (m)
				syslog(LOG_ERR, "%s 0x%lx at 0x%lx\n",
				    "rn_delete: Orphaned Mask",
				    (unsigned long)m,
				    (unsigned long)x);
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info;
		*t = *x;
		t->rn_info = b;
#endif
		t->rn_l->rn_p = t;
		t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x)
			p->rn_l = t;
		else
			p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}
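
/*
 * Apply the function f to every leaf in the tree.  The walk computes
 * each successor before visiting a leaf, so f may delete the node it
 * is handed.
 */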
int
rn_walktree(struct radix_node_head *h,
	    int (*f)(struct radix_node *, struct walkarg *),
	    struct walkarg *w)
{
	int error;
	struct radix_node *base, *next;
	struct radix_node *rn = h->rnh_treetop;

	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0; )
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while ((rn = base)) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}

/*
 * Allocate and initialize an empty tree whose leftmost and rightmost
 * leaves are the all-zeros and all-ones keys.
 */
int
rn_inithead(struct radix_node_head **head, int off)
{
	struct radix_node_head *rnh;
	struct radix_node *t, *tt, *ttt;

	if (*head)
		return (1);
	rnh = (struct radix_node_head *)rtmalloc(sizeof(*rnh), "rn_inithead");
	Bzero(rnh, sizeof(*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_treetop = t;
	return (1);
}

/*
 * One-time module initialization; the caller must set max_keylen
 * first.  Builds the shared all-zeros and all-ones keys and the tree
 * that holds the canonical netmasks.
 */
void
rn_init(void)
{
	char *cp, *cplim;

	if (max_keylen == 0) {
		printf("rn_init: radix functions require max_keylen be set\n");
		return;
	}
	rn_zeros = (char *)rtmalloc(3 * max_keylen, "rn_init");
	Bzero(rn_zeros, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	addmask_key = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead(&mask_rnhead, 0) == 0)
		panic("rn_init 2");
}