/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	8.5 (Berkeley) 5/19/95
 * $FreeBSD: /repoman/r/ncvs/src/sys/net/radix.c,v 1.36.2.1 2005/01/31 23:26:23
 * imp Exp $
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#include <sys/types.h>

#ifndef _RADIX_H_
#include <sys/param.h>
#ifdef	_KERNEL
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#else
#include <assert.h>
#define	ASSERT assert
#include <stdio.h>
#include <stdlib.h>
#include <syslog.h>
#include <strings.h>
#endif	/* _KERNEL */
#include <net/radix.h>
#endif

#ifndef	_KERNEL
void
panic(const char *str)
{
	fprintf(stderr, "Panic - %s\n", str);
	abort();
}
#endif	/* _KERNEL */

static int	rn_walktree(struct radix_node_head *, walktree_f_t *, void *);
static int	rn_walktree_mt(struct radix_node_head *, walktree_f_t *,
    void *, lockf_t, lockf_t);
static struct radix_node
	*rn_insert(void *, struct radix_node_head *, int *,
	    struct radix_node [2]),
	*rn_newpair(void *, int, struct radix_node[2]),
	*rn_search(void *, struct radix_node *),
	*rn_search_m(void *, struct radix_node *, void *),
	*rn_lookup(void *, void *, struct radix_node_head *),
	*rn_match(void *, struct radix_node_head *),
	*rn_match_args(void *, struct radix_node_head *, match_leaf_t *,
	    void *),
	*rn_addmask(void *, int, int),
	*rn_addroute(void *, void *, struct radix_node_head *,
	    struct radix_node [2]),
	*rn_delete(void *, void *, struct radix_node_head *);
static	boolean_t rn_refines(void *, void *);

#define	MAX_KEYLEN	16
static int	max_keylen = MAX_KEYLEN;

#ifdef	_KERNEL
static struct kmem_cache *radix_mask_cache; /* for rn_mkfreelist */
static struct kmem_cache *radix_node_cache;
#else
static char *radix_mask_cache, *radix_node_cache; /* dummy vars. never inited */
#endif	/* _KERNEL */

static struct radix_mask *rn_mkfreelist;
static struct radix_node_head *mask_rnhead;
/*
 * Work area -- the following point to two buffers of size max_keylen,
 * allocated in this order in a block of memory malloc'ed by rn_init.
 * A third buffer of size MAX_KEYLEN is allocated on the stack by
 * rn_addmask.
 */
static char *rn_zeros, *rn_ones;

#define	MKGet(m)  R_Malloc(m, radix_mask_cache, sizeof (struct radix_mask))
#define	MKFree(m) Free(m, radix_mask_cache)
#define	rn_masktop (mask_rnhead->rnh_treetop)

static boolean_t	rn_lexobetter(void *m_arg, void *n_arg);
static struct radix_mask *
		rn_new_radix_mask(struct radix_node *tt,
		    struct radix_mask *next);
static boolean_t
		rn_satisfies_leaf(char *trial, struct radix_node *leaf,
		    int skip, match_leaf_t *rn_leaf_fn, void *rn_leaf_arg);

#define	RN_MATCHF(rn, f, arg)	(f == NULL || (*f)((rn), arg))

/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_bit at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_bit - 1.
 * (We say the index of n is rn_bit.)
 *
 * There is at least one descendant which has a one bit at position rn_bit,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical AND of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_bit,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_bit, this implies that the trailing bits of k before
 * bit rn_bit are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask-and-compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
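
/*
 * A worked sketch of the definitions above, using a hypothetical
 * 5-byte length-prefixed encoding (see the LEN() discussion below):
 *
 *	key  = { 5, 10, 1, 0, 0 }	(10.1.0.0)
 *	mask = { 5, 0xff, 0xff, 0, 0 }	(255.255.0.0)
 *
 * The bit-wise AND of key and mask equals the key, as required.
 * Skipping the length byte, the first 0 bit in the mask is at bit
 * position 24 (bit 0 being the highest order bit of the buffer), so
 * the index of this route is 24 and the leaf carries rn_bit == -25.
 * Every bit past the index is 0, so the mask is also normal.
 */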

/*
 * Most of the functions in this code assume that the key/mask arguments
 * are sockaddr-like structures, where the first byte is a uchar_t
 * indicating the size of the entire structure.
 *
 * To make the assumption more explicit, we use the LEN() macro to access
 * this field. It is safe to pass an expression with side effects
 * to LEN() as the argument is evaluated only once.
 */
#define	LEN(x) (*(const uchar_t *)(x))

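/*
 * A minimal sketch of the encoding LEN() assumes (the buffer below is
 * hypothetical, not part of this file):
 *
 *	uchar_t addr[5] = { 5, 192, 168, 1, 1 };
 *	int len = LEN(addr);		(evaluates to 5, the first byte)
 */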

/*
 * Search the tree for a node matching the key.
 */
static struct radix_node *
rn_search(v_arg, head)
	void *v_arg;
	struct radix_node *head;
{
	struct radix_node *x;
	caddr_t v;

	for (x = head, v = v_arg; x->rn_bit >= 0; ) {
		if (x->rn_bmask & v[x->rn_offset])
			x = x->rn_right;
		else
			x = x->rn_left;
	}
	return (x);
}
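
/*
 * Descent sketch (hypothetical tree over the 5-byte keys shown
 * earlier): an internal node with rn_bit == 8 has rn_offset == 1 and
 * rn_bmask == 0x80, so it tests the top bit of the first byte after
 * the length.  For v = { 5, 10, 1, 0, 0 } the test 0x80 & 10 is zero
 * and the walk continues via rn_left; the loop ends at the first node
 * with rn_bit < 0, which is always a leaf.
 */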

/*
 * Same as above, but with an additional mask.
 */
static struct radix_node *
rn_search_m(v_arg, head, m_arg)
	struct radix_node *head;
	void *v_arg, *m_arg;
{
	struct radix_node *x;
	caddr_t v = v_arg, m = m_arg;

	for (x = head; x->rn_bit >= 0; ) {
		if ((x->rn_bmask & m[x->rn_offset]) &&
		    (x->rn_bmask & v[x->rn_offset]))
			x = x->rn_right;
		else
			x = x->rn_left;
	}
	return (x);
}

/*
 * Returns true if there are no bits set in n_arg that are zero in
 * m_arg and the masks aren't equal.  In other words, it returns true
 * when m_arg is a finer-granularity netmask -- it represents a subset
 * of the destinations implied by n_arg.
 */
static boolean_t
rn_refines(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	caddr_t m = m_arg, n = n_arg;
	caddr_t lim = n + LEN(n), lim2 = lim;
	int longer = LEN(n++) - (int)LEN(m++);
	boolean_t masks_are_equal = B_TRUE;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return (B_FALSE);
		if (*n++ != *m++)
			masks_are_equal = B_FALSE;
	}
	while (n < lim2)
		if (*n++)
			return (B_FALSE);
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return (B_TRUE);
	return (!masks_are_equal);
}
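
/*
 * For example (same hypothetical encoding as above), with
 * m = { 5, 0xff, 0xff, 0xff, 0 } and n = { 5, 0xff, 0xff, 0, 0 }:
 * no bit set in n is zero in m and the masks differ in byte 3, so
 * rn_refines(m, n) is B_TRUE; in the other direction n has a zero
 * byte where m has ones, so rn_refines(n, m) is B_FALSE.
 */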

static struct radix_node *
rn_lookup(v_arg, m_arg, head)
	void *v_arg, *m_arg;
	struct radix_node_head *head;
{
	struct radix_node *x;
	caddr_t netmask = NULL;

	if (m_arg) {
		x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_offset);
		if (x == NULL)
			return (NULL);
		netmask = x->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return (x);
}

/*
 * Returns true if address 'trial' has no bits differing from the
 * leaf's key when compared under the leaf's mask.  In other words,
 * returns true when 'trial' matches leaf.
 * In addition, if a rn_leaf_fn is passed in, that is used to find
 * a match on conditions defined by the caller of rn_match.  This is
 * used by the kernel ftable to match on IRE_MATCH_* conditions.
 */
static boolean_t
rn_satisfies_leaf(trial, leaf, skip, rn_leaf_fn, rn_leaf_arg)
	caddr_t trial;
	struct radix_node *leaf;
	int skip;
	match_leaf_t *rn_leaf_fn;
	void *rn_leaf_arg;
{
	char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	char *cplim;
	int length = min(LEN(cp), LEN(cp2));

	if (cp3 == 0)
		cp3 = rn_ones;
	else
		length = min(length, LEN(cp3));
	cplim = cp + length;
	cp3 += skip;
	cp2 += skip;

	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return (B_FALSE);

	return (RN_MATCHF(leaf, rn_leaf_fn, rn_leaf_arg));
}
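
/*
 * The loop above relies on the identity that trial matches the leaf
 * under its mask exactly when ((*cp ^ *cp2) & *cp3) == 0 for every
 * byte: XOR leaves a 1 wherever trial and key differ, and ANDing with
 * the mask discards differences in bits the route does not care
 * about.  A worked sketch for trial 10.1.2.3 against key 10.1.0.0,
 * mask 255.255.0.0:
 *
 *	(10 ^ 10) & 255 == 0	(1 ^ 1) & 255 == 0
 *	(2 ^ 0) & 0 == 0	(3 ^ 0) & 0 == 0
 *
 * so every byte passes and the leaf is satisfied.
 */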

static struct radix_node *
rn_match(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	return (rn_match_args(v_arg, head, NULL, NULL));
}

static struct radix_node *
rn_match_args(v_arg, head, rn_leaf_fn, rn_leaf_arg)
	void *v_arg;
	struct radix_node_head *head;
	match_leaf_t *rn_leaf_fn;
	void *rn_leaf_arg;
{
	caddr_t v = v_arg;
	struct radix_node *t = head->rnh_treetop, *x;
	caddr_t cp = v, cp2;
	caddr_t cplim;
	struct radix_node *saved_t, *top = t;
	int off = t->rn_offset, vlen = LEN(cp), matched_off;
	int test, b, rn_bit;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_bit >= 0; ) {
		if (t->rn_bmask & cp[t->rn_offset])
			t = t->rn_right;
		else
			t = t->rn_left;
	}
	/*
	 * See if we match exactly as a host destination
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt us to limit how many bytes to check
	 * to the length of the mask, since if it matches we had a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if it didn't match with a shorter length it would fail
	 * with a long one.  This wins big for class B&C netmasks which
	 * are probably the most common case...
	 */
	if (t->rn_mask)
		vlen = LEN(t->rn_mask);
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto keydiff;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 *
	 * Never return the root node itself, it seems to cause a
	 * lot of confusion.
	 */
	if (t->rn_flags & RNF_ROOT)
		t = t->rn_dupedkey;
	if (t == NULL || RN_MATCHF(t, rn_leaf_fn, rn_leaf_arg)) {
		return (t);
	} else {
		/*
		 * Although we found an exact match on the key, rn_leaf_fn
		 * is looking for some other criteria as well. Continue
		 * looking as if the exact match failed.
		 */
		if (t->rn_parent->rn_flags & RNF_ROOT) {
			/* hit the top. have to give up */
			return (NULL);
		}
		b = 0;
		goto keeplooking;
	}
keydiff:
	test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
	for (b = 7; (test >>= 1) > 0; )
		b--;
keeplooking:
	matched_off = cp - v;
	b += matched_off << 3;
	rn_bit = -1 - b;

	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == 0)
		t = t->rn_dupedkey;
	for (; t != NULL; t = t->rn_dupedkey) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if ((rn_bit <= t->rn_bit) &&
			    RN_MATCHF(t, rn_leaf_fn, rn_leaf_arg)) {
				return (t);
			}
		} else if (rn_satisfies_leaf(v, t, matched_off, rn_leaf_fn,
		    rn_leaf_arg)) {
			return (t);
		}
	}
	t = saved_t;
	/* start searching up the tree */
	do {
		struct radix_mask *m;

		t = t->rn_parent;
		m = t->rn_mklist;
		/*
		 * If non-contiguous masks ever become important
		 * we can restore the masking and open coding of
		 * the search and satisfaction test and put the
		 * calculation of "off" back before the "do".
		 */
		while (m) {
			if (m->rm_flags & RNF_NORMAL) {
				if ((rn_bit <= m->rm_bit) &&
				    RN_MATCHF(m->rm_leaf, rn_leaf_fn,
				    rn_leaf_arg)) {
					return (m->rm_leaf);
				}
			} else {
				off = min(t->rn_offset, matched_off);
				x = rn_search_m(v, t, m->rm_mask);
				while (x != NULL && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x && rn_satisfies_leaf(v, x, off,
				    rn_leaf_fn, rn_leaf_arg)) {
					return (x);
				}
			}
			m = m->rm_mklist;
		}
	} while (t != top);
	return (0);
}

/*
 * Whenever we add a new leaf to the tree, we also add a parent node,
 * so we allocate them as an array of two elements: the first one must be
 * the leaf (see RNTORT() in route.c), the second one is the parent.
 * This routine initializes the relevant fields of the nodes, so that
 * the leaf is the left child of the parent node, and both nodes have
 * (almost) all fields filled as appropriate.
 * The function returns a pointer to the parent node.
 */

static struct radix_node *
rn_newpair(v, b, nodes)
	void *v;
	int b;
	struct radix_node nodes[2];
{
	struct radix_node *tt = nodes, *t = tt + 1;

	t->rn_bit = b;
	t->rn_bmask = 0x80 >> (b & 7);
	t->rn_left = tt;
	t->rn_offset = b >> 3;

	/*
	 * t->rn_parent, t->rn_right, tt->rn_mask, tt->rn_dupedkey
	 * and tt->rn_bmask must have been zeroed by caller.
	 */
	tt->rn_bit = -1;
	tt->rn_key = v;
	tt->rn_parent = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
	tt->rn_mklist = t->rn_mklist = 0;
	return (t);
}
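
/*
 * For instance, rn_newpair(v, 24, nodes) returns &nodes[1] as the
 * internal node testing bit 24: rn_offset == 24 >> 3 == 3 and
 * rn_bmask == 0x80 >> (24 & 7) == 0x80.  nodes[0] is the leaf, with
 * rn_bit == -1, rn_key == v, the internal node as its parent, and
 * the internal node's rn_left pointing back at it.
 */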

static struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
	void *v_arg;
	struct radix_node_head *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_offset, vlen = (int)LEN(v);
	struct radix_node *t = rn_search(v_arg, top);
	caddr_t cp = v + head_off;
	int b;
	struct radix_node *tt;

	/*
	 * Find first bit at which v and t->rn_key differ
	 */
	{
		caddr_t cp2 = t->rn_key + head_off;
		int cmp_res;
		caddr_t cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto on1;
		*dupentry = 1;
		return (t);
on1:
		*dupentry = 0;
		cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
		for (b = (cp - v) << 3; cmp_res; b--)
			cmp_res >>= 1;
	}
	{
		struct radix_node *p, *x = top;
		cp = v;
		do {
			p = x;
			if (cp[x->rn_offset] & x->rn_bmask)
				x = x->rn_right;
			else
				x = x->rn_left;
		} while (b > (unsigned)x->rn_bit);
				/* x->rn_bit < b && x->rn_bit >= 0 */
		t = rn_newpair(v_arg, b, nodes);
		tt = t->rn_left;
		if ((cp[p->rn_offset] & p->rn_bmask) == 0)
			p->rn_left = t;
		else
			p->rn_right = t;
		x->rn_parent = t;
		t->rn_parent = p;
		if ((cp[t->rn_offset] & t->rn_bmask) == 0) {
			t->rn_right = x;
		} else {
			t->rn_right = tt;
			t->rn_left = x;
		}
	}
	return (tt);
}

static struct radix_node *
rn_addmask(n_arg, search, skip)
	int search, skip;
	void *n_arg;
{
	caddr_t netmask = (caddr_t)n_arg;
	struct radix_node *x;
	caddr_t cp, cplim;
	int b = 0, mlen, j;
	int maskduplicated, m0, isnormal;
	struct radix_node *saved_x;
	int last_zeroed = 0;
	char addmask_key[MAX_KEYLEN];

	if ((mlen = LEN(netmask)) > max_keylen)
		mlen = max_keylen;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnhead->rnh_nodes);
	if (skip > 1)
		bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
	if ((m0 = mlen) > skip)
		bcopy(netmask + skip, addmask_key + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0; )
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		bzero(addmask_key + m0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	x = rn_search(addmask_key, rn_masktop);
	if (bcmp(addmask_key, x->rn_key, mlen) != 0)
		x = 0;
	if (x || search)
		return (x);
	R_Zalloc(x, radix_node_cache, max_keylen + 2 * sizeof (*x));

	if ((saved_x = x) == 0)
		return (0);
	netmask = cp = (caddr_t)(x + 2);
	bcopy(addmask_key, cp, mlen);
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	if (maskduplicated) {
#ifdef	_KERNEL
		cmn_err(CE_WARN, "rn_addmask: mask impossibly already in tree");
#else
		syslog(LOG_ERR, "rn_addmask: mask impossibly already in tree");
#endif	/* _KERNEL */
		Free(saved_x, radix_node_cache);
		return (x);
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 * First find the first byte with a 0 bit, then if there are
	 * more bits left (remember we already trimmed the trailing 0's),
	 * the pattern must be one of those in normal_chars[], or we have
	 * a non-contiguous mask.
	 */
	cplim = netmask + mlen;
	isnormal = 1;
	for (cp = netmask + skip; (cp < cplim) && *(uchar_t *)cp == 0xff; )
		cp++;
	if (cp != cplim) {
		static uint8_t normal_chars[] = {
			0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff};

		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = 0;
	}
	b += (cp - netmask) << 3;
	x->rn_bit = -1 - b;
	if (isnormal)
		x->rn_flags |= RNF_NORMAL;
	return (x);
}
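
/*
 * The normalcy check above, worked through for the contiguous mask
 * { 4, 0xff, 0xff, 0xf0 } (a sketch; byte 0 is the trimmed length):
 * the scan skips the 0xff bytes and stops at 0xf0, the j loop counts
 * b == 4 leading one bits, and 0xf0 == normal_chars[4] in the last
 * byte, so the mask is normal.  Its index is 4 + (3 << 3) == 28,
 * giving rn_bit == -29.  A byte such as 0xa0 matches no normal_chars
 * entry, so a mask containing it is marked non-contiguous.
 */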

/* arbitrary ordering for non-contiguous masks */
static boolean_t
rn_lexobetter(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	uchar_t *mp = m_arg, *np = n_arg, *lim;

	if (LEN(mp) > LEN(np))
		/* not really, but need to check longer one first */
		return (B_TRUE);
	if (LEN(mp) == LEN(np))
		for (lim = mp + LEN(mp); mp < lim; )
			if (*mp++ > *np++)
				return (B_TRUE);
	return (B_FALSE);
}
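
/*
 * Sketch: for the equal-length non-contiguous masks
 * m = { 4, 0xff, 0x0f, 0xff } and n = { 4, 0xff, 0x00, 0xff },
 * rn_lexobetter(m, n) is B_TRUE because the first differing byte has
 * 0x0f > 0x00; a longer mask always sorts first regardless of
 * content, per the comment above.
 */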

static struct radix_mask *
rn_new_radix_mask(tt, next)
	struct radix_node *tt;
	struct radix_mask *next;
{
	struct radix_mask *m;

	MKGet(m);
	if (m == 0) {
#ifndef	_KERNEL
		syslog(LOG_ERR, "Mask for route not entered\n");
#endif	/* _KERNEL */
		return (0);
	}
	bzero(m, sizeof (*m));
	m->rm_bit = tt->rn_bit;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return (m);
}

static struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
	void *v_arg, *n_arg;
	struct radix_node_head *head;
	struct radix_node treenodes[2];
{
	caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
	struct radix_node *t, *x = 0, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf = 0;
	int keyduplicated;
	caddr_t mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 0, top->rn_offset)) == 0)
			return (0);
		b_leaf = x->rn_bit;
		b = -1 - x->rn_bit;
		netmask = x->rn_key;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask)
				return (0);
			if (netmask == 0 ||
			    (tt->rn_mask &&
			    /* index (netmask) > node */
			    ((b_leaf < tt->rn_bit) ||
			    rn_refines(netmask, tt->rn_mask) ||
			    rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 *
		 * We also reverse, or doubly link the list through the
		 * parent pointer.
		 */
		if (tt == saved_tt) {
			struct	radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_parent = x = t->rn_parent;
			t->rn_parent = tt; /* parent */
			if (x->rn_left == t)
				x->rn_left = tt;
			else
				x->rn_right = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
			/* Set rn_parent value for tt and tt->rn_dupedkey */
			tt->rn_parent = t;
			if (tt->rn_dupedkey)
				tt->rn_dupedkey->rn_parent = tt;
		}
		tt->rn_key = v;
		tt->rn_bit = -1;
		tt->rn_flags = RNF_ACTIVE;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_bit = x->rn_bit;
		tt->rn_flags |= x->rn_flags & RNF_NORMAL;
	}
	t = saved_tt->rn_parent;
	if (keyduplicated)
		goto key_exists;
	b_leaf = -1 - t->rn_bit;
	if (t->rn_right == saved_tt)
		x = t->rn_left;
	else
		x = t->rn_right;
	/* Promote general routes from below */
	if (x->rn_bit < 0) {
		for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
			if (x->rn_mask && (x->rn_bit >= b_leaf) &&
			    x->rn_mklist == 0) {
				*mp = m = rn_new_radix_mask(x, 0);
				if (m)
					mp = &m->rm_mklist;
			}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist)
			if (m->rm_bit >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
key_exists:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_bit))
		return (tt); /* can't lift at all */
	b_leaf = tt->rn_bit;
	do {
		x = t;
		t = t->rn_parent;
	} while (b <= t->rn_bit && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) {
		if (m->rm_bit < b_leaf)
			continue;
		if (m->rm_bit > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
#ifdef	_KERNEL
				cmn_err(CE_WARN, "Non-unique normal route, "
				    "mask not entered\n");
#else
				syslog(LOG_ERR, "Non-unique normal route, "
				    "mask not entered\n");
#endif	/* _KERNEL */
				return (tt);
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return (tt);
		}
		if (rn_refines(netmask, mmask) ||
		    rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return (tt);
}

static struct radix_node *
rn_delete(v_arg, netmask_arg, head)
	void *v_arg, *netmask_arg;
	struct radix_node_head *head;
{
	struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_offset;
	vlen = LEN(v);
	saved_tt = tt;
	top = x;
	if (tt == 0 ||
	    bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 1, head_off)) == 0)
			return (0);
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
#ifdef	_KERNEL
			cmn_err(CE_WARN,
			    "rn_delete: inconsistent annotation\n");
#else
			syslog(LOG_ERR, "rn_delete: inconsistent annotation\n");
#endif	/* _KERNEL */
			return (0);  /* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
#ifdef	_KERNEL
			cmn_err(CE_WARN,
			    "rn_delete: inconsistent annotation 2\n");
#else
			syslog(LOG_ERR,
			    "rn_delete: inconsistent annotation 2\n");
#endif	/* _KERNEL */
			goto on1;
		}
		if (--m->rm_refs >= 0)
			goto on1;
	}
	b = -1 - tt->rn_bit;
	t = saved_tt->rn_parent;
	if (b > t->rn_bit)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_parent;
	} while (b <= t->rn_bit && x != top);
	for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0) {
#ifdef	_KERNEL
		cmn_err(CE_WARN, "rn_delete: couldn't find our annotation\n");
#else
		syslog(LOG_ERR, "rn_delete: couldn't find our annotation\n");
#endif	/* _KERNEL */
		if (tt->rn_flags & RNF_NORMAL)
			return (0); /* Dangling ref to us */
	}
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
	t = tt->rn_parent;
	dupedkey = saved_tt->rn_dupedkey;
	if (dupedkey) {
		/*
		 * Here, tt is the deletion target and
		 * saved_tt is the head of the dupekey chain.
		 */
		if (tt == saved_tt) {
			/* remove from head of chain */
			x = dupedkey; x->rn_parent = t;
			if (t->rn_left == tt)
				t->rn_left = x;
			else
				t->rn_right = x;
		} else {
			/* find node in front of tt on the chain */
			for (x = p = saved_tt; p && p->rn_dupedkey != tt; )
				p = p->rn_dupedkey;
			if (p) {
				p->rn_dupedkey = tt->rn_dupedkey;
				if (tt->rn_dupedkey)		/* parent */
					tt->rn_dupedkey->rn_parent = p;
								/* parent */
			} else
#ifdef	_KERNEL
				cmn_err(CE_WARN,
				    "rn_delete: couldn't find us\n");
#else
				syslog(LOG_ERR,
				    "rn_delete: couldn't find us\n");
#endif	/* _KERNEL */
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
			*++x = *t;
			p = t->rn_parent;
			if (p->rn_left == t)
				p->rn_left = x;
			else
				p->rn_right = x;
			x->rn_left->rn_parent = x;
			x->rn_right->rn_parent = x;
		}
		goto out;
	}
	if (t->rn_left == tt)
		x = t->rn_right;
	else
		x = t->rn_left;
	p = t->rn_parent;
	if (p->rn_right == t)
		p->rn_right = x;
	else
		p->rn_left = x;
	x->rn_parent = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_bit >= 0) {
			for (mp = &x->rn_mklist; (m = *mp) != NULL; )
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/*
			 * If there are any key,mask pairs in a sibling
			 * duped-key chain, some subset will appear sorted
			 * in the same order attached to our mklist
			 */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;
					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						MKFree(m);
					m = mm;
				}
			if (m)
#ifdef	_KERNEL
				cmn_err(CE_WARN,
				    "rn_delete: Orphaned Mask %p at %p\n",
				    (void *)m, (void *)x);
#else
				syslog(LOG_ERR,
				    "rn_delete: Orphaned Mask %p at %p\n",
				    (void *)m, (void *)x);
#endif	/* _KERNEL */
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
		*t = *x;
		t->rn_left->rn_parent = t;
		t->rn_right->rn_parent = t;
		p = x->rn_parent;
		if (p->rn_left == x)
			p->rn_left = t;
		else
			p->rn_right = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

/*
 * Walk the radix tree; for the kernel routing table, we hold additional
 * refs on the ire_bucket to ensure that the walk function f() does not
 * run into trashed memory. The kernel routing table is identified by
 * a rnh_treetop that has RNF_SUNW_FT set in the rn_flags.
 * Note that all refs taken in rn_walktree are released before it returns,
 * so f() must take any additional references on memory
 * to be passed back to the caller of rn_walktree.
 */
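
/*
 * A minimal sketch of a walker (the callback below is hypothetical,
 * not part of this file): rn_walktree applies f to every leaf and
 * stops early if f returns non-zero.
 *
 *	static int
 *	count_leaves(struct radix_node *rn, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint_t nleaves = 0;
 *	(void) rnh->rnh_walktree(rnh, count_leaves, &nleaves);
 */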
static int
rn_walktree(h, f, w)
	struct radix_node_head *h;
	walktree_f_t *f;
	void *w;
{
	return (rn_walktree_mt(h, f, w, NULL, NULL));
}

static int
rn_walktree_mt(h, f, w, lockf, unlockf)
	struct radix_node_head *h;
	walktree_f_t *f;
	void *w;
	lockf_t lockf, unlockf;
{
	int error;
	struct radix_node *base, *next;
	struct radix_node *rn = h->rnh_treetop;
	boolean_t is_mt = B_FALSE;

	if (lockf != NULL) {
		ASSERT(unlockf != NULL);
		is_mt = B_TRUE;
	}
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	RADIX_NODE_HEAD_RLOCK(h);
	/* First time through node, go left */
	while (rn->rn_bit >= 0) {
		rn = rn->rn_left;
	}

	if (is_mt)
		(*lockf)(rn);

	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_parent->rn_right == rn &&
		    (rn->rn_flags & RNF_ROOT) == 0) {
			rn = rn->rn_parent;
		}
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0; ) {
			rn = rn->rn_left;
		}
		next = rn;

		if (is_mt && next != NULL)
			(*lockf)(next);

		/* Process leaves */
		while ((rn = base) != NULL) {
			base = rn->rn_dupedkey;

			if (is_mt && base != NULL)
				(*lockf)(base);

			RADIX_NODE_HEAD_UNLOCK(h);
			if (!(rn->rn_flags & RNF_ROOT) &&
			    (error = (*f)(rn, w))) {
				if (is_mt) {
					(*unlockf)(rn);
					if (base != NULL)
						(*unlockf)(base);
					if (next != NULL)
						(*unlockf)(next);
				}
				return (error);
			}
			if (is_mt)
				(*unlockf)(rn);
			RADIX_NODE_HEAD_RLOCK(h);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT) {
			RADIX_NODE_HEAD_UNLOCK(h);
			/*
			 * no ref to release, since we never take a ref
			 * on the root node; it can't be deleted.
			 */
			return (0);
		}
	}
	/* NOTREACHED */
}

/*
 * Allocate and initialize an empty tree. This has 3 nodes, which are
 * part of the radix_node_head (in the order <left,root,right>) and are
 * marked RNF_ROOT so they cannot be freed.
 * The leaves have all-zero and all-one keys, with significant
 * bits starting at 'off'.
 * Return 1 on success, 0 on error.
 */
int
rn_inithead(head, off)
	void **head;
	int off;
{
	struct radix_node_head *rnh;
	struct radix_node *t, *tt, *ttt;

	if (*head)
		return (1);
	R_ZallocSleep(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
#ifdef _KERNEL
	RADIX_NODE_HEAD_LOCK_INIT(rnh);
#endif
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_right = ttt;
	t->rn_parent = t;
	tt = t->rn_left;	/* ... which in turn is rnh->rnh_nodes */
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_bit = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_matchaddr_args = rn_match_args;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_walktree_mt = rn_walktree_mt;
	rnh->rnh_walktree_from = NULL;	/* not implemented */
	rnh->rnh_treetop = t;
	return (1);
}
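
/*
 * Typical use (a sketch with a hypothetical caller; actual consumers
 * choose 'off' to match their key layout): since 'off' is the bit
 * position where significant key bits start, a table whose keys carry
 * a one-byte length prefix might pass 8 to start bit tests past it.
 *
 *	struct radix_node_head *head = NULL;
 *
 *	if (rn_inithead((void **)&head, 8) == 0)
 *		panic("cannot create table");
 */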

void
rn_init()
{
	char *cp, *cplim;

#ifdef	_KERNEL
	radix_mask_cache = kmem_cache_create("radix_mask",
	    sizeof (struct radix_mask), 0, NULL, NULL, NULL, NULL, NULL, 0);
	radix_node_cache = kmem_cache_create("radix_node",
	    max_keylen + 2 * sizeof (struct radix_node),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif /* _KERNEL */
	R_ZallocSleep(rn_zeros, char *, 2 * max_keylen);

	ASSERT(rn_zeros != NULL);
	bzero(rn_zeros, 2 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead((void **)(void *)&mask_rnhead, 0) == 0)
		panic("rn_init: could not init mask_rnhead");
}

int
rn_freenode(n, p)
	struct radix_node *n;
	void *p;
{
	struct	radix_node_head *rnh = p;
	struct	radix_node *d;

	d = rnh->rnh_deladdr(n->rn_key, NULL, rnh);
	if (d != NULL) {
		Free(d, radix_node_cache);
	}
	return (0);
}

void
rn_freehead(rnh)
	struct radix_node_head *rnh;
{
	(void) rn_walktree(rnh, rn_freenode, rnh);

	rnh->rnh_addaddr = NULL;
	rnh->rnh_deladdr = NULL;
	rnh->rnh_matchaddr = NULL;
	rnh->rnh_lookup = NULL;
	rnh->rnh_walktree = NULL;

#ifdef	_KERNEL
	RADIX_NODE_HEAD_DESTROY(rnh);
	FreeHead(rnh, sizeof (*rnh));
#else
	Free(rnh, NULL);
#endif	/* _KERNEL */
}

void
rn_fini()
{
	struct radix_mask *m;

	if (rn_zeros != NULL) {
#ifdef _KERNEL
		FreeHead(rn_zeros, 2 * max_keylen);
#else
		Free(rn_zeros, NULL);
#endif
		rn_zeros = NULL;
	}

	if (mask_rnhead != NULL) {
		rn_freehead(mask_rnhead);
		mask_rnhead = NULL;
	}

	while ((m = rn_mkfreelist) != NULL) {
		rn_mkfreelist = m->rm_mklist;
		Free(m, NULL);
	}
}
1256