xref: /freebsd/sys/net/route/route_helpers.c (revision e0c4386e7e71d93b0edc0c8fa156263fc4a8b0b6)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#ifdef INET
#include <netinet/in_fib.h>
#endif
#ifdef INET6
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#endif
#include <net/vnet.h>

#define	DEBUG_MOD_NAME	rt_helpers
#define	DEBUG_MAX_LEVEL	LOG_DEBUG2
#include <net/route/route_debug.h>
_DECLARE_DEBUG(LOG_INFO);

/*
 * RIB helper functions.
 */

void
rib_walk_ext_locked(struct rib_head *rnh, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{
	if (hook_f != NULL)
		hook_f(rnh, RIB_WALK_HOOK_PRE, arg);
	rnh->rnh_walktree(&rnh->head, (walktree_f_t *)wa_f, arg);
	if (hook_f != NULL)
		hook_f(rnh, RIB_WALK_HOOK_POST, arg);
}

/*
 * Calls @wa_f with @arg for each entry in the table pointed to by @rnh.
 *
 * The @hook_f callback is called before and after the tree traversal
 *  while holding the table lock.
 *
 * The table is traversed under a read lock unless @wlock is set.
 */
void
rib_walk_ext_internal(struct rib_head *rnh, bool wlock, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{
	RIB_RLOCK_TRACKER;

	if (wlock)
		RIB_WLOCK(rnh);
	else
		RIB_RLOCK(rnh);
	rib_walk_ext_locked(rnh, wa_f, hook_f, arg);
	if (wlock)
		RIB_WUNLOCK(rnh);
	else
		RIB_RUNLOCK(rnh);
}

void
rib_walk_ext(uint32_t fibnum, int family, bool wlock, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{
	struct rib_head *rnh;

	if ((rnh = rt_tables_get_rnh(fibnum, family)) != NULL)
		rib_walk_ext_internal(rnh, wlock, wa_f, hook_f, arg);
}

/*
 * Calls @wa_f with @arg for each entry in the table specified by
 * @family and @fibnum.
 *
 * The table is traversed under a read lock unless @wlock is set.
 */
void
rib_walk(uint32_t fibnum, int family, bool wlock, rib_walktree_f_t *wa_f,
    void *arg)
{

	rib_walk_ext(fibnum, family, wlock, wa_f, NULL, arg);
}

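/*
 * Example (an illustrative sketch, not used elsewhere in this file):
 *  counting the IPv4 routes of the default fib with rib_walk(), assuming
 *  the rib_walktree_f_t callback signature of (struct rtentry *, void *).
 *  The count_rt() helper name is hypothetical.
 *
 *	static int
 *	count_rt(struct rtentry *rt, void *arg)
 *	{
 *		(*(uint32_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint32_t cnt = 0;
 *	rib_walk(RT_DEFAULT_FIB, AF_INET, false, count_rt, &cnt);
 */
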
/*
 * Calls @wa_f with @arg for each entry in the table matching @prefix/@mask.
 *
 * The following flags are supported:
 *  RIB_FLAG_WLOCK: acquire an exclusive (write) lock
 *  RIB_FLAG_LOCKED: assume the table is already locked and skip locking
 *
 * By default, the table is traversed under a read lock.
 */
void
rib_walk_from(uint32_t fibnum, int family, uint32_t flags, struct sockaddr *prefix,
    struct sockaddr *mask, rib_walktree_f_t *wa_f, void *arg)
{
	RIB_RLOCK_TRACKER;
	struct rib_head *rnh = rt_tables_get_rnh(fibnum, family);

	if (rnh == NULL)
		return;

	if (flags & RIB_FLAG_WLOCK)
		RIB_WLOCK(rnh);
	else if (!(flags & RIB_FLAG_LOCKED))
		RIB_RLOCK(rnh);

	rnh->rnh_walktree_from(&rnh->head, prefix, mask, (walktree_f_t *)wa_f, arg);

	if (flags & RIB_FLAG_WLOCK)
		RIB_WUNLOCK(rnh);
	else if (!(flags & RIB_FLAG_LOCKED))
		RIB_RUNLOCK(rnh);
}

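/*
 * Example (an illustrative sketch): walking only the entries covered by
 *  192.0.2.0/24 in the default fib while holding the write lock.  The
 *  walk_cb() callback name is hypothetical; 0xC0000200 and 0xFFFFFF00 are
 *  192.0.2.0 and the /24 netmask in host byte order.
 *
 *	struct sockaddr_in px = {
 *		.sin_len = sizeof(struct sockaddr_in),
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(0xC0000200),
 *	};
 *	struct sockaddr_in mask = {
 *		.sin_len = sizeof(struct sockaddr_in),
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(0xFFFFFF00),
 *	};
 *
 *	rib_walk_from(RT_DEFAULT_FIB, AF_INET, RIB_FLAG_WLOCK,
 *	    (struct sockaddr *)&px, (struct sockaddr *)&mask, walk_cb, NULL);
 */
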
/*
 * Iterates over all existing fibs in the system, calling the
 *  @hook_f function before/after traversing each fib and calling
 *  the @wa_f function for each element in the current fib.
 * If @family is not AF_UNSPEC, only the table of that particular
 *  address family is traversed in each fib.
 */
void
rib_foreach_table_walk(int family, bool wlock, rib_walktree_f_t *wa_f,
    rib_walk_hook_f_t *hook_f, void *arg)
{

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		/* Do we want some specific family? */
		if (family != AF_UNSPEC) {
			rib_walk_ext(fibnum, family, wlock, wa_f, hook_f, arg);
			continue;
		}

		for (int i = 1; i <= AF_MAX; i++)
			rib_walk_ext(fibnum, i, wlock, wa_f, hook_f, arg);
	}
}

/*
 * Iterates over all existing fibs in the system and deletes each element
 *  for which the @filter_f function returns a non-zero value.
 * If @family is not AF_UNSPEC, only the table of that particular
 *  address family is traversed in each fib.
 */
void
rib_foreach_table_walk_del(int family, rib_filter_f_t *filter_f, void *arg)
{

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		/* Do we want some specific family? */
		if (family != AF_UNSPEC) {
			rib_walk_del(fibnum, family, filter_f, arg, 0);
			continue;
		}

		for (int i = 1; i <= AF_MAX; i++)
			rib_walk_del(fibnum, i, filter_f, arg, 0);
	}
}

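/*
 * Example (an illustrative sketch): dropping every route in every fib that
 *  resolves via a given interface, assuming the rib_filter_f_t callback
 *  signature of (const struct rtentry *, const struct nhop_object *, void *).
 *  The match_ifp() helper name is hypothetical.
 *
 *	static int
 *	match_ifp(const struct rtentry *rt, const struct nhop_object *nh,
 *	    void *arg)
 *	{
 *		return (nh->nh_ifp == (struct ifnet *)arg);
 *	}
 *
 *	rib_foreach_table_walk_del(AF_UNSPEC, match_ifp, ifp);
 */
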
/*
 * Wrapper around the control plane functions for performing AF-agnostic
 *  lookups.
 * @fibnum: fib to perform the lookup in.
 * @dst: sockaddr with family and addr filled in. IPv6 addresses need to be in
 *  deembedded form.
 * @flags: fib(9) flags.
 * @flowid: flow id for path selection in the multipath case.
 *
 * Returns the looked-up nhop_object or NULL.
 *
 * Requires NET_EPOCH.
 */
struct nhop_object *
rib_lookup(uint32_t fibnum, const struct sockaddr *dst, uint32_t flags,
    uint32_t flowid)
{
	struct nhop_object *nh;

	nh = NULL;

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
	{
		const struct sockaddr_in *a = (const struct sockaddr_in *)dst;
		nh = fib4_lookup(fibnum, a->sin_addr, 0, flags, flowid);
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		const struct sockaddr_in6 *a = (const struct sockaddr_in6 *)dst;
		nh = fib6_lookup(fibnum, &a->sin6_addr, a->sin6_scope_id,
		    flags, flowid);
		break;
	}
#endif
	}

	return (nh);
}

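/*
 * Example (an illustrative sketch): resolving the outgoing interface for an
 *  IPv4 destination.  Assumes @dst_sin is a filled-in struct sockaddr_in;
 *  the lookup result is only stable while the caller stays inside the
 *  network epoch.
 *
 *	struct epoch_tracker et;
 *	struct nhop_object *nh;
 *	struct ifnet *ifp = NULL;
 *
 *	NET_EPOCH_ENTER(et);
 *	nh = rib_lookup(RT_DEFAULT_FIB, (struct sockaddr *)&dst_sin,
 *	    NHR_NONE, 0);
 *	if (nh != NULL)
 *		ifp = nh->nh_ifp;
 *	NET_EPOCH_EXIT(et);
 */
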
#ifdef ROUTE_MPATH
static void
notify_add(struct rib_cmd_info *rc, const struct weightened_nhop *wn_src,
    route_notification_t *cb, void *cbdata)
{
	rc->rc_nh_new = wn_src->nh;
	rc->rc_nh_weight = wn_src->weight;

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		char nhbuf[NHOP_PRINT_BUFSIZE] __unused;
		FIB_NH_LOG(LOG_DEBUG2, wn_src->nh, "RTM_ADD for %s @ w=%u",
		    nhop_print_buf(wn_src->nh, nhbuf, sizeof(nhbuf)),
		    wn_src->weight);
	}
	cb(rc, cbdata);
}

static void
notify_del(struct rib_cmd_info *rc, const struct weightened_nhop *wn_src,
    route_notification_t *cb, void *cbdata)
{
	rc->rc_nh_old = wn_src->nh;
	rc->rc_nh_weight = wn_src->weight;

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		char nhbuf[NHOP_PRINT_BUFSIZE] __unused;
		FIB_NH_LOG(LOG_DEBUG2, wn_src->nh, "RTM_DEL for %s @ w=%u",
		    nhop_print_buf(wn_src->nh, nhbuf, sizeof(nhbuf)),
		    wn_src->weight);
	}
	cb(rc, cbdata);
}

static void
decompose_change_notification(const struct rib_cmd_info *rc, route_notification_t *cb,
    void *cbdata)
{
	uint32_t num_old, num_new;
	const struct weightened_nhop *wn_old, *wn_new;
	struct weightened_nhop tmp = { NULL, 0 };
	uint32_t idx_old = 0, idx_new = 0;

	struct rib_cmd_info rc_del = { .rc_cmd = RTM_DELETE, .rc_rt = rc->rc_rt };
	struct rib_cmd_info rc_add = { .rc_cmd = RTM_ADD, .rc_rt = rc->rc_rt };

	if (NH_IS_NHGRP(rc->rc_nh_old)) {
		wn_old = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_old, &num_old);
	} else {
		tmp.nh = rc->rc_nh_old;
		tmp.weight = rc->rc_nh_weight;
		wn_old = &tmp;
		num_old = 1;
	}
	if (NH_IS_NHGRP(rc->rc_nh_new)) {
		wn_new = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_new, &num_new);
	} else {
		tmp.nh = rc->rc_nh_new;
		tmp.weight = rc->rc_nh_weight;
		wn_new = &tmp;
		num_new = 1;
	}
	IF_DEBUG_LEVEL(LOG_DEBUG) {
		char buf_old[NHOP_PRINT_BUFSIZE], buf_new[NHOP_PRINT_BUFSIZE];
		nhop_print_buf_any(rc->rc_nh_old, buf_old, NHOP_PRINT_BUFSIZE);
		nhop_print_buf_any(rc->rc_nh_new, buf_new, NHOP_PRINT_BUFSIZE);
		FIB_NH_LOG(LOG_DEBUG, wn_old[0].nh, "change %s -> %s", buf_old, buf_new);
	}

	/*
	 * Here we have one (or two) multipath groups, and the transition
	 *  between them needs to be reported to the caller as a series
	 *  of primitive (RTM_DEL, RTM_ADD) operations.
	 *
	 * Leverage the fact that each @wn array (nexthop group) has its
	 *  nexthops sorted by their indices.
	 * [1] -> [1, 2] = A{2}
	 * [1, 2] -> [1] = D{2}
	 * [1, 2, 4] -> [1, 3, 4] = D{2}, A{3}
	 * [1, 2] -> [3, 4] = D{1}, D{2}, A{3}, A{4}
	 */
	while ((idx_old < num_old) && (idx_new < num_new)) {
		uint32_t nh_idx_old = wn_old[idx_old].nh->nh_priv->nh_idx;
		uint32_t nh_idx_new = wn_new[idx_new].nh->nh_priv->nh_idx;

		if (nh_idx_old == nh_idx_new) {
			if (wn_old[idx_old].weight != wn_new[idx_new].weight) {
				/* Update weight by providing del/add notifications */
				notify_del(&rc_del, &wn_old[idx_old], cb, cbdata);
				notify_add(&rc_add, &wn_new[idx_new], cb, cbdata);
			}
			idx_old++;
			idx_new++;
		} else if (nh_idx_old < nh_idx_new) {
			/* [1, ~2~, 4], [1, ~3~, 4] */
			notify_del(&rc_del, &wn_old[idx_old], cb, cbdata);
			idx_old++;
		} else {
			/* nh_idx_old > nh_idx_new. */
			notify_add(&rc_add, &wn_new[idx_new], cb, cbdata);
			idx_new++;
		}
	}

	while (idx_old < num_old) {
		notify_del(&rc_del, &wn_old[idx_old], cb, cbdata);
		idx_old++;
	}

	while (idx_new < num_new) {
		notify_add(&rc_add, &wn_new[idx_new], cb, cbdata);
		idx_new++;
	}
}

/*
 * Decompose multipath cmd info @rc into a list of add/del/change
 *  single-path operations, calling @cb callback for each operation.
 * Assumes at least one of the nexthops in @rc is multipath.
 */
void
rib_decompose_notification(const struct rib_cmd_info *rc, route_notification_t *cb,
    void *cbdata)
{
	const struct weightened_nhop *wn;
	uint32_t num_nhops;
	struct rib_cmd_info rc_new;

	rc_new = *rc;
	switch (rc->rc_cmd) {
	case RTM_ADD:
		if (!NH_IS_NHGRP(rc->rc_nh_new))
			return;
		wn = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_new, &num_nhops);
		for (uint32_t i = 0; i < num_nhops; i++) {
			notify_add(&rc_new, &wn[i], cb, cbdata);
		}
		break;
	case RTM_DELETE:
		if (!NH_IS_NHGRP(rc->rc_nh_old))
			return;
		wn = nhgrp_get_nhops((struct nhgrp_object *)rc->rc_nh_old, &num_nhops);
		for (uint32_t i = 0; i < num_nhops; i++) {
			notify_del(&rc_new, &wn[i], cb, cbdata);
		}
		break;
	case RTM_CHANGE:
		if (!NH_IS_NHGRP(rc->rc_nh_old) && !NH_IS_NHGRP(rc->rc_nh_new))
			return;
		decompose_change_notification(rc, cb, cbdata);
		break;
	}
}
#endif

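/*
 * Example (an illustrative sketch, only meaningful with ROUTE_MPATH): a
 *  subscriber that can only handle single-nexthop events might feed
 *  multipath-capable updates through rib_decompose_notification().  The
 *  export_cb() callback name is hypothetical and stands for any
 *  route_notification_t consumer.
 *
 *	if (NH_IS_NHGRP(rc->rc_nh_old) || NH_IS_NHGRP(rc->rc_nh_new))
 *		rib_decompose_notification(rc, export_cb, cbdata);
 *	else
 *		export_cb(rc, cbdata);
 */
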
union sockaddr_union {
	struct sockaddr		sa;
	struct sockaddr_in	sin;
	struct sockaddr_in6	sin6;
	char			_buf[32];
};

/*
 * Creates a nexthop suitable for use as a default route nhop.
 * Helper for the various kernel subsystems adding/changing the default route.
 */
int
rib_add_default_route(uint32_t fibnum, int family, struct ifnet *ifp,
    struct sockaddr *gw, struct rib_cmd_info *rc)
{
	struct route_nhop_data rnd = { .rnd_weight = RT_DEFAULT_WEIGHT };
	union sockaddr_union saun = {};
	struct sockaddr *dst = &saun.sa;
	int error;

	switch (family) {
#ifdef INET
	case AF_INET:
		saun.sin.sin_family = AF_INET;
		saun.sin.sin_len = sizeof(struct sockaddr_in);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		saun.sin6.sin6_family = AF_INET6;
		saun.sin6.sin6_len = sizeof(struct sockaddr_in6);
		break;
#endif
	default:
		return (EAFNOSUPPORT);
	}

	struct ifaddr *ifa = ifaof_ifpforaddr(gw, ifp);
	if (ifa == NULL)
		return (ENOENT);

	struct nhop_object *nh = nhop_alloc(fibnum, family);
	if (nh == NULL)
		return (ENOMEM);

	nhop_set_gw(nh, gw, true);
	nhop_set_transmit_ifp(nh, ifp);
	nhop_set_src(nh, ifa);
	nhop_set_pxtype_flag(nh, NHF_DEFAULT);
	rnd.rnd_nhop = nhop_get_nhop(nh, &error);

	if (error == 0)
		error = rib_add_route_px(fibnum, dst, 0, &rnd, RTM_F_CREATE, rc);
	return (error);
}

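/*
 * Example (an illustrative sketch): installing 192.0.2.1 (0xC0000201 in host
 *  byte order) as the IPv4 default gateway reachable via @ifp in the default
 *  fib.  Error handling is elided.
 *
 *	struct sockaddr_in gw = {
 *		.sin_len = sizeof(struct sockaddr_in),
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(0xC0000201),
 *	};
 *	struct rib_cmd_info rc;
 *	int error;
 *
 *	error = rib_add_default_route(RT_DEFAULT_FIB, AF_INET, ifp,
 *	    (struct sockaddr *)&gw, &rc);
 */
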
#ifdef INET
/*
 * Checks if the found key in the trie contains (<=) a prefix covering
 *  @addr/@plen.
 * Returns the most specific rtentry matching the condition or NULL.
 */
static struct rtentry *
get_inet_parent_prefix(uint32_t fibnum, struct in_addr addr, int plen)
{
	struct route_nhop_data rnd;
	struct rtentry *rt;
	struct in_addr addr4;
	uint32_t scopeid;
	int parent_plen;
	struct radix_node *rn;

	rt = fib4_lookup_rt(fibnum, addr, 0, NHR_UNLOCKED, &rnd);
	if (rt == NULL)
		return (NULL);

	rt_get_inet_prefix_plen(rt, &addr4, &parent_plen, &scopeid);
	if (parent_plen <= plen)
		return (rt);

	/*
	 * There can be multiple prefixes associated with the found key:
	 * 10.0.0.0 -> 10.0.0.0/24, 10.0.0.0/23, 10.0.0.0/22, etc.
	 * All such prefixes are linked via rn_dupedkey, from most specific
	 *  to least specific. Iterate over them to check if any of these
	 *  prefixes are wide enough (parent_plen <= plen) to cover the
	 *  desired prefix.
	 */
	rn = (struct radix_node *)rt;
	while ((rn = rn_nextprefix(rn)) != NULL) {
		rt = RNTORT(rn);
		rt_get_inet_prefix_plen(rt, &addr4, &parent_plen, &scopeid);
		if (parent_plen <= plen)
			return (rt);
	}

	return (NULL);
}

/*
 * Returns the most specific prefix strictly containing (>) @addr/@plen.
 */
struct rtentry *
rt_get_inet_parent(uint32_t fibnum, struct in_addr addr, int plen)
{
	struct in_addr lookup_addr = { .s_addr = INADDR_BROADCAST };
	struct in_addr addr4 = addr;
	struct in_addr mask4;
	struct rtentry *rt;

	while (plen-- > 0) {
		/* Calculate wider mask & new key to lookup */
		mask4.s_addr = htonl(plen ? ~((1 << (32 - plen)) - 1) : 0);
		addr4.s_addr = htonl(ntohl(addr4.s_addr) & ntohl(mask4.s_addr));
		if (addr4.s_addr == lookup_addr.s_addr) {
			/* Skip lookup if the key is the same */
			continue;
		}
		lookup_addr = addr4;

		rt = get_inet_parent_prefix(fibnum, lookup_addr, plen);
		if (rt != NULL)
			return (rt);
	}

	return (NULL);
}
#endif

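/*
 * Worked example (an illustrative sketch): assume fib 0 contains only
 *  10.0.0.0/24 and 0.0.0.0/0.  rt_get_inet_parent(0, 10.0.0.0, 25) first
 *  masks the key down to /24 and returns the 10.0.0.0/24 rtentry.  With
 *  plen = 24 the equal-length /24 no longer qualifies, so the loop keeps
 *  shortening the prefix (skipping lookups while the masked key stays
 *  10.0.0.0) until the key collapses to 0.0.0.0 and the default route
 *  is returned.
 */
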
#ifdef INET6
/*
 * Checks if the found key in the trie contains (<=) a prefix covering
 *  @paddr/@plen.
 * Returns the most specific rtentry matching the condition or NULL.
 */
static struct rtentry *
get_inet6_parent_prefix(uint32_t fibnum, const struct in6_addr *paddr, int plen)
{
	struct route_nhop_data rnd;
	struct rtentry *rt;
	struct in6_addr addr6;
	uint32_t scopeid;
	int parent_plen;
	struct radix_node *rn;

	rt = fib6_lookup_rt(fibnum, paddr, 0, NHR_UNLOCKED, &rnd);
	if (rt == NULL)
		return (NULL);

	rt_get_inet6_prefix_plen(rt, &addr6, &parent_plen, &scopeid);
	if (parent_plen <= plen)
		return (rt);

	/*
	 * There can be multiple prefixes associated with the found key:
	 * 2001:db8:1::/64 -> 2001:db8:1::/56, 2001:db8:1::/48, etc.
	 * All such prefixes are linked via rn_dupedkey, from most specific
	 *  to least specific. Iterate over them to check if any of these
	 *  prefixes are wide enough (parent_plen <= plen) to cover the
	 *  desired prefix.
	 */
	rn = (struct radix_node *)rt;
	while ((rn = rn_nextprefix(rn)) != NULL) {
		rt = RNTORT(rn);
		rt_get_inet6_prefix_plen(rt, &addr6, &parent_plen, &scopeid);
		if (parent_plen <= plen)
			return (rt);
	}

	return (NULL);
}

void
ip6_writemask(struct in6_addr *addr6, uint8_t mask)
{
	uint32_t *cp;

	for (cp = (uint32_t *)addr6; mask >= 32; mask -= 32)
		*cp++ = 0xFFFFFFFF;
	if (mask > 0)
		*cp = htonl(mask ? ~((1 << (32 - mask)) - 1) : 0);
}

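/*
 * Worked example (an illustrative sketch): ip6_writemask(&mask6, 48) stores
 *  0xffffffff in the first 32-bit word, leaves mask = 16 after the loop and
 *  writes htonl(0xffff0000) into the second word, setting the leading 48
 *  bits to ones.  Words past the mask are left unmodified.
 */
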
/*
 * Returns the most specific prefix strictly containing (>) @paddr/@plen.
 */
struct rtentry *
rt_get_inet6_parent(uint32_t fibnum, const struct in6_addr *paddr, int plen)
{
	struct in6_addr lookup_addr = in6mask128;
	struct in6_addr addr6 = *paddr;
	struct in6_addr mask6;
	struct rtentry *rt;

	while (plen-- > 0) {
		/* Calculate wider mask & new key to lookup */
		ip6_writemask(&mask6, plen);
		IN6_MASK_ADDR(&addr6, &mask6);
		if (IN6_ARE_ADDR_EQUAL(&addr6, &lookup_addr)) {
			/* Skip lookup if the key is the same */
			continue;
		}
		lookup_addr = addr6;

		rt = get_inet6_parent_prefix(fibnum, &lookup_addr, plen);
		if (rt != NULL)
			return (rt);
	}

	return (NULL);
}
#endif

/*
 * Prints rtentry @rt data in the provided @buf.
 * Example: rt/192.168.0.0/24
 */
char *
rt_print_buf(const struct rtentry *rt, char *buf, size_t bufsize)
{
#if defined(INET) || defined(INET6)
	char abuf[INET6_ADDRSTRLEN];
	uint32_t scopeid;
	int plen;
#endif

	switch (rt_get_family(rt)) {
#ifdef INET
	case AF_INET:
		{
			struct in_addr addr4;
			rt_get_inet_prefix_plen(rt, &addr4, &plen, &scopeid);
			inet_ntop(AF_INET, &addr4, abuf, sizeof(abuf));
			snprintf(buf, bufsize, "rt/%s/%d", abuf, plen);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct in6_addr addr6;
			rt_get_inet6_prefix_plen(rt, &addr6, &plen, &scopeid);
			inet_ntop(AF_INET6, &addr6, abuf, sizeof(abuf));
			snprintf(buf, bufsize, "rt/%s/%d", abuf, plen);
		}
		break;
#endif
	default:
		snprintf(buf, bufsize, "rt/unknown_af#%d", rt_get_family(rt));
		break;
	}

	return (buf);
}

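/*
 * Example (an illustrative sketch): formatting a route for a debug message.
 *  The 64-byte buffer size is an arbitrary choice that comfortably fits the
 *  "rt/<address>/<plen>" output.
 *
 *	char rtbuf[64];
 *
 *	rt_print_buf(rt, rtbuf, sizeof(rtbuf));
 *	printf("changed %s\n", rtbuf);
 */
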
const char *
rib_print_cmd(int rib_cmd)
{
	switch (rib_cmd) {
	case RTM_ADD:
		return ("RTM_ADD");
	case RTM_CHANGE:
		return ("RTM_CHANGE");
	case RTM_DELETE:
		return ("RTM_DELETE");
	case RTM_GET:
		return ("RTM_GET");
	}

	return ("UNKNOWN");
}