xref: /freebsd/sys/net/route/route_ctl.c (revision b2bf0c7e5f4037d63458def91a026592468afd2f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <netinet/in.h>
#include <netinet6/scope6_var.h>

#include <vm/uma.h>

/*
 * This file contains control plane routing table functions.
 *
 * All functions assume they are called within the network epoch.
 */

struct rib_subscription {
	CK_STAILQ_ENTRY(rib_subscription)	next;
	rib_subscription_cb_t			*func;
	void					*arg;
	struct rib_head				*rnh;
	enum rib_subscription_type		type;
	struct epoch_context			epoch_ctx;
};

static int add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);
static int add_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc);
static int del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);
static int change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *nhd_orig, struct rib_cmd_info *rc);

static int rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc);

static void rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc);

static void destroy_subscription_epoch(epoch_context_t ctx);
#ifdef ROUTE_MPATH
static bool rib_can_multipath(struct rib_head *rh);
#endif

/* Per-vnet multipath routing configuration */
SYSCTL_DECL(_net_route);
#define	V_rib_route_multipath	VNET(rib_route_multipath)
#ifdef ROUTE_MPATH
#define _MP_FLAGS	CTLFLAG_RW
#else
#define _MP_FLAGS	CTLFLAG_RD
#endif
VNET_DEFINE(u_int, rib_route_multipath) = 1;
SYSCTL_UINT(_net_route, OID_AUTO, multipath, _MP_FLAGS | CTLFLAG_VNET,
    &VNET_NAME(rib_route_multipath), 0, "Enable route multipath");
#undef _MP_FLAGS

#if defined(INET) && defined(INET6)
FEATURE(ipv4_rfc5549_support, "Route IPv4 packets via IPv6 nexthops");
#define V_rib_route_ipv6_nexthop VNET(rib_route_ipv6_nexthop)
VNET_DEFINE(u_int, rib_route_ipv6_nexthop) = 1;
SYSCTL_UINT(_net_route, OID_AUTO, ipv6_nexthop, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(rib_route_ipv6_nexthop), 0, "Enable IPv4 route via IPv6 Next Hop address");
#endif

/* Routing table UMA zone */
VNET_DEFINE_STATIC(uma_zone_t, rtzone);
#define	V_rtzone	VNET(rtzone)

void
vnet_rtzone_init(void)
{

	V_rtzone = uma_zcreate("rtentry", sizeof(struct rtentry),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

#ifdef VIMAGE
void
vnet_rtzone_destroy(void)
{

	uma_zdestroy(V_rtzone);
}
#endif

static void
destroy_rtentry(struct rtentry *rt)
{
#ifdef VIMAGE
	struct nhop_object *nh = rt->rt_nhop;

	/*
	 * At this point rnh and nh_control may already be freed, and
	 * the nhop interface may have been migrated to a different vnet.
	 * Use the vnet stored in the nexthop to delete the entry.
	 */
#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh)) {
		struct weightened_nhop *wn;
		uint32_t num_nhops;

		wn = nhgrp_get_nhops((struct nhgrp_object *)nh, &num_nhops);
		nh = wn[0].nh;
	}
#endif
	CURVNET_SET(nhop_get_vnet(nh));
#endif

	/* Unreference nexthop */
	nhop_free_any(rt->rt_nhop);

	uma_zfree(V_rtzone, rt);

	CURVNET_RESTORE();
}

/*
 * Epoch callback indicating rtentry is safe to destroy
 */
static void
destroy_rtentry_epoch(epoch_context_t ctx)
{
	struct rtentry *rt;

	rt = __containerof(ctx, struct rtentry, rt_epoch_ctx);

	destroy_rtentry(rt);
}

/*
 * Schedule rtentry deletion
 */
static void
rtfree(struct rtentry *rt)
{

	KASSERT(rt != NULL, ("%s: NULL rt", __func__));

	epoch_call(net_epoch_preempt, destroy_rtentry_epoch,
	    &rt->rt_epoch_ctx);
}

static struct rib_head *
get_rnh(uint32_t fibnum, const struct rt_addrinfo *info)
{
	struct rib_head *rnh;
	struct sockaddr *dst;

	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));

	dst = info->rti_info[RTAX_DST];
	rnh = rt_tables_get_rnh(fibnum, dst->sa_family);

	return (rnh);
}

#if defined(INET) && defined(INET6)
static bool
rib_can_ipv6_nexthop_address(struct rib_head *rh)
{
	bool result;

	CURVNET_SET(rh->rib_vnet);
	result = !!V_rib_route_ipv6_nexthop;
	CURVNET_RESTORE();

	return (result);
}
#endif

#ifdef ROUTE_MPATH
static bool
rib_can_multipath(struct rib_head *rh)
{
	bool result;

	CURVNET_SET(rh->rib_vnet);
	result = !!V_rib_route_multipath;
	CURVNET_RESTORE();

	return (result);
}

/*
 * Checks if nhop is multipath-eligible.
 * Avoid nhops without gateways and redirects.
 *
 * Returns true for a multipath-eligible nexthop,
 * false otherwise.
 */
bool
nhop_can_multipath(const struct nhop_object *nh)
{

	if ((nh->nh_flags & NHF_MULTIPATH) != 0)
		return (true);
	if ((nh->nh_flags & NHF_GATEWAY) == 0)
		return (false);
	if ((nh->nh_flags & NHF_REDIRECT) != 0)
		return (false);

	return (true);
}
#endif

static uint32_t
get_info_weight(const struct rt_addrinfo *info, uint32_t default_weight)
{
	uint32_t weight;

	if (info->rti_mflags & RTV_WEIGHT)
		weight = info->rti_rmx->rmx_weight;
	else
		weight = default_weight;
	/* Keep the upper byte for admin distance purposes */
	if (weight > RT_MAX_WEIGHT)
		weight = RT_MAX_WEIGHT;
	else if (weight == 0)
		weight = default_weight;

	return (weight);
}

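/*
 * Example: a request carrying RTV_WEIGHT with rmx_weight == 10 yields 10;
 * rmx_weight == 0 or a missing RTV_WEIGHT flag falls back to
 * @default_weight; values above RT_MAX_WEIGHT are clamped, keeping the
 * upper byte free for administrative distance.
 */
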
bool
rt_is_host(const struct rtentry *rt)
{

	return ((rt->rte_flags & RTF_HOST) != 0);
}

sa_family_t
rt_get_family(const struct rtentry *rt)
{
	const struct sockaddr *dst;

	dst = (const struct sockaddr *)rt_key_const(rt);

	return (dst->sa_family);
}

/*
 * Returns pointer to nexthop or nexthop group
 * associated with @rt
 */
struct nhop_object *
rt_get_raw_nhop(const struct rtentry *rt)
{

	return (rt->rt_nhop);
}

#ifdef INET
/*
 * Stores IPv4 address and prefix length of @rt inside
 *  @paddr and @plen.
 * @pscopeid is currently always set to 0.
 */
void
rt_get_inet_prefix_plen(const struct rtentry *rt, struct in_addr *paddr,
    int *plen, uint32_t *pscopeid)
{
	const struct sockaddr_in *dst;

	dst = (const struct sockaddr_in *)rt_key_const(rt);
	KASSERT((dst->sin_family == AF_INET),
	    ("rt family is %d, not inet", dst->sin_family));
	*paddr = dst->sin_addr;
	dst = (const struct sockaddr_in *)rt_mask_const(rt);
	if (dst == NULL)
		*plen = 32;
	else
		*plen = bitcount32(dst->sin_addr.s_addr);
	*pscopeid = 0;
}

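/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * exporting an AF_INET rtentry as an address/prefix-length pair.
 */
#if 0
	struct in_addr addr;
	uint32_t scopeid;
	int plen;

	rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
	/* For a 10.0.0.0/8 route, addr is 10.0.0.0 and plen is 8. */
#endif
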
/*
 * Stores IPv4 address and prefix mask of @rt inside
 *  @paddr and @pmask. Sets mask to INADDR_BROADCAST for host routes.
 * @pscopeid is currently always set to 0.
 */
void
rt_get_inet_prefix_pmask(const struct rtentry *rt, struct in_addr *paddr,
    struct in_addr *pmask, uint32_t *pscopeid)
{
	const struct sockaddr_in *dst;

	dst = (const struct sockaddr_in *)rt_key_const(rt);
	KASSERT((dst->sin_family == AF_INET),
	    ("rt family is %d, not inet", dst->sin_family));
	*paddr = dst->sin_addr;
	dst = (const struct sockaddr_in *)rt_mask_const(rt);
	if (dst == NULL)
		pmask->s_addr = INADDR_BROADCAST;
	else
		*pmask = dst->sin_addr;
	*pscopeid = 0;
}
#endif

#ifdef INET6
static int
inet6_get_plen(const struct in6_addr *addr)
{

	return (bitcount32(addr->s6_addr32[0]) + bitcount32(addr->s6_addr32[1]) +
	    bitcount32(addr->s6_addr32[2]) + bitcount32(addr->s6_addr32[3]));
}

/*
 * Stores IPv6 address and prefix length of @rt inside
 *  @paddr and @plen. Addresses are returned in de-embedded form.
 * @pscopeid is only written for link-local addresses.
 */
void
rt_get_inet6_prefix_plen(const struct rtentry *rt, struct in6_addr *paddr,
    int *plen, uint32_t *pscopeid)
{
	const struct sockaddr_in6 *dst;

	dst = (const struct sockaddr_in6 *)rt_key_const(rt);
	KASSERT((dst->sin6_family == AF_INET6),
	    ("rt family is %d, not inet6", dst->sin6_family));
	if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr))
		in6_splitscope(&dst->sin6_addr, paddr, pscopeid);
	else
		*paddr = dst->sin6_addr;
	dst = (const struct sockaddr_in6 *)rt_mask_const(rt);
	if (dst == NULL)
		*plen = 128;
	else
		*plen = inet6_get_plen(&dst->sin6_addr);
}

/*
 * Stores IPv6 address and prefix mask of @rt inside
 *  @paddr and @pmask. Addresses are returned in de-embedded form.
 * @pscopeid is only written for link-local addresses.
 */
void
rt_get_inet6_prefix_pmask(const struct rtentry *rt, struct in6_addr *paddr,
    struct in6_addr *pmask, uint32_t *pscopeid)
{
	const struct sockaddr_in6 *dst;

	dst = (const struct sockaddr_in6 *)rt_key_const(rt);
	KASSERT((dst->sin6_family == AF_INET6),
	    ("rt family is %d, not inet6", dst->sin6_family));
	if (IN6_IS_SCOPE_LINKLOCAL(&dst->sin6_addr))
		in6_splitscope(&dst->sin6_addr, paddr, pscopeid);
	else
		*paddr = dst->sin6_addr;
	dst = (const struct sockaddr_in6 *)rt_mask_const(rt);
	if (dst == NULL)
		memset(pmask, 0xFF, sizeof(struct in6_addr));
	else
		*pmask = dst->sin6_addr;
}
#endif

static void
rt_set_expire_info(struct rtentry *rt, const struct rt_addrinfo *info)
{

	/* Userland -> kernel timebase conversion. */
	if (info->rti_mflags & RTV_EXPIRE)
		rt->rt_expire = info->rti_rmx->rmx_expire ?
		    info->rti_rmx->rmx_expire - time_second + time_uptime : 0;
}

/*
 * Checks if the specified @gw matches the gw data in nexthop @nh.
 *
 * Returns true if it matches, false otherwise.
 */
bool
match_nhop_gw(const struct nhop_object *nh, const struct sockaddr *gw)
{

	if (nh->gw_sa.sa_family != gw->sa_family)
		return (false);

	switch (gw->sa_family) {
	case AF_INET:
		return (nh->gw4_sa.sin_addr.s_addr ==
		    ((const struct sockaddr_in *)gw)->sin_addr.s_addr);
	case AF_INET6:
		{
			const struct sockaddr_in6 *gw6;
			gw6 = (const struct sockaddr_in6 *)gw;

			/*
			 * Currently (2020-09) IPv6 gws in kernel have their
			 * scope embedded. Once this becomes false, this code
			 * has to be revisited.
			 */
			if (IN6_ARE_ADDR_EQUAL(&nh->gw6_sa.sin6_addr,
			    &gw6->sin6_addr))
				return (true);
			return (false);
		}
	case AF_LINK:
		{
			const struct sockaddr_dl *sdl;
			sdl = (const struct sockaddr_dl *)gw;
			return (nh->gwl_sa.sdl_index == sdl->sdl_index);
		}
	default:
		return (memcmp(&nh->gw_sa, gw, nh->gw_sa.sa_len) == 0);
	}

	/* NOTREACHED */
	return (false);
}

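/*
 * Illustrative usage sketch (hypothetical caller): testing whether a
 * nexthop uses a given IPv4 gateway.
 */
#if 0
	struct sockaddr_in gw = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(0xC0A80101),	/* 192.168.1.1 */
	};

	if (match_nhop_gw(nh, (struct sockaddr *)&gw)) {
		/* @nh routes via 192.168.1.1 */
	}
#endif
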
/*
 * Checks if data in @info matches nexthop @nh.
 *
 * Returns 0 on success,
 * ESRCH if not matched,
 * ENOENT if filter function returned false
 */
int
check_info_match_nhop(const struct rt_addrinfo *info, const struct rtentry *rt,
    const struct nhop_object *nh)
{
	const struct sockaddr *gw = info->rti_info[RTAX_GATEWAY];

	if (info->rti_filter != NULL) {
		if (info->rti_filter(rt, nh, info->rti_filterdata) == 0)
			return (ENOENT);
		else
			return (0);
	}
	if ((gw != NULL) && !match_nhop_gw(nh, gw))
		return (ESRCH);

	return (0);
}

/*
 * Checks if nexthop @nh can be rewritten by data in @info because
 *  of higher "priority". Currently the only case for such a scenario
 *  is the kernel installing interface routes, marked by the RTF_PINNED
 *  flag.
 *
 * Returns:
 * 1 if @info data has higher priority
 * 0 if priority is the same
 * -1 if priority is lower
 */
int
can_override_nhop(const struct rt_addrinfo *info, const struct nhop_object *nh)
{

	if (info->rti_flags & RTF_PINNED) {
		return (NH_IS_PINNED(nh) ? 0 : 1);
	} else {
		return (NH_IS_PINNED(nh) ? -1 : 0);
	}
}

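/*
 * Example of the resulting ordering: a kernel request carrying RTF_PINNED
 * overrides an existing non-pinned route (returns 1), two pinned or two
 * non-pinned routes tie (returns 0), and a regular request against a
 * pinned route loses (returns -1).
 */
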
/*
 * Runs exact prefix match based on @dst and @netmask.
 * Returns matched @rtentry if found or NULL.
 * If rtentry was found, saves nexthop / weight value into @rnd.
 */
static struct rtentry *
lookup_prefix_bysa(struct rib_head *rnh, const struct sockaddr *dst,
    const struct sockaddr *netmask, struct route_nhop_data *rnd)
{
	struct rtentry *rt;

	RIB_LOCK_ASSERT(rnh);

	rt = (struct rtentry *)rnh->rnh_lookup(__DECONST(void *, dst),
	    __DECONST(void *, netmask), &rnh->head);
	if (rt != NULL) {
		rnd->rnd_nhop = rt->rt_nhop;
		rnd->rnd_weight = rt->rt_weight;
	} else {
		rnd->rnd_nhop = NULL;
		rnd->rnd_weight = 0;
	}

	return (rt);
}

/*
 * Runs exact prefix match based on dst/netmask from @info.
 * Assumes RIB lock is held.
 * Returns matched @rtentry if found or NULL.
 * If rtentry was found, saves nexthop / weight value into @rnd.
 */
struct rtentry *
lookup_prefix(struct rib_head *rnh, const struct rt_addrinfo *info,
    struct route_nhop_data *rnd)
{
	struct rtentry *rt;

	rt = lookup_prefix_bysa(rnh, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rnd);

	return (rt);
}

/*
 * Adds route defined by @info into the kernel table specified by @fibnum and
 * sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_add_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct rib_head *rnh;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	/*
	 * Check consistency between RTF_HOST flag and netmask
	 * existence.
	 */
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	else if (info->rti_info[RTAX_NETMASK] == NULL)
		return (EINVAL);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_ADD;

	error = add_route(rnh, info, rc);
	if (error == 0)
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	return (error);
}

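/*
 * Illustrative usage sketch (hypothetical function, not kernel API
 * documentation): adding 10.0.0.0/8 via 192.168.1.1 from within the
 * network epoch.
 */
#if 0
static int
example_add_route(uint32_t fibnum)
{
	struct sockaddr_in dst = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(0x0A000000),	/* 10.0.0.0 */
	};
	struct sockaddr_in mask = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(0xFF000000),	/* /8 */
	};
	struct sockaddr_in gw = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(0xC0A80101),	/* 192.168.1.1 */
	};
	struct rt_addrinfo info = {
		.rti_flags = RTF_GATEWAY,
	};
	struct rib_cmd_info rc;

	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
	info.rti_info[RTAX_NETMASK] = (struct sockaddr *)&mask;
	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gw;

	/* Caller must be inside the network epoch (NET_EPOCH_ASSERT above). */
	return (rib_add_route(fibnum, &info, &rc));
}
#endif
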
/*
 * Checks if @dst and @gateway are a valid combination.
 *
 * Returns true if valid, false otherwise.
 */
static bool
check_gateway(struct rib_head *rnh, struct sockaddr *dst,
    struct sockaddr *gateway)
{
	if (dst->sa_family == gateway->sa_family)
		return (true);
	else if (gateway->sa_family == AF_UNSPEC)
		return (true);
	else if (gateway->sa_family == AF_LINK)
		return (true);
#if defined(INET) && defined(INET6)
	else if (dst->sa_family == AF_INET && gateway->sa_family == AF_INET6 &&
		rib_can_ipv6_nexthop_address(rnh))
		return (true);
#endif
	else
		return (false);
}

/*
 * Creates rtentry and nexthop based on @info data.
 * Return 0 and fills in rtentry into @prt on success,
 * return errno otherwise.
 */
static int
create_rtentry(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rtentry **prt)
{
	struct sockaddr *dst, *ndst, *gateway, *netmask;
	struct rtentry *rt;
	struct nhop_object *nh;
	struct ifaddr *ifa;
	int error, flags;

	dst = info->rti_info[RTAX_DST];
	gateway = info->rti_info[RTAX_GATEWAY];
	netmask = info->rti_info[RTAX_NETMASK];
	flags = info->rti_flags;

	if ((flags & RTF_GATEWAY) && !gateway)
		return (EINVAL);
	if (dst && gateway && !check_gateway(rnh, dst, gateway))
		return (EINVAL);

	if (dst->sa_len > sizeof(((struct rtentry *)NULL)->rt_dstb))
		return (EINVAL);

	if (info->rti_ifa == NULL) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);
		if (error)
			return (error);
	}

	error = nhop_create_from_info(rnh, info, &nh);
	if (error != 0)
		return (error);

	rt = uma_zalloc(V_rtzone, M_NOWAIT | M_ZERO);
	if (rt == NULL) {
		nhop_free(nh);
		return (ENOBUFS);
	}
	rt->rte_flags = (RTF_UP | flags) & RTE_RT_FLAG_MASK;
	rt->rt_nhop = nh;

	/* Fill in dst */
	memcpy(&rt->rt_dst, dst, dst->sa_len);
	rt_key(rt) = &rt->rt_dst;

	/*
	 * point to the (possibly newly malloc'd) dest address.
	 */
	ndst = (struct sockaddr *)rt_key(rt);

	/*
	 * make sure it contains the value we want (masked if needed).
	 */
	if (netmask) {
		rt_maskedcopy(dst, ndst, netmask);
	} else
		bcopy(dst, ndst, dst->sa_len);

	/*
	 * We use the ifa reference returned by rt_getifa_fib().
	 * This moved from below so that rnh->rnh_addaddr() can
	 * examine the ifa and ifa->ifa_ifp if it so desires.
	 */
	ifa = info->rti_ifa;
	rt->rt_weight = get_info_weight(info, RT_DEFAULT_WEIGHT);
	rt_set_expire_info(rt, info);

	*prt = rt;
	return (0);
}

static int
add_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct nhop_object *nh_orig;
	struct route_nhop_data rnd_orig, rnd_add;
	struct nhop_object *nh;
	struct rtentry *rt, *rt_orig;
	int error;

	error = create_rtentry(rnh, info, &rt);
	if (error != 0)
		return (error);

	rnd_add.rnd_nhop = rt->rt_nhop;
	rnd_add.rnd_weight = rt->rt_weight;
	nh = rt->rt_nhop;

	RIB_WLOCK(rnh);
	error = add_route_nhop(rnh, rt, info, &rnd_add, rc);
	if (error == 0) {
		RIB_WUNLOCK(rnh);
		return (0);
	}

	/* Addition failed. Look up the prefix in the rib to determine the cause. */
	rt_orig = lookup_prefix(rnh, info, &rnd_orig);
	if (rt_orig == NULL) {
		/* No prefix -> rnh_addaddr() failed to allocate memory */
		RIB_WUNLOCK(rnh);
		nhop_free(nh);
		uma_zfree(V_rtzone, rt);
		return (ENOMEM);
	}

	/* We have an existing route in the RIB. */
	nh_orig = rnd_orig.rnd_nhop;
	/* Check if the new route has higher preference */
	if (can_override_nhop(info, nh_orig) > 0) {
		/* Update nexthop to the new route */
		change_route_nhop(rnh, rt_orig, info, &rnd_add, rc);
		RIB_WUNLOCK(rnh);
		uma_zfree(V_rtzone, rt);
		nhop_free(nh_orig);
		return (0);
	}

	RIB_WUNLOCK(rnh);

#ifdef ROUTE_MPATH
	if (rib_can_multipath(rnh) && nhop_can_multipath(rnd_add.rnd_nhop) &&
	    nhop_can_multipath(rnd_orig.rnd_nhop))
		error = add_route_mpath(rnh, info, rt, &rnd_add, &rnd_orig, rc);
	else
#endif
	/* Unable to add - another route with the same preference exists */
	error = EEXIST;

	/*
	 * ROUTE_MPATH disabled: failed to add route, free both nhop and rt.
	 * ROUTE_MPATH enabled: original nhop reference is unused in any case,
	 *  free rt only if not _adding_ a new route to the rib (e.g. the case
	 *  when the initial lookup returned an existing route which then got
	 *  deleted prior to multipath group insertion, leading to a simple
	 *  non-multipath add as a result).
	 */
	nhop_free(nh);
	if ((error != 0) || rc->rc_cmd != RTM_ADD)
		uma_zfree(V_rtzone, rt);

	return (error);
}

/*
 * Removes route defined by @info from the kernel table specified by @fibnum and
 * sa_family in @info->rti_info[RTAX_DST].
 *
 * Returns 0 on success and fills in operation metadata into @rc.
 */
int
rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rib_head *rnh;
	struct sockaddr *dst_orig, *netmask;
	struct sockaddr_storage mdst;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_DELETE;

	dst_orig = info->rti_info[RTAX_DST];
	netmask = info->rti_info[RTAX_NETMASK];

	if (netmask != NULL) {
		/* Ensure @dst is always properly masked */
		if (dst_orig->sa_len > sizeof(mdst))
			return (EINVAL);
		rt_maskedcopy(dst_orig, (struct sockaddr *)&mdst, netmask);
		info->rti_info[RTAX_DST] = (struct sockaddr *)&mdst;
	}
	error = del_route(rnh, info, rc);
	info->rti_info[RTAX_DST] = dst_orig;

	return (error);
}

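/*
 * Illustrative usage sketch (hypothetical caller): deleting a prefix.
 * The same rt_addrinfo layout as in the addition example can be reused;
 * the gateway may be left NULL to match the first nexthop of the prefix.
 */
#if 0
	struct rib_cmd_info rc;
	int error;

	error = rib_del_route(fibnum, &info, &rc);
	if (error == 0) {
		/*
		 * rc.rc_cmd is RTM_DELETE if the whole prefix went away, or
		 * RTM_CHANGE if only one path of a multipath route was removed.
		 */
	}
#endif
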
/*
 * Conditionally unlinks rtentry matching data inside @info from @rnh.
 * Returns 0 on success with operation result stored in @rc.
 * On error, returns:
 * ESRCH - if prefix was not found,
 * EADDRINUSE - if trying to delete higher priority route.
 * ENOENT - if supplied filter function returned 0 (not matched).
 */
static int
rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info, struct rib_cmd_info *rc)
{
	struct rtentry *rt;
	struct nhop_object *nh;
	struct radix_node *rn;
	struct route_nhop_data rnd;
	int error;

	rt = lookup_prefix(rnh, info, &rnd);
	if (rt == NULL)
		return (ESRCH);

	nh = rt->rt_nhop;
#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh)) {
		error = del_route_mpath(rnh, info, rt,
		    (struct nhgrp_object *)nh, rc);
		return (error);
	}
#endif
	error = check_info_match_nhop(info, rt, nh);
	if (error != 0)
		return (error);

	if (can_override_nhop(info, nh) < 0)
		return (EADDRINUSE);

	/*
	 * Remove the item from the tree and return it.
	 * Complain if it is not there and do no more processing.
	 */
	rn = rnh->rnh_deladdr(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);
	if (rn == NULL)
		return (ESRCH);

	if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
		panic("rtrequest delete");

	rt = RNTORT(rn);
	rt->rte_flags &= ~RTF_UP;

	/* Finalize notification */
	rib_bump_gen(rnh);
	rnh->rnh_prefixes--;

	rc->rc_cmd = RTM_DELETE;
	rc->rc_rt = rt;
	rc->rc_nh_old = rt->rt_nhop;
	rc->rc_nh_weight = rt->rt_weight;
	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	return (0);
}

static int
del_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	RIB_WLOCK(rnh);
	error = rt_unlinkrte(rnh, info, rc);
	RIB_WUNLOCK(rnh);
	if (error != 0)
		return (error);

	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

	/*
	 * If the entire prefix was deleted, schedule the unlinked
	 * rtentry to be freed after the end of the current epoch.
	 */
	if (rc->rc_cmd == RTM_DELETE)
		rtfree(rc->rc_rt);
#ifdef ROUTE_MPATH
	else {
		/*
		 * Deleting 1 path may result in RTM_CHANGE to
		 * a different mpath group/nhop.
		 * Free old mpath group.
		 */
		nhop_free_any(rc->rc_nh_old);
	}
#endif

	return (0);
}

int
rib_change_route(uint32_t fibnum, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	RIB_RLOCK_TRACKER;
	struct route_nhop_data rnd_orig;
	struct rib_head *rnh;
	struct rtentry *rt;
	int error;

	NET_EPOCH_ASSERT();

	rnh = get_rnh(fibnum, info);
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	bzero(rc, sizeof(struct rib_cmd_info));
	rc->rc_cmd = RTM_CHANGE;

	/* Check if updated gateway exists */
	if ((info->rti_flags & RTF_GATEWAY) &&
	    (info->rti_info[RTAX_GATEWAY] == NULL)) {
		/*
		 * route(8) adds RTF_GATEWAY flag if -interface is not set.
		 * Remove RTF_GATEWAY to enforce consistency and maintain
		 * compatibility.
		 */
		info->rti_flags &= ~RTF_GATEWAY;
	}

	/*
	 * A route change is done in multiple steps, dropping and
	 * reacquiring the lock. When multiple processes change the
	 * same route concurrently, the route may change between the
	 * steps. Address this by retrying the operation several times
	 * before failing.
	 */

	RIB_RLOCK(rnh);
	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt == NULL) {
		RIB_RUNLOCK(rnh);
		return (ESRCH);
	}

	rnd_orig.rnd_nhop = rt->rt_nhop;
	rnd_orig.rnd_weight = rt->rt_weight;

	RIB_RUNLOCK(rnh);

	for (int i = 0; i < RIB_MAX_RETRIES; i++) {
		error = change_route(rnh, info, &rnd_orig, rc);
		if (error != EAGAIN)
			break;
	}

	return (error);
}

static int
change_nhop(struct rib_head *rnh, struct rt_addrinfo *info,
    struct nhop_object *nh_orig, struct nhop_object **nh_new)
{
	int error;

	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	if (((nh_orig->nh_flags & NHF_GATEWAY) &&
	    info->rti_info[RTAX_GATEWAY] != NULL) ||
	    info->rti_info[RTAX_IFP] != NULL ||
	    (info->rti_info[RTAX_IFA] != NULL &&
	     !sa_equal(info->rti_info[RTAX_IFA], nh_orig->nh_ifa->ifa_addr))) {
		error = rt_getifa_fib(info, rnh->rib_fibnum);

		if (error != 0) {
			info->rti_ifa = NULL;
			return (error);
		}
	}

	error = nhop_create_from_nhop(rnh, nh_orig, info, nh_new);
	info->rti_ifa = NULL;

	return (error);
}

#ifdef ROUTE_MPATH
static int
change_mpath_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *rnd_orig, struct rib_cmd_info *rc)
{
	int error = 0;
	struct nhop_object *nh_orig, *nh_new;
	struct route_nhop_data rnd_new;
	struct weightened_nhop *wn = NULL, *wn_new;
	uint32_t num_nhops;

	nh_orig = rnd_orig->rnd_nhop;

	wn = nhgrp_get_nhops((struct nhgrp_object *)nh_orig, &num_nhops);
	nh_orig = NULL;
	for (int i = 0; i < num_nhops; i++) {
		/* check_info_match_nhop() returns 0 on a match */
		if (check_info_match_nhop(info, NULL, wn[i].nh) == 0) {
			nh_orig = wn[i].nh;
			break;
		}
	}

	if (nh_orig == NULL)
		return (ESRCH);

	error = change_nhop(rnh, info, nh_orig, &nh_new);
	if (error != 0)
		return (error);

	wn_new = mallocarray(num_nhops, sizeof(struct weightened_nhop),
	    M_TEMP, M_NOWAIT | M_ZERO);
	if (wn_new == NULL) {
		nhop_free(nh_new);
		return (EAGAIN);
	}

	memcpy(wn_new, wn, num_nhops * sizeof(struct weightened_nhop));
	for (int i = 0; i < num_nhops; i++) {
		if (wn_new[i].nh == nh_orig) {
			/* Update the copy, not the group's own array */
			wn_new[i].nh = nh_new;
			wn_new[i].weight = get_info_weight(info,
			    rnd_orig->rnd_weight);
			break;
		}
	}

	error = nhgrp_get_group(rnh, wn_new, num_nhops, &rnd_new);
	nhop_free(nh_new);
	free(wn_new, M_TEMP);

	if (error != 0)
		return (error);

	error = change_route_conditional(rnh, NULL, info, rnd_orig, &rnd_new, rc);

	return (error);
}
#endif

static int
change_route(struct rib_head *rnh, struct rt_addrinfo *info,
    struct route_nhop_data *rnd_orig, struct rib_cmd_info *rc)
{
	int error = 0;
	struct nhop_object *nh_orig;
	struct route_nhop_data rnd_new;

	nh_orig = rnd_orig->rnd_nhop;
	if (nh_orig == NULL)
		return (ESRCH);

#ifdef ROUTE_MPATH
	if (NH_IS_NHGRP(nh_orig))
		return (change_mpath_route(rnh, info, rnd_orig, rc));
#endif

	rnd_new.rnd_weight = get_info_weight(info, rnd_orig->rnd_weight);
	error = change_nhop(rnh, info, nh_orig, &rnd_new.rnd_nhop);
	if (error != 0)
		return (error);
	error = change_route_conditional(rnh, NULL, info, rnd_orig, &rnd_new, rc);

	return (error);
}

/*
 * Inserts @rt with nhop data from @rnd into @rnh.
 * Returns 0 on success and stores operation results in @rc.
 */
static int
add_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc)
{
	struct sockaddr *ndst, *netmask;
	struct radix_node *rn;
	int error = 0;

	RIB_WLOCK_ASSERT(rnh);

	ndst = (struct sockaddr *)rt_key(rt);
	netmask = info->rti_info[RTAX_NETMASK];

	rt->rt_nhop = rnd->rnd_nhop;
	rt->rt_weight = rnd->rnd_weight;
	rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head, rt->rt_nodes);

	if (rn != NULL) {
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);

		/* Finalize notification */
		rib_bump_gen(rnh);
		rnh->rnh_prefixes++;

		rc->rc_cmd = RTM_ADD;
		rc->rc_rt = rt;
		rc->rc_nh_old = NULL;
		rc->rc_nh_new = rnd->rnd_nhop;
		rc->rc_nh_weight = rnd->rnd_weight;

		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
	} else {
		/* Existing route or memory allocation failure */
		error = EEXIST;
	}

	return (error);
}

/*
 * Switch @rt nhop/weight to the ones specified in @rnd.
 *  Conditionally set rt_expire if set in @info.
 * Returns 0 on success.
 */
int
change_route_nhop(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd,
    struct rib_cmd_info *rc)
{
	struct nhop_object *nh_orig;

	RIB_WLOCK_ASSERT(rnh);

	nh_orig = rt->rt_nhop;

	if (rnd->rnd_nhop != NULL) {
		/* Changing expiration & nexthop & weight to a new one */
		rt_set_expire_info(rt, info);
		rt->rt_nhop = rnd->rnd_nhop;
		rt->rt_weight = rnd->rnd_weight;
		if (rt->rt_expire > 0)
			tmproutes_update(rnh, rt);
	} else {
		/* Route deletion requested. */
		struct sockaddr *ndst, *netmask;
		struct radix_node *rn;

		ndst = (struct sockaddr *)rt_key(rt);
		netmask = info->rti_info[RTAX_NETMASK];
		rn = rnh->rnh_deladdr(ndst, netmask, &rnh->head);
		if (rn == NULL)
			return (ESRCH);
		rt = RNTORT(rn);
		rt->rte_flags &= ~RTF_UP;
	}

	/* Finalize notification */
	rib_bump_gen(rnh);
	if (rnd->rnd_nhop == NULL)
		rnh->rnh_prefixes--;

	rc->rc_cmd = (rnd->rnd_nhop != NULL) ? RTM_CHANGE : RTM_DELETE;
	rc->rc_rt = rt;
	rc->rc_nh_old = nh_orig;
	rc->rc_nh_new = rnd->rnd_nhop;
	rc->rc_nh_weight = rnd->rnd_weight;

	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);

	return (0);
}

/*
 * Conditionally updates route nhop/weight IFF data in @rnd_orig is
 *  consistent with the current route data.
 * Nexthop in @rnd_new is consumed.
 */
int
change_route_conditional(struct rib_head *rnh, struct rtentry *rt,
    struct rt_addrinfo *info, struct route_nhop_data *rnd_orig,
    struct route_nhop_data *rnd_new, struct rib_cmd_info *rc)
{
	struct rtentry *rt_new;
	int error = 0;

	RIB_WLOCK(rnh);

	rt_new = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], &rnh->head);

	if (rt_new == NULL) {
		if (rnd_orig->rnd_nhop == NULL)
			error = add_route_nhop(rnh, rt, info, rnd_new, rc);
		else {
			/*
			 * Prefix does not exist, which was not our assumption.
			 * Update @rnd_orig with the new data and return
			 */
			rnd_orig->rnd_nhop = NULL;
			rnd_orig->rnd_weight = 0;
			error = EAGAIN;
		}
	} else {
		/* Prefix exists, try to update */
		if (rnd_orig->rnd_nhop == rt_new->rt_nhop) {
			/*
			 * Nhop/mpath group hasn't changed. Flip
			 * to the new precalculated one and return
			 */
			error = change_route_nhop(rnh, rt_new, info, rnd_new, rc);
		} else {
			/* Update and retry */
			rnd_orig->rnd_nhop = rt_new->rt_nhop;
			rnd_orig->rnd_weight = rt_new->rt_weight;
			error = EAGAIN;
		}
	}

	RIB_WUNLOCK(rnh);

	if (error == 0) {
		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);

		if (rnd_orig->rnd_nhop != NULL)
			nhop_free_any(rnd_orig->rnd_nhop);

	} else {
		if (rnd_new->rnd_nhop != NULL)
			nhop_free_any(rnd_new->rnd_nhop);
	}

	return (error);
}

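/*
 * The EAGAIN contract above implements optimistic concurrency: the caller
 * snapshots the current nhop/weight, builds a replacement without holding
 * the lock, and retries with the refreshed @rnd_orig if the route changed
 * underneath (see the RIB_MAX_RETRIES loop in rib_change_route()).
 */
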
/*
 * Performs a modification of the routing table specified by @action.
 * Table is specified by @fibnum and sa_family in @info->rti_info[RTAX_DST].
 * Needs to be run in the network epoch.
 *
 * Returns 0 on success and fills in @rc with the action result.
 */
int
rib_action(uint32_t fibnum, int action, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	int error;

	switch (action) {
	case RTM_ADD:
		error = rib_add_route(fibnum, info, rc);
		break;
	case RTM_DELETE:
		error = rib_del_route(fibnum, info, rc);
		break;
	case RTM_CHANGE:
		error = rib_change_route(fibnum, info, rc);
		break;
	default:
		error = ENOTSUP;
	}

	return (error);
}

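/*
 * Illustrative usage sketch (hypothetical wrapper): entering the network
 * epoch around rib_action() for callers that are not already inside it.
 */
#if 0
static int
example_rib_action(uint32_t fibnum, int action, struct rt_addrinfo *info,
    struct rib_cmd_info *rc)
{
	struct epoch_tracker et;
	int error;

	NET_EPOCH_ENTER(et);
	error = rib_action(fibnum, action, info, rc);
	NET_EPOCH_EXIT(et);

	return (error);
}
#endif
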
struct rt_delinfo
{
	struct rt_addrinfo info;
	struct rib_head *rnh;
	struct rtentry *head;
	struct rib_cmd_info rc;
};

/*
 * Conditionally unlinks @rn from the radix tree based
 * on info data passed in @arg.
 */
static int
rt_checkdelroute(struct radix_node *rn, void *arg)
{
	struct rt_delinfo *di;
	struct rt_addrinfo *info;
	struct rtentry *rt;

	di = (struct rt_delinfo *)arg;
	rt = (struct rtentry *)rn;
	info = &di->info;

	info->rti_info[RTAX_DST] = rt_key(rt);
	info->rti_info[RTAX_NETMASK] = rt_mask(rt);

	if (rt_unlinkrte(di->rnh, info, &di->rc) != 0)
		return (0);

	/*
	 * Add deleted rtentries to the list to GC them
	 *  after dropping the lock.
	 *
	 * XXX: Delayed notifications not implemented
	 *  for nexthop updates.
	 */
	if (di->rc.rc_cmd == RTM_DELETE) {
		/* Add to the list and return */
		rt->rt_chain = di->head;
		di->head = rt;
#ifdef ROUTE_MPATH
	} else {
		/*
		 * RTM_CHANGE to a different nexthop or nexthop group.
		 * Free the old multipath group.
		 */
		nhop_free_any(di->rc.rc_nh_old);
#endif
	}

	return (0);
}

/*
 * Iterates over a routing table specified by @fibnum and @family and
 *  deletes elements marked by @filter_f.
 * @fibnum: rtable id
 * @family: AF_ address family
 * @filter_f: function returning non-zero value for items to delete
 * @arg: data to pass to the @filter_f function
 * @report: true if rtsock notification is needed.
 */
void
rib_walk_del(u_int fibnum, int family, rib_filter_f_t *filter_f, void *arg, bool report)
{
	struct rib_head *rnh;
	struct rt_delinfo di;
	struct rtentry *rt;
	struct nhop_object *nh;
	struct epoch_tracker et;

	rnh = rt_tables_get_rnh(fibnum, family);
	if (rnh == NULL)
		return;

	bzero(&di, sizeof(di));
	di.info.rti_filter = filter_f;
	di.info.rti_filterdata = arg;
	di.rnh = rnh;
	di.rc.rc_cmd = RTM_DELETE;

	NET_EPOCH_ENTER(et);

	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_checkdelroute, &di);
	RIB_WUNLOCK(rnh);

	/* We might have something to reclaim. */
	bzero(&di.rc, sizeof(di.rc));
	di.rc.rc_cmd = RTM_DELETE;
	while (di.head != NULL) {
		rt = di.head;
		di.head = rt->rt_chain;
		rt->rt_chain = NULL;
		nh = rt->rt_nhop;

		di.rc.rc_rt = rt;
		di.rc.rc_nh_old = nh;
		rib_notify(rnh, RIB_NOTIFY_DELAYED, &di.rc);

		/* TODO std rt -> rt_addrinfo export */
		di.info.rti_info[RTAX_DST] = rt_key(rt);
		di.info.rti_info[RTAX_NETMASK] = rt_mask(rt);

		if (report) {
#ifdef ROUTE_MPATH
			struct nhgrp_object *nhg;
			struct weightened_nhop *wn;
			uint32_t num_nhops;

			if (NH_IS_NHGRP(nh)) {
				nhg = (struct nhgrp_object *)nh;
				wn = nhgrp_get_nhops(nhg, &num_nhops);
				for (int i = 0; i < num_nhops; i++)
					rt_routemsg(RTM_DELETE, rt, wn[i].nh, fibnum);
			} else
#endif
			rt_routemsg(RTM_DELETE, rt, nh, fibnum);
		}
		rtfree(rt);
	}

	NET_EPOCH_EXIT(et);
}

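/*
 * Illustrative usage sketch (hypothetical filter): removing every route
 * whose nexthop points to a given ifnet. The callback follows the
 * rib_filter_f_t contract used by rt_unlinkrte(): a non-zero return
 * selects the entry for deletion.
 */
#if 0
static int
example_filter_ifp(const struct rtentry *rt, const struct nhop_object *nh,
    void *arg)
{
	const struct ifnet *ifp = arg;

	return (nh->nh_ifp == ifp);
}

/* rib_walk_del(fibnum, AF_INET, example_filter_ifp, ifp, true); */
#endif
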
static int
rt_delete_unconditional(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = RNTORT(rn);
	struct rib_head *rnh = (struct rib_head *)arg;

	rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), &rnh->head);
	if (RNTORT(rn) == rt)
		rtfree(rt);

	return (0);
}

/*
 * Removes all routes from the routing table without executing notifications.
 * rtentries will be removed after the end of the current epoch.
 */
static void
rib_flush_routes(struct rib_head *rnh)
{
	RIB_WLOCK(rnh);
	rnh->rnh_walktree(&rnh->head, rt_delete_unconditional, rnh);
	RIB_WUNLOCK(rnh);
}

void
rib_flush_routes_family(int family)
{
	struct rib_head *rnh;

	for (uint32_t fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		if ((rnh = rt_tables_get_rnh(fibnum, family)) != NULL)
			rib_flush_routes(rnh);
	}
}

static void
rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc)
{
	struct rib_subscription *rs;

	CK_STAILQ_FOREACH(rs, &rnh->rnh_subscribers, next) {
		if (rs->type == type)
			rs->func(rnh, rc, rs->arg);
	}
}

static struct rib_subscription *
allocate_subscription(rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	int flags = M_ZERO | (waitok ? M_WAITOK : M_NOWAIT);

	rs = malloc(sizeof(struct rib_subscription), M_RTABLE, flags);
	if (rs == NULL)
		return (NULL);

	rs->func = f;
	rs->arg = arg;
	rs->type = type;

	return (rs);
}

/*
 * Subscribes to changes in the routing table specified by @fibnum and
 *  @family.
 *
 * Returns a pointer to the subscription structure on success.
 */
struct rib_subscription *
rib_subscribe(uint32_t fibnum, int family, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_head *rnh;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
	rnh = rt_tables_get_rnh(fibnum, family);
	NET_EPOCH_EXIT(et);

	return (rib_subscribe_internal(rnh, f, arg, type, waitok));
}

struct rib_subscription *
rib_subscribe_internal(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	if ((rs = allocate_subscription(f, arg, type, waitok)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);

	return (rs);
}

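/*
 * Illustrative usage sketch (hypothetical subscriber): logging every
 * immediate-stage RIB change. The callback matches rib_subscription_cb_t
 * as invoked by rib_notify() above; RIB_NOTIFY_IMMEDIATE callbacks run
 * under the RIB lock, so they must not sleep.
 */
#if 0
static void
example_rib_cb(struct rib_head *rnh, struct rib_cmd_info *rc, void *arg)
{

	log(LOG_DEBUG, "rib cmd %d in fib %u\n", rc->rc_cmd, rnh->rib_fibnum);
}

/*
 * rs = rib_subscribe(fibnum, AF_INET, example_rib_cb, NULL,
 *     RIB_NOTIFY_IMMEDIATE, true);
 * ...
 * rib_unsubscribe(rs);
 */
#endif
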
struct rib_subscription *
rib_subscribe_locked(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type)
{
	struct rib_subscription *rs;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	if ((rs = allocate_subscription(f, arg, type, false)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);

	return (rs);
}

/*
 * Removes rtable subscription @rs from the routing table.
 * Needs to be run in the network epoch.
 */
void
rib_unsubscribe(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();

	RIB_WLOCK(rnh);
	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);
	RIB_WUNLOCK(rnh);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);
}

void
rib_unsubscribe_locked(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);

	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
	    &rs->epoch_ctx);
}

/*
 * Epoch callback indicating subscription is safe to destroy
 */
static void
destroy_subscription_epoch(epoch_context_t ctx)
{
	struct rib_subscription *rs;

	rs = __containerof(ctx, struct rib_subscription, epoch_ctx);

	free(rs, M_RTABLE);
}

void
rib_init_subscriptions(struct rib_head *rnh)
{

	CK_STAILQ_INIT(&rnh->rnh_subscribers);
}

void
rib_destroy_subscriptions(struct rib_head *rnh)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	while ((rs = CK_STAILQ_FIRST(&rnh->rnh_subscribers)) != NULL) {
		CK_STAILQ_REMOVE_HEAD(&rnh->rnh_subscribers, next);
		epoch_call(net_epoch_preempt, destroy_subscription_epoch,
		    &rs->epoch_ctx);
	}
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);
}
1591