xref: /freebsd/sys/net/route/route_ctl.c (revision 62cfcf62f627e5093fb37026a6d8c98e4d2ef04c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2020 Alexander V. Chernikov
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 #include "opt_inet.h"
31 #include "opt_inet6.h"
32 #include "opt_mpath.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/syslog.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/rmlock.h>
44 
45 #include <net/if.h>
46 #include <net/if_var.h>
47 #include <net/if_dl.h>
48 #include <net/vnet.h>
49 #include <net/route.h>
50 #include <net/route/route_ctl.h>
51 #include <net/route/route_var.h>
52 #include <net/route/nhop_utils.h>
53 #include <net/route/nhop.h>
54 #include <net/route/nhop_var.h>
55 #include <net/route/shared.h>
56 #include <netinet/in.h>
57 
58 #ifdef RADIX_MPATH
59 #include <net/radix_mpath.h>
60 #endif
61 
62 #include <vm/uma.h>
63 
64 
65 /*
66  * This file contains control-plane routing table functions.
67  *
68  * All functions assume they are called within the network epoch.
69  */
70 
71 struct rib_subscription {
72 	CK_STAILQ_ENTRY(rib_subscription)	next;
73 	rib_subscription_cb_t			*func;
74 	void					*arg;
75 	enum rib_subscription_type		type;
76 	struct epoch_context			epoch_ctx;
77 };
78 
79 static void rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
80     struct rib_cmd_info *rc);
81 
82 static void rt_notifydelete(struct rtentry *rt, struct rt_addrinfo *info);
83 static void destroy_subscription_epoch(epoch_context_t ctx);
84 
85 static struct rib_head *
86 get_rnh(uint32_t fibnum, const struct rt_addrinfo *info)
87 {
88 	struct rib_head *rnh;
89 	struct sockaddr *dst;
90 
91 	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
92 
93 	dst = info->rti_info[RTAX_DST];
94 	rnh = rt_tables_get_rnh(fibnum, dst->sa_family);
95 
96 	return (rnh);
97 }
98 
99 /*
100  * Adds the route defined by @info into the kernel table specified by @fibnum
101  * and the sa_family of @info->rti_info[RTAX_DST].
102  *
103  * Returns 0 on success and fills in operation metadata into @rc.
104  */
105 int
106 rib_add_route(uint32_t fibnum, struct rt_addrinfo *info,
107     struct rib_cmd_info *rc)
108 {
109 	struct rib_head *rnh;
110 
111 	NET_EPOCH_ASSERT();
112 
113 	rnh = get_rnh(fibnum, info);
114 	if (rnh == NULL)
115 		return (EAFNOSUPPORT);
116 
117 	/*
118 	 * Check consistency between RTF_HOST flag and netmask
119 	 * existence.
120 	 */
121 	if (info->rti_flags & RTF_HOST)
122 		info->rti_info[RTAX_NETMASK] = NULL;
123 	else if (info->rti_info[RTAX_NETMASK] == NULL)
124 		return (EINVAL);
125 
126 	bzero(rc, sizeof(struct rib_cmd_info));
127 	rc->rc_cmd = RTM_ADD;
128 
129 	return (add_route(rnh, info, rc));
130 }
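
/*
 * Illustrative sketch: how an in-kernel consumer might call rib_add_route()
 * to install an IPv4 host route.  The helper name and its arguments are
 * hypothetical; the pattern is to fill struct rt_addrinfo with at least
 * RTAX_DST (plus RTAX_GATEWAY for RTF_GATEWAY routes) and to make the call
 * from within the network epoch.
 */
#if 0
static int
example_add_host_route(uint32_t fibnum, struct in_addr dst_addr,
    struct in_addr gw_addr)
{
	struct sockaddr_in dst, gw;
	struct rt_addrinfo info;
	struct rib_cmd_info rc;
	struct epoch_tracker et;
	int error;

	/* Destination is a single host: RTF_HOST, no netmask needed. */
	bzero(&dst, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr = dst_addr;

	/* Next hop the traffic should be forwarded to. */
	bzero(&gw, sizeof(gw));
	gw.sin_family = AF_INET;
	gw.sin_len = sizeof(gw);
	gw.sin_addr = gw_addr;

	bzero(&info, sizeof(info));
	info.rti_flags = RTF_HOST | RTF_GATEWAY | RTF_STATIC;
	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;
	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gw;

	/* rib_add_route() asserts the net epoch, so enter it for the call. */
	NET_EPOCH_ENTER(et);
	error = rib_add_route(fibnum, &info, &rc);
	NET_EPOCH_EXIT(et);

	return (error);
}
#endif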
131 
132 int
133 add_route(struct rib_head *rnh, struct rt_addrinfo *info,
134     struct rib_cmd_info *rc)
135 {
136 	struct sockaddr *dst, *ndst, *gateway, *netmask;
137 	struct rtentry *rt, *rt_old;
138 	struct nhop_object *nh;
139 	struct radix_node *rn;
140 	struct ifaddr *ifa;
141 	int error, flags;
142 	struct epoch_tracker et;
143 
144 	dst = info->rti_info[RTAX_DST];
145 	gateway = info->rti_info[RTAX_GATEWAY];
146 	netmask = info->rti_info[RTAX_NETMASK];
147 	flags = info->rti_flags;
148 
149 	if ((flags & RTF_GATEWAY) && !gateway)
150 		return (EINVAL);
151 	if (dst && gateway && (dst->sa_family != gateway->sa_family) &&
152 	    (gateway->sa_family != AF_UNSPEC) && (gateway->sa_family != AF_LINK))
153 		return (EINVAL);
154 
155 	if (dst->sa_len > sizeof(((struct rtentry *)NULL)->rt_dstb))
156 		return (EINVAL);
157 
158 	if (info->rti_ifa == NULL) {
159 		error = rt_getifa_fib(info, rnh->rib_fibnum);
160 		if (error)
161 			return (error);
162 	} else {
163 		ifa_ref(info->rti_ifa);
164 	}
165 
166 	NET_EPOCH_ENTER(et);
167 	error = nhop_create_from_info(rnh, info, &nh);
168 	NET_EPOCH_EXIT(et);
169 	if (error != 0) {
170 		ifa_free(info->rti_ifa);
171 		return (error);
172 	}
173 
174 	rt = uma_zalloc(V_rtzone, M_NOWAIT);
175 	if (rt == NULL) {
176 		ifa_free(info->rti_ifa);
177 		nhop_free(nh);
178 		return (ENOBUFS);
179 	}
180 	rt->rt_flags = RTF_UP | flags;
181 	rt->rt_nhop = nh;
182 
183 	/* Fill in dst */
184 	memcpy(&rt->rt_dst, dst, dst->sa_len);
185 	rt_key(rt) = &rt->rt_dst;
186 
187 	/*
188 	 * Point ndst at the destination address embedded in the rtentry.
189 	 */
190 	ndst = (struct sockaddr *)rt_key(rt);
191 
192 	/*
193 	 * make sure it contains the value we want (masked if needed).
194 	 */
195 	if (netmask) {
196 		rt_maskedcopy(dst, ndst, netmask);
197 	} else
198 		bcopy(dst, ndst, dst->sa_len);
199 
200 	/*
201 	 * We use the ifa reference returned by rt_getifa_fib().
202 	 * This moved from below so that rnh->rnh_addaddr() can
203 	 * examine the ifa and ifa->ifa_ifp if it so desires.
204 	 */
205 	ifa = info->rti_ifa;
206 	rt->rt_weight = 1;
207 
208 	rt_setmetrics(info, rt);
209 	rt_old = NULL;
210 
211 	RIB_WLOCK(rnh);
212 	RT_LOCK(rt);
213 #ifdef RADIX_MPATH
214 	/* do not permit exactly the same dst/mask/gw pair */
215 	if (rt_mpath_capable(rnh) &&
216 		rt_mpath_conflict(rnh, rt, netmask)) {
217 		RIB_WUNLOCK(rnh);
218 
219 		nhop_free(nh);
220 		uma_zfree(V_rtzone, rt);
221 		return (EEXIST);
222 	}
223 #endif
224 
225 	rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head, rt->rt_nodes);
226 
227 	if (rn != NULL) {
228 		/* Most common usecase */
229 		if (rt->rt_expire > 0)
230 			tmproutes_update(rnh, rt);
231 
232 		/* Finalize notification */
233 		rnh->rnh_gen++;
234 
235 		rc->rc_rt = RNTORT(rn);
236 		rc->rc_nh_new = nh;
237 
238 		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
239 	} else if ((info->rti_flags & RTF_PINNED) != 0) {
240 
241 		/*
242 		 * Force removal and re-try addition
243 		 * TODO: better multipath&pinned support
244 		 */
245 		struct sockaddr *info_dst = info->rti_info[RTAX_DST];
246 		info->rti_info[RTAX_DST] = ndst;
247 		/* Do not delete existing PINNED(interface) routes */
248 		info->rti_flags &= ~RTF_PINNED;
249 		rt_old = rt_unlinkrte(rnh, info, &error);
250 		info->rti_flags |= RTF_PINNED;
251 		info->rti_info[RTAX_DST] = info_dst;
252 		if (rt_old != NULL) {
253 			rn = rnh->rnh_addaddr(ndst, netmask, &rnh->head,
254 			    rt->rt_nodes);
255 
256 			/* Finalize notification */
257 			rnh->rnh_gen++;
258 
259 			if (rn != NULL) {
260 				rc->rc_cmd = RTM_CHANGE;
261 				rc->rc_rt = RNTORT(rn);
262 				rc->rc_nh_old = rt_old->rt_nhop;
263 				rc->rc_nh_new = nh;
264 			} else {
265 				rc->rc_cmd = RTM_DELETE;
266 				rc->rc_rt = RNTORT(rn);
267 				rc->rc_nh_old = rt_old->rt_nhop;
268 				rc->rc_nh_new = nh;
269 			}
270 			rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
271 		}
272 	}
273 	RIB_WUNLOCK(rnh);
274 
275 	if ((rn != NULL) || (rt_old != NULL))
276 		rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);
277 
278 	if (rt_old != NULL) {
279 		rt_notifydelete(rt_old, info);
280 		rtfree(rt_old);
281 	}
282 
283 	/*
284 	 * If it still failed to go into the tree,
285 	 * then un-make it (this should be a function)
286 	 */
287 	if (rn == NULL) {
288 		nhop_free(nh);
289 		uma_zfree(V_rtzone, rt);
290 		return (EEXIST);
291 	}
292 
293 	/*
294 	 * If this protocol has something to add to this then
295 	 * allow it to do that as well.
296 	 */
297 	if (ifa->ifa_rtrequest)
298 		ifa->ifa_rtrequest(RTM_ADD, rt, rt->rt_nhop, info);
299 
300 	RT_UNLOCK(rt);
301 
302 	return (0);
303 }
304 
305 
306 /*
307  * Removes the route defined by @info from the kernel table specified by @fibnum
308  * and the sa_family of @info->rti_info[RTAX_DST].
309  *
310  * Returns 0 on success and fills in operation metadata into @rc.
311  */
312 int
313 rib_del_route(uint32_t fibnum, struct rt_addrinfo *info, struct rib_cmd_info *rc)
314 {
315 	struct rib_head *rnh;
316 
317 	NET_EPOCH_ASSERT();
318 
319 	rnh = get_rnh(fibnum, info);
320 	if (rnh == NULL)
321 		return (EAFNOSUPPORT);
322 
323 	bzero(rc, sizeof(struct rib_cmd_info));
324 	rc->rc_cmd = RTM_DELETE;
325 
326 	return (del_route(rnh, info, rc));
327 }
328 
329 /*
330  * Conditionally unlinks rtentry matching data inside @info from @rnh.
331  * Returns the unlinked, locked and referenced @rtentry on success,
332  * or NULL with @perror set to one of:
333  * ESRCH - prefix was not found;
334  * EADDRINUSE - trying to delete a PINNED route without the appropriate flag;
335  * ENOENT - the supplied filter function returned 0 (not matched).
336  */
337 struct rtentry *
338 rt_unlinkrte(struct rib_head *rnh, struct rt_addrinfo *info, int *perror)
339 {
340 	struct sockaddr *dst, *netmask;
341 	struct rtentry *rt;
342 	struct radix_node *rn;
343 
344 	dst = info->rti_info[RTAX_DST];
345 	netmask = info->rti_info[RTAX_NETMASK];
346 
347 	rt = (struct rtentry *)rnh->rnh_lookup(dst, netmask, &rnh->head);
348 	if (rt == NULL) {
349 		*perror = ESRCH;
350 		return (NULL);
351 	}
352 
353 	if ((info->rti_flags & RTF_PINNED) == 0) {
354 		/* Check if target route can be deleted */
355 		if (rt->rt_flags & RTF_PINNED) {
356 			*perror = EADDRINUSE;
357 			return (NULL);
358 		}
359 	}
360 
361 	if (info->rti_filter != NULL) {
362 		if (info->rti_filter(rt, rt->rt_nhop, info->rti_filterdata)==0){
363 			/* Not matched */
364 			*perror = ENOENT;
365 			return (NULL);
366 		}
367 
368 		/*
369 		 * Filter function requested rte deletion.
370 		 * Ease the caller work by filling in remaining info
371 		 * from that particular entry.
372 		 */
373 		info->rti_info[RTAX_GATEWAY] = &rt->rt_nhop->gw_sa;
374 	}
375 
376 	/*
377 	 * Remove the item from the tree and return it.
378 	 * Complain if it is not there and do no more processing.
379 	 */
380 	*perror = ESRCH;
381 #ifdef RADIX_MPATH
382 	if (rt_mpath_capable(rnh))
383 		rn = rt_mpath_unlink(rnh, info, rt, perror);
384 	else
385 #endif
386 	rn = rnh->rnh_deladdr(dst, netmask, &rnh->head);
387 	if (rn == NULL)
388 		return (NULL);
389 
390 	if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
391 		panic ("rtrequest delete");
392 
393 	rt = RNTORT(rn);
394 	RT_LOCK(rt);
395 	rt->rt_flags &= ~RTF_UP;
396 
397 	*perror = 0;
398 
399 	return (rt);
400 }
401 
402 int
403 del_route(struct rib_head *rnh, struct rt_addrinfo *info,
404     struct rib_cmd_info *rc)
405 {
406 	struct sockaddr *dst, *netmask;
407 	struct sockaddr_storage mdst;
408 	struct rtentry *rt;
409 	int error;
410 
411 	dst = info->rti_info[RTAX_DST];
412 	netmask = info->rti_info[RTAX_NETMASK];
413 
414 	if (netmask) {
415 		if (dst->sa_len > sizeof(mdst))
416 			return (EINVAL);
417 		rt_maskedcopy(dst, (struct sockaddr *)&mdst, netmask);
418 		dst = (struct sockaddr *)&mdst;
419 	}
420 
421 	RIB_WLOCK(rnh);
422 	rt = rt_unlinkrte(rnh, info, &error);
423 	if (rt != NULL) {
424 		/* Finalize notification */
425 		rnh->rnh_gen++;
426 		rc->rc_rt = rt;
427 		rc->rc_nh_old = rt->rt_nhop;
428 		rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
429 	}
430 	RIB_WUNLOCK(rnh);
431 	if (error != 0)
432 		return (error);
433 
434 	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);
435 	rt_notifydelete(rt, info);
436 
437 	/*
438 	 * The caller may keep using the unlinked entry (e.g. via @rc):
439 	 * rtfree() defers the actual destruction until after the current epoch.
440 	 */
441 	rtfree(rt);
442 
443 	return (0);
444 }
445 
446 int
447 rib_change_route(uint32_t fibnum, struct rt_addrinfo *info,
448     struct rib_cmd_info *rc)
449 {
450 	struct rib_head *rnh;
451 
452 	NET_EPOCH_ASSERT();
453 
454 	rnh = get_rnh(fibnum, info);
455 	if (rnh == NULL)
456 		return (EAFNOSUPPORT);
457 
458 	bzero(rc, sizeof(struct rib_cmd_info));
459 	rc->rc_cmd = RTM_CHANGE;
460 
461 	return (change_route(rnh, info, rc));
462 }
463 
464 static int
465 change_route_one(struct rib_head *rnh, struct rt_addrinfo *info,
466     struct rib_cmd_info *rc)
467 {
468 	RIB_RLOCK_TRACKER;
469 	struct rtentry *rt = NULL;
470 	int error = 0;
471 	int free_ifa = 0;
472 	struct nhop_object *nh, *nh_orig;
473 
474 	RIB_RLOCK(rnh);
475 	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
476 	    info->rti_info[RTAX_NETMASK], &rnh->head);
477 
478 	if (rt == NULL) {
479 		RIB_RUNLOCK(rnh);
480 		return (ESRCH);
481 	}
482 
483 #ifdef RADIX_MPATH
484 	/*
485 	 * If we got multipath routes,
486 	 * we require users to specify a matching RTAX_GATEWAY.
487 	 */
488 	if (rt_mpath_capable(rnh)) {
489 		rt = rt_mpath_matchgate(rt, info->rti_info[RTAX_GATEWAY]);
490 		if (rt == NULL) {
491 			RIB_RUNLOCK(rnh);
492 			return (ESRCH);
493 		}
494 	}
495 #endif
496 	nh_orig = rt->rt_nhop;
497 
498 	RIB_RUNLOCK(rnh);
499 
500 	rt = NULL;
501 	nh = NULL;
502 
503 	/*
504 	 * New gateway could require new ifaddr, ifp;
505 	 * flags may also be different; ifp may be specified
506 	 * by ll sockaddr when protocol address is ambiguous
507 	 */
508 	if (((nh_orig->nh_flags & NHF_GATEWAY) &&
509 	    info->rti_info[RTAX_GATEWAY] != NULL) ||
510 	    info->rti_info[RTAX_IFP] != NULL ||
511 	    (info->rti_info[RTAX_IFA] != NULL &&
512 	     !sa_equal(info->rti_info[RTAX_IFA], nh_orig->nh_ifa->ifa_addr))) {
513 		error = rt_getifa_fib(info, rnh->rib_fibnum);
514 		if (info->rti_ifa != NULL)
515 			free_ifa = 1;
516 
517 		if (error != 0) {
518 			if (free_ifa) {
519 				ifa_free(info->rti_ifa);
520 				info->rti_ifa = NULL;
521 			}
522 
523 			return (error);
524 		}
525 	}
526 
527 	error = nhop_create_from_nhop(rnh, nh_orig, info, &nh);
528 	if (free_ifa) {
529 		ifa_free(info->rti_ifa);
530 		info->rti_ifa = NULL;
531 	}
532 	if (error != 0)
533 		return (error);
534 
535 	RIB_WLOCK(rnh);
536 
537 	/* Lookup rtentry once again and check if nexthop is still the same */
538 	rt = (struct rtentry *)rnh->rnh_lookup(info->rti_info[RTAX_DST],
539 	    info->rti_info[RTAX_NETMASK], &rnh->head);
540 
541 	if (rt == NULL) {
542 		RIB_WUNLOCK(rnh);
543 		nhop_free(nh);
544 		return (ESRCH);
545 	}
546 
547 	if (rt->rt_nhop != nh_orig) {
548 		RIB_WUNLOCK(rnh);
549 		nhop_free(nh);
550 		return (EAGAIN);
551 	}
552 
553 	/* Proceed with the update */
554 	RT_LOCK(rt);
555 
556 	/* Provide notification to the protocols. */
557 	if ((nh_orig->nh_ifa != nh->nh_ifa) && nh_orig->nh_ifa->ifa_rtrequest)
558 		nh_orig->nh_ifa->ifa_rtrequest(RTM_DELETE, rt, nh_orig, info);
559 
560 	rt->rt_nhop = nh;
561 	rt_setmetrics(info, rt);
562 
563 	if ((nh_orig->nh_ifa != nh->nh_ifa) && nh->nh_ifa->ifa_rtrequest)
564 		nh->nh_ifa->ifa_rtrequest(RTM_ADD, rt, nh, info);
565 
566 	/* Finalize notification */
567 	rc->rc_rt = rt;
568 	rc->rc_nh_old = nh_orig;
569 	rc->rc_nh_new = rt->rt_nhop;
570 
571 	RT_UNLOCK(rt);
572 
573 	/* Update generation id to reflect rtable change */
574 	rnh->rnh_gen++;
575 	rib_notify(rnh, RIB_NOTIFY_IMMEDIATE, rc);
576 
577 	RIB_WUNLOCK(rnh);
578 
579 	rib_notify(rnh, RIB_NOTIFY_DELAYED, rc);
580 
581 	nhop_free(nh_orig);
582 
583 	return (0);
584 }
585 
586 int
587 change_route(struct rib_head *rnh, struct rt_addrinfo *info,
588     struct rib_cmd_info *rc)
589 {
590 	int error;
591 
592 	/* Check if updated gateway exists */
593 	if ((info->rti_flags & RTF_GATEWAY) &&
594 	    (info->rti_info[RTAX_GATEWAY] == NULL))
595 		return (EINVAL);
596 
597 	/*
598 	 * The route change is done in multiple steps, dropping and
599 	 * reacquiring the lock along the way. When multiple processes
600 	 * change the same route concurrently, the route can be modified
601 	 * between the steps. Address this by retrying the operation
602 	 * multiple times before failing.
603 	 */
604 	for (int i = 0; i < RIB_MAX_RETRIES; i++) {
605 		error = change_route_one(rnh, info, rc);
606 		if (error != EAGAIN)
607 			break;
608 	}
609 
610 	return (error);
611 }
612 
613 /*
614  * Performs the routing table modification specified by @action.
615  * Table is specified by @fibnum and sa_family in @info->rti_info[RTAX_DST].
616  * Needs to be run in network epoch.
617  *
618  * Returns 0 on success and fills in @rc with action result.
619  */
620 int
621 rib_action(uint32_t fibnum, int action, struct rt_addrinfo *info,
622     struct rib_cmd_info *rc)
623 {
624 	int error;
625 
626 	switch (action) {
627 	case RTM_ADD:
628 		error = rib_add_route(fibnum, info, rc);
629 		break;
630 	case RTM_DELETE:
631 		error = rib_del_route(fibnum, info, rc);
632 		break;
633 	case RTM_CHANGE:
634 		error = rib_change_route(fibnum, info, rc);
635 		break;
636 	default:
637 		error = ENOTSUP;
638 	}
639 
640 	return (error);
641 }
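
/*
 * Illustrative sketch: a hypothetical caller using rib_action() to dispatch
 * an RTM_DELETE, which is equivalent to calling rib_del_route() directly.
 * On success @rc describes what was removed (rc_rt, rc_nh_old) and those
 * pointers remain valid until the end of the current epoch.
 */
#if 0
static int
example_del_host_route(uint32_t fibnum, struct in_addr dst_addr)
{
	struct sockaddr_in dst;
	struct rt_addrinfo info;
	struct rib_cmd_info rc;
	struct epoch_tracker et;
	int error;

	bzero(&dst, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr = dst_addr;

	/* Host route: leave RTAX_NETMASK empty so the lookup is exact. */
	bzero(&info, sizeof(info));
	info.rti_flags = RTF_HOST;
	info.rti_info[RTAX_DST] = (struct sockaddr *)&dst;

	NET_EPOCH_ENTER(et);
	error = rib_action(fibnum, RTM_DELETE, &info, &rc);
	NET_EPOCH_EXIT(et);

	return (error);
}
#endif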
642 
643 
644 static void
645 rt_notifydelete(struct rtentry *rt, struct rt_addrinfo *info)
646 {
647 	struct ifaddr *ifa;
648 
649 	/*
650 	 * give the protocol a chance to keep things in sync.
651 	 */
652 	ifa = rt->rt_nhop->nh_ifa;
653 	if (ifa != NULL && ifa->ifa_rtrequest != NULL)
654 		ifa->ifa_rtrequest(RTM_DELETE, rt, rt->rt_nhop, info);
655 }
656 
657 struct rt_delinfo
658 {
659 	struct rt_addrinfo info;
660 	struct rib_head *rnh;
661 	struct rtentry *head;
662 	struct rib_cmd_info rc;
663 };
664 
665 /*
666  * Conditionally unlinks @rn from radix tree based
667  * on info data passed in @arg.
668  */
669 static int
670 rt_checkdelroute(struct radix_node *rn, void *arg)
671 {
672 	struct rt_delinfo *di;
673 	struct rt_addrinfo *info;
674 	struct rtentry *rt;
675 	int error;
676 
677 	di = (struct rt_delinfo *)arg;
678 	rt = (struct rtentry *)rn;
679 	info = &di->info;
680 	error = 0;
681 
682 	info->rti_info[RTAX_DST] = rt_key(rt);
683 	info->rti_info[RTAX_NETMASK] = rt_mask(rt);
684 	info->rti_info[RTAX_GATEWAY] = &rt->rt_nhop->gw_sa;
685 
686 	rt = rt_unlinkrte(di->rnh, info, &error);
687 	if (rt == NULL) {
688 		/* Either not allowed or not matched. Skip entry */
689 		return (0);
690 	}
691 
692 	/* Entry was unlinked. Notify subscribers */
693 	di->rnh->rnh_gen++;
694 	di->rc.rc_rt = rt;
695 	di->rc.rc_nh_old = rt->rt_nhop;
696 	rib_notify(di->rnh, RIB_NOTIFY_IMMEDIATE, &di->rc);
697 
698 	/* Add to the list and return */
699 	rt->rt_chain = di->head;
700 	di->head = rt;
701 
702 	return (0);
703 }
704 
705 /*
706  * Iterates over the routing table specified by @fibnum and @family and
707  *  deletes the elements matched by @filter_f.
708  * @fibnum: rtable id
709  * @family: AF_ address family
710  * @filter_f: function returning non-zero value for items to delete
711  * @arg: data to pass to the @filter_f function
712  * @report: true if rtsock notification is needed.
713  */
714 void
715 rib_walk_del(u_int fibnum, int family, rt_filter_f_t *filter_f, void *arg, bool report)
716 {
717 	struct rib_head *rnh;
718 	struct rt_delinfo di;
719 	struct rtentry *rt;
720 	struct epoch_tracker et;
721 
722 	rnh = rt_tables_get_rnh(fibnum, family);
723 	if (rnh == NULL)
724 		return;
725 
726 	bzero(&di, sizeof(di));
727 	di.info.rti_filter = filter_f;
728 	di.info.rti_filterdata = arg;
729 	di.rnh = rnh;
730 	di.rc.rc_cmd = RTM_DELETE;
731 
732 	NET_EPOCH_ENTER(et);
733 
734 	RIB_WLOCK(rnh);
735 	rnh->rnh_walktree(&rnh->head, rt_checkdelroute, &di);
736 	RIB_WUNLOCK(rnh);
737 
738 	/* We might have something to reclaim. */
739 	while (di.head != NULL) {
740 		rt = di.head;
741 		di.head = rt->rt_chain;
742 		rt->rt_chain = NULL;
743 
744 		di.rc.rc_rt = rt;
745 		di.rc.rc_nh_old = rt->rt_nhop;
746 		rib_notify(rnh, RIB_NOTIFY_DELAYED, &di.rc);
747 
748 		/* TODO std rt -> rt_addrinfo export */
749 		di.info.rti_info[RTAX_DST] = rt_key(rt);
750 		di.info.rti_info[RTAX_NETMASK] = rt_mask(rt);
751 
752 		rt_notifydelete(rt, &di.info);
753 
754 		if (report)
755 			rt_routemsg(RTM_DELETE, rt, rt->rt_nhop->nh_ifp, 0,
756 			    fibnum);
757 		rtfree(rt);
758 	}
759 
760 	NET_EPOCH_EXIT(et);
761 }
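
/*
 * Illustrative sketch: a filter for rib_walk_del() following the in-tree
 * pattern of matching routes by their nexthop interface.  The function
 * names here are hypothetical; the filter returns non-zero for entries
 * that should be deleted.
 */
#if 0
static int
example_filter_by_ifp(const struct rtentry *rt, const struct nhop_object *nh,
    void *arg)
{
	return (nh->nh_ifp == (struct ifnet *)arg);
}

static void
example_purge_ifp_routes(u_int fibnum, struct ifnet *ifp)
{
	/* Delete every matching IPv4 route and report it via rtsock. */
	rib_walk_del(fibnum, AF_INET, example_filter_by_ifp, ifp, true);
}
#endif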
762 
763 static void
764 rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
765     struct rib_cmd_info *rc)
766 {
767 	struct rib_subscription *rs;
768 
769 	CK_STAILQ_FOREACH(rs, &rnh->rnh_subscribers, next) {
770 		if (rs->type == type)
771 			rs->func(rnh, rc, rs->arg);
772 	}
773 }
774 
775 /*
776  * Subscribes to changes in the routing table specified by @fibnum and
777  *  @family.
778  * Needs to be run in network epoch.
779  *
780  * Returns pointer to the subscription structure on success.
781  */
782 struct rib_subscription *
783 rib_subscribe(uint32_t fibnum, int family, rib_subscription_cb_t *f, void *arg,
784     enum rib_subscription_type type, int waitok)
785 {
786 	struct rib_head *rnh;
787 	struct rib_subscription *rs;
788 	int flags = M_ZERO | (waitok ? M_WAITOK : 0);
789 
790 	NET_EPOCH_ASSERT();
791 	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
792 	rnh = rt_tables_get_rnh(fibnum, family);
793 
794 	rs = malloc(sizeof(struct rib_subscription), M_RTABLE, flags);
795 	if (rs == NULL)
796 		return (NULL);
797 
798 	rs->func = f;
799 	rs->arg = arg;
800 	rs->type = type;
801 
802 	RIB_WLOCK(rnh);
803 	CK_STAILQ_INSERT_TAIL(&rnh->rnh_subscribers, rs, next);
804 	RIB_WUNLOCK(rnh);
805 
806 	return (rs);
807 }
808 
809 /*
810  * Remove rtable subscription @rs from the table specified by @fibnum
811  *  and @family.
812  * Needs to be run in network epoch.
813  *
814  * Returns 0 on success.
815  */
816 int
817 rib_unsibscribe(uint32_t fibnum, int family, struct rib_subscription *rs)
818 {
819 	struct rib_head *rnh;
820 
821 	NET_EPOCH_ASSERT();
822 	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
823 	rnh = rt_tables_get_rnh(fibnum, family);
824 
825 	if (rnh == NULL)
826 		return (ENOENT);
827 
828 	RIB_WLOCK(rnh);
829 	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);
830 	RIB_WUNLOCK(rnh);
831 
832 	epoch_call(net_epoch_preempt, destroy_subscription_epoch,
833 	    &rs->epoch_ctx);
834 
835 	return (0);
836 }
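
/*
 * Illustrative sketch: a hypothetical subscriber logging immediate-stage
 * changes in the IPv4 table of the default fib.  RIB_NOTIFY_IMMEDIATE
 * callbacks are invoked with the RIB lock held, so they must not sleep.
 */
#if 0
static struct rib_subscription *example_rs;

static void
example_route_cb(struct rib_head *rnh, struct rib_cmd_info *rc, void *arg)
{
	/* rc describes the operation: RTM_ADD, RTM_DELETE or RTM_CHANGE. */
	log(LOG_DEBUG, "rtable %u: cmd %d\n", rnh->rib_fibnum, rc->rc_cmd);
}

static void
example_start_watching(void)
{
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	example_rs = rib_subscribe(RT_DEFAULT_FIB, AF_INET, example_route_cb,
	    NULL, RIB_NOTIFY_IMMEDIATE, 1);
	NET_EPOCH_EXIT(et);
}
#endif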
837 
838 /*
839  * Epoch callback indicating subscription is safe to destroy
840  */
841 static void
842 destroy_subscription_epoch(epoch_context_t ctx)
843 {
844 	struct rib_subscription *rs;
845 
846 	rs = __containerof(ctx, struct rib_subscription, epoch_ctx);
847 
848 	free(rs, M_RTABLE);
849 }
850