/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021-2022 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/nhop.h>

/*
 * Per-subscriber state, linked into the rib_head subscriber list.
 */
struct rib_subscription {
	CK_STAILQ_ENTRY(rib_subscription)	next;
	rib_subscription_cb_t			*func;	/* notification callback */
	void					*arg;	/* opaque argument passed to func */
	struct rib_head				*rnh;	/* table this subscription is attached to */
	enum rib_subscription_type		type;	/* notification type to deliver */
	struct epoch_context			epoch_ctx; /* deferred free via NET_EPOCH_CALL() */
};

static void destroy_subscription_epoch(epoch_context_t ctx);

void
rib_notify(struct rib_head *rnh, enum rib_subscription_type type,
    struct rib_cmd_info *rc)
{
	struct rib_subscription *rs;

	CK_STAILQ_FOREACH(rs, &rnh->rnh_subscribers, next) {
		if (rs->type == type)
			rs->func(rnh, rc, rs->arg);
	}
}

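/*
 * Illustrative sketch, not part of the original file: a subscriber callback
 * has to match rib_subscription_cb_t, which is the shape rib_notify() above
 * invokes.  The function name below is hypothetical; rc_cmd is assumed to
 * carry the RTM_ADD/RTM_CHANGE/RTM_DELETE command from struct rib_cmd_info.
 *
 *	static void
 *	example_route_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
 *	    void *arg)
 *	{
 *		switch (rc->rc_cmd) {
 *		case RTM_ADD:
 *		case RTM_CHANGE:
 *		case RTM_DELETE:
 *			break;
 *		}
 *	}
 *
 * The callback typically runs with the table lock held, so it should be
 * fast and must not sleep.
 */
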
static struct rib_subscription *
allocate_subscription(rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	int flags = M_ZERO | (waitok ? M_WAITOK : M_NOWAIT);

	rs = malloc(sizeof(struct rib_subscription), M_RTABLE, flags);
	if (rs == NULL)
		return (NULL);

	rs->func = f;
	rs->arg = arg;
	rs->type = type;

	return (rs);
}

/*
 * Subscribes to changes in the routing table specified by @fibnum and
 * @family.
 *
 * Returns a pointer to the subscription structure on success or NULL
 * on failure.
 */
struct rib_subscription *
rib_subscribe(uint32_t fibnum, int family, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_head *rnh;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	KASSERT((fibnum < rt_numfibs), ("%s: bad fibnum", __func__));
	rnh = rt_tables_get_rnh(fibnum, family);
	NET_EPOCH_EXIT(et);

	return (rib_subscribe_internal(rnh, f, arg, type, waitok));
}

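/*
 * Illustrative usage sketch, not part of the original file.  The consumer
 * code below is hypothetical; it assumes the RIB_NOTIFY_IMMEDIATE
 * subscription type and the example_route_cb callback sketched earlier,
 * and subscribes to IPv4 changes in the default FIB:
 *
 *	struct rib_subscription *sub;
 *	struct epoch_tracker et;
 *
 *	sub = rib_subscribe(RT_DEFAULT_FIB, AF_INET, example_route_cb, NULL,
 *	    RIB_NOTIFY_IMMEDIATE, true);
 *	if (sub == NULL)
 *		return (ENOMEM);
 *
 *	...
 *
 *	NET_EPOCH_ENTER(et);
 *	rib_unsubscribe(sub);
 *	NET_EPOCH_EXIT(et);
 */
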
struct rib_subscription *
rib_subscribe_internal(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type, bool waitok)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	if ((rs = allocate_subscription(f, arg, type, waitok)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);

	return (rs);
}

struct rib_subscription *
rib_subscribe_locked(struct rib_head *rnh, rib_subscription_cb_t *f, void *arg,
    enum rib_subscription_type type)
{
	struct rib_subscription *rs;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	if ((rs = allocate_subscription(f, arg, type, false)) == NULL)
		return (NULL);
	rs->rnh = rnh;

	CK_STAILQ_INSERT_HEAD(&rnh->rnh_subscribers, rs, next);

	return (rs);
}

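/*
 * Illustrative sketch, not part of the original file: the _locked variant
 * is for hypothetical callers that already run inside the network epoch
 * and hold the table write lock, e.g. while doing other work on @rnh:
 *
 *	NET_EPOCH_ENTER(et);
 *	RIB_WLOCK(rnh);
 *	sub = rib_subscribe_locked(rnh, example_route_cb, NULL,
 *	    RIB_NOTIFY_IMMEDIATE);
 *	RIB_WUNLOCK(rnh);
 *	NET_EPOCH_EXIT(et);
 *
 * Since the allocation in this path uses M_NOWAIT, @sub may be NULL and
 * the caller has to handle that.
 */
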
/*
 * Removes the rtable subscription @rs from its routing table and schedules
 * @rs to be freed once it is no longer visible to concurrent readers.
 * Needs to be run within the network epoch.
 */
void
rib_unsubscribe(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();

	RIB_WLOCK(rnh);
	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);
	RIB_WUNLOCK(rnh);

	NET_EPOCH_CALL(destroy_subscription_epoch, &rs->epoch_ctx);
}

void
rib_unsubscribe_locked(struct rib_subscription *rs)
{
	struct rib_head *rnh = rs->rnh;

	NET_EPOCH_ASSERT();
	RIB_WLOCK_ASSERT(rnh);

	CK_STAILQ_REMOVE(&rnh->rnh_subscribers, rs, rib_subscription, next);

	NET_EPOCH_CALL(destroy_subscription_epoch, &rs->epoch_ctx);
}

/*
 * Epoch callback run once the unlinked subscription is safe to free.
 */
static void
destroy_subscription_epoch(epoch_context_t ctx)
{
	struct rib_subscription *rs;

	rs = __containerof(ctx, struct rib_subscription, epoch_ctx);

	free(rs, M_RTABLE);
}

void
rib_init_subscriptions(struct rib_head *rnh)
{

	CK_STAILQ_INIT(&rnh->rnh_subscribers);
}

void
rib_destroy_subscriptions(struct rib_head *rnh)
{
	struct rib_subscription *rs;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	RIB_WLOCK(rnh);
	while ((rs = CK_STAILQ_FIRST(&rnh->rnh_subscribers)) != NULL) {
		CK_STAILQ_REMOVE_HEAD(&rnh->rnh_subscribers, next);
		NET_EPOCH_CALL(destroy_subscription_epoch, &rs->epoch_ctx);
	}
	RIB_WUNLOCK(rnh);
	NET_EPOCH_EXIT(et);
}