xref: /freebsd/sys/netinet/in_rmx.c (revision 17ee9d00bc1ae1e598c38f25826f861e4bc6c3ce)
1 /*
2  * Copyright 1994, 1995 Massachusetts Institute of Technology
3  *
4  * Permission to use, copy, modify, and distribute this software and
5  * its documentation for any purpose and without fee is hereby
6  * granted, provided that both the above copyright notice and this
7  * permission notice appear in all copies, that both the above
8  * copyright notice and this permission notice appear in all
9  * supporting documentation, and that the name of M.I.T. not be used
10  * in advertising or publicity pertaining to distribution of the
11  * software without specific, written prior permission.  M.I.T. makes
12  * no representations about the suitability of this software for any
13  * purpose.  It is provided "as is" without express or implied
14  * warranty.
15  *
16  * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
17  * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20  * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $Id: in_rmx.c,v 1.10 1995/02/14 23:11:26 wollman Exp $
30  */
31 
32 /*
33  * This code does two things necessary for the enhanced TCP metrics to
34  * function in a useful manner:
35  *  1) It marks all non-host routes as `cloning', thus ensuring that
36  *     every actual reference to such a route actually gets turned
37  *     into a reference to a host route to the specific destination
38  *     requested.
39  *  2) When such routes lose all their references, it arranges for them
40  *     to be deleted in some random collection of circumstances, so that
41  *     a large quantity of stale routing data is not kept in kernel memory
42  *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
43  */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/queue.h>
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
51 #include <sys/mbuf.h>
52 #include <sys/syslog.h>
53 
54 #include <net/if.h>
55 #include <net/route.h>
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/in_var.h>
59 
60 #define RTPRF_OURS		RTF_PROTO3	/* set on routes we manage */
61 
62 /*
63  * Do what we need to do when inserting a route.
64  */
65 static struct radix_node *
66 in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
67 	    struct radix_node *treenodes)
68 {
69 	struct rtentry *rt = (struct rtentry *)treenodes;
70 
71 	/*
72 	 * For IP, all unicast non-host routes are automatically cloning.
73 	 */
74 	if(!(rt->rt_flags & (RTF_HOST | RTF_CLONING))) {
75 		struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
76 		if(!IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
77 			rt->rt_flags |= RTF_PRCLONING;
78 		}
79 	}
80 
81 	return rn_addroute(v_arg, n_arg, head, treenodes);
82 }
83 
84 /*
85  * This code is the inverse of in_clsroute: on first reference, if we
86  * were managing the route, stop doing so and set the expiration timer
87  * back off again.
88  */
89 static struct radix_node *
90 in_matroute(void *v_arg, struct radix_node_head *head)
91 {
92 	struct radix_node *rn = rn_match(v_arg, head);
93 	struct rtentry *rt = (struct rtentry *)rn;
94 
95 	if(rt && rt->rt_refcnt == 0) { /* this is first reference */
96 		if(rt->rt_flags & RTPRF_OURS) {
97 			rt->rt_flags &= ~RTPRF_OURS;
98 			rt->rt_rmx.rmx_expire = 0;
99 		}
100 	}
101 	return rn;
102 }
103 
/*
 * MIB variables: net.inet.ip.{rtexpire,rtmaxcache,rtminexpire}.
 * rtq_reallyold is the lifetime granted to an unreferenced cloned
 * route; when more than rtq_toomany such routes are cached,
 * in_rtqtimo() cranks rtq_reallyold down, but never below
 * rtq_minreallyold.
 */
int rtq_reallyold = 60*60;	/* one hour is ``really old'' */
int rtq_toomany = 128;		/* 128 cached routes is ``too many'' */
int rtq_minreallyold = 10;	/* never automatically crank down to less */
108 
109 /*
110  * On last reference drop, mark the route as belong to us so that it can be
111  * timed out.
112  */
113 static void
114 in_clsroute(struct radix_node *rn, struct radix_node_head *head)
115 {
116 	struct rtentry *rt = (struct rtentry *)rn;
117 
118 	if(!(rt->rt_flags & RTF_UP))
119 		return;		/* prophylactic measures */
120 
121 	if((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
122 		return;
123 
124 	if((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS))
125 	   != RTF_WASCLONED)
126 		return;
127 
128 	/*
129 	 * As requested by David Greenman:
130 	 * If rtq_reallyold is 0, just delete the route without
131 	 * waiting for a timeout cycle to kill it.
132 	 */
133 	if(rtq_reallyold != 0) {
134 		rt->rt_flags |= RTPRF_OURS;
135 		rt->rt_rmx.rmx_expire = time.tv_sec + rtq_reallyold;
136 	} else {
137 		rtrequest(RTM_DELETE,
138 			  (struct sockaddr *)rt_key(rt),
139 			  rt->rt_gateway, rt_mask(rt),
140 			  rt->rt_flags, 0);
141 	}
142 }
143 
/*
 * Argument/result block for the in_rtqkill() tree walk, shared by
 * in_rtqtimo() and in_rtqdrain().
 */
struct rtqk_arg {
	struct radix_node_head *rnh;	/* table being walked */
	int draining;	/* in: nonzero = delete regardless of expiry */
	int killed;	/* out: number of routes deleted by this walk */
	int found;	/* out: number of RTPRF_OURS routes encountered */
	int updating;	/* in: nonzero = clamp expiry to rtq_reallyold */
	time_t nextstop;	/* in/out: earliest remaining expiration */
};
152 
153 /*
154  * Get rid of old routes.  When draining, this deletes everything, even when
155  * the timeout is not expired yet.  When updating, this makes sure that
156  * nothing has a timeout longer than the current value of rtq_reallyold.
157  */
158 static int
159 in_rtqkill(struct radix_node *rn, void *rock)
160 {
161 	struct rtqk_arg *ap = rock;
162 	struct radix_node_head *rnh = ap->rnh;
163 	struct rtentry *rt = (struct rtentry *)rn;
164 	int err;
165 
166 	if(rt->rt_flags & RTPRF_OURS) {
167 		ap->found++;
168 
169 		if(ap->draining || rt->rt_rmx.rmx_expire <= time.tv_sec) {
170 			if(rt->rt_refcnt > 0)
171 				panic("rtqkill route really not free\n");
172 
173 			err = rtrequest(RTM_DELETE,
174 					(struct sockaddr *)rt_key(rt),
175 					rt->rt_gateway, rt_mask(rt),
176 					rt->rt_flags, 0);
177 			if(err) {
178 				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
179 			} else {
180 				ap->killed++;
181 			}
182 		} else {
183 			if(ap->updating
184 			   && (time.tv_sec - rt->rt_rmx.rmx_expire
185 			       > rtq_reallyold)) {
186 				rt->rt_rmx.rmx_expire = time.tv_sec
187 					+ rtq_reallyold;
188 			}
189 			ap->nextstop = lmin(ap->nextstop,
190 					    rt->rt_rmx.rmx_expire);
191 		}
192 	}
193 
194 	return 0;
195 }
196 
#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
/* Upper bound (seconds) on how far out in_rtqtimo() reschedules itself. */
int rtq_timeout = RTQ_TIMEOUT;
199 
200 static void
201 in_rtqtimo(void *rock)
202 {
203 	struct radix_node_head *rnh = rock;
204 	struct rtqk_arg arg;
205 	struct timeval atv;
206 	static time_t last_adjusted_timeout = 0;
207 	int s;
208 
209 	arg.found = arg.killed = 0;
210 	arg.rnh = rnh;
211 	arg.nextstop = time.tv_sec + rtq_timeout;
212 	arg.draining = arg.updating = 0;
213 	s = splnet();
214 	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
215 	splx(s);
216 
217 	/*
218 	 * Attempt to be somewhat dynamic about this:
219 	 * If there are ``too many'' routes sitting around taking up space,
220 	 * then crank down the timeout, and see if we can't make some more
221 	 * go away.  However, we make sure that we will never adjust more
222 	 * than once in rtq_timeout seconds, to keep from cranking down too
223 	 * hard.
224 	 */
225 	if((arg.found - arg.killed > rtq_toomany)
226 	   && (time.tv_sec - last_adjusted_timeout >= rtq_timeout)
227 	   && rtq_reallyold > rtq_minreallyold) {
228 		rtq_reallyold = 2*rtq_reallyold / 3;
229 		if(rtq_reallyold < rtq_minreallyold) {
230 			rtq_reallyold = rtq_minreallyold;
231 		}
232 
233 		last_adjusted_timeout = time.tv_sec;
234 		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
235 		    rtq_reallyold);
236 		arg.found = arg.killed = 0;
237 		arg.updating = 1;
238 		s = splnet();
239 		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
240 		splx(s);
241 	}
242 
243 	atv.tv_usec = 0;
244 	atv.tv_sec = arg.nextstop;
245 	timeout(in_rtqtimo, rock, hzto(&atv));
246 }
247 
248 void
249 in_rtqdrain(void)
250 {
251 	struct radix_node_head *rnh = rt_tables[AF_INET];
252 	struct rtqk_arg arg;
253 	int s;
254 	arg.found = arg.killed = 0;
255 	arg.rnh = rnh;
256 	arg.nextstop = 0;
257 	arg.draining = 1;
258 	arg.updating = 0;
259 	s = splnet();
260 	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
261 	splx(s);
262 }
263 
264 /*
265  * Initialize our routing tree.
266  */
267 int
268 in_inithead(void **head, int off)
269 {
270 	struct radix_node_head *rnh;
271 
272 	if(!rn_inithead(head, off))
273 		return 0;
274 
275 	if(head != (void **)&rt_tables[AF_INET]) /* BOGUS! */
276 		return 1;	/* only do this for the real routing table */
277 
278 	rnh = *head;
279 	rnh->rnh_addaddr = in_addroute;
280 	rnh->rnh_matchaddr = in_matroute;
281 	rnh->rnh_close = in_clsroute;
282 	in_rtqtimo(rnh);	/* kick off timeout first time */
283 	return 1;
284 }
285 
286