xref: /linux/net/ipv4/inetpeer.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operation.
 *  We keep one entry for each peer IP address.  The nodes contain long-lived
 *  information about the peer which doesn't depend on routes.
 *  At the moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing, the identifier of an IP packet is generated to be
 *  unpredictable using this code only for packets subjected (actually or
 *  potentially) to defragmentation.  I.e. DF packets smaller than the PMTU
 *  use a constant ID and do not use this code (see ip_select_ident() in
 *  include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via a lookup by destination IP address
 *  in the AVL tree.  The reference is grabbed only when it's needed, i.e.
 *  only when we try to output an IP packet which needs an unpredictable ID
 *  (see __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The least-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks based on creating hash collisions.
 *  A huge number of long-lived nodes in a single hash slot would
 *  significantly delay lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer field modifications:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		unused_next, unused_prevp: unused node list lock
 *		refcnt: atomic against modifications on other CPUs;
 *		   usually under some other lock to prevent the node from
 *		   disappearing
 *		dtime: unused node list lock
 *		v4daddr: unchangeable
 *		ip_id_count: idlock
 */
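
/*
 *  Illustrative sketch (not part of this file): how a caller that needs an
 *  unpredictable IP ID might use the pool.  The two-argument inet_getid()
 *  signature is assumed from include/net/inetpeer.h of this era; a real
 *  caller such as __ip_select_ident() in net/ipv4/route.c typically keeps
 *  the reference in its route cache entry instead of dropping it at once.
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer(iph->daddr, 1);
 *	if (peer != NULL) {
 *		iph->id = htons(inet_getid(peer, 0));
 *		inet_putpeer(peer);
 *	}
 */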

/* Exported for the inet_getid() inline function.  */
DEFINE_SPINLOCK(inet_peer_idlock);

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
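/*
 * peer_fake_node serves as the "empty" sentinel: it is its own left and
 * right child and has height 0, so node_height(peer_avl_empty) == 0 and
 * the lookup/rebalance code never has to special-case NULL pointers.
 */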
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */

static struct inet_peer *inet_peer_unused_head;
static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
static DEFINE_SPINLOCK(inet_peer_unused_lock);

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);

/* Exported for sysctl_net_ipv4.  */
int inet_peer_gc_mintime = 10 * HZ,
    inet_peer_gc_maxtime = 120 * HZ;
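/*
 * The five knobs above marked "Exported for sysctl_net_ipv4" are wired up
 * in net/ipv4/sysctl_net_ipv4.c and (at the time of writing) appear under
 * /proc/sys/net/ipv4/, e.g. /proc/sys/net/ipv4/inet_peer_threshold.  Note
 * that the TTL and GC interval values are stored here in jiffies.
 */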

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the raw interface to obtain memory information. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
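	/*
	 * Worked example of the scaling above: inet_peer_threshold starts at
	 * 65536 + 128 = 65664 entries.  On a machine with more than 32MB of
	 * RAM it stays there; with 16MB < RAM <= 32MB it is halved once to
	 * 32832; with 8MB < RAM <= 16MB it becomes 16416; and with RAM <= 8MB
	 * all three tests match, giving 65664 >> 4 = 4104 entries.
	 */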

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
			NULL, NULL);

	/* All the timers started at system startup tend to synchronize.
	 * Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (p->unused_prevp != NULL) {
		/* On unused list. */
		*p->unused_prevp = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark it as removed */
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}
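
/*
 * The unused list is a singly linked list with a tail pointer.  A node's
 * unused_prevp points back at whichever pointer references it (the list
 * head or the previous node's unused_next), so unlinking needs no list
 * walk, and unused_prevp == NULL means "not on the list".
 */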

/* Called with local BH disabled and the pool lock held. */
#define lookup(daddr) 						\
({								\
	struct inet_peer *u, **v;				\
	stackptr = stack;					\
	*stackptr++ = &peer_root;				\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
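/*
 * On return from lookup(), stack[0] .. stackptr[-1] hold the addresses of
 * the pointers that were followed from &peer_root down to the slot where
 * the node was found (or where a new node would have to be linked), ready
 * to be replayed by link_to_pool() and peer_avl_rebalance().
 */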

/* Called with local BH disabled and the pool write lock held. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
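/*
 * lookup_rightempty(start) walks to the rightmost node of start's left
 * subtree, i.e. start's in-order predecessor, extending the stack left by
 * a previous lookup() so that the path can be rebalanced after the
 * predecessor is spliced into start's place.
 */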

/* Called with local BH disabled and the pool write lock held.
 * Variable names are the proof of operation correctness.
 * Look at mm/map_avl.c for a more detailed description of the ideas.  */
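/*
 * Sketch of the two left-heavy cases handled below (the right-heavy branch
 * is the mirror image).  Single rotation, when ll is at least as high as lr:
 *
 *	      node                  l
 *	      /  \                /   \
 *	     l    r     --->    ll    node
 *	    / \                       /  \
 *	  ll   lr                   lr    r
 *
 * Double rotation, when lr is higher than ll:
 *
 *	      node                   lr
 *	      /  \                 /    \
 *	     l    r     --->      l     node
 *	    / \                  / \    /  \
 *	  ll   lr              ll  lrl lrr   r
 *	      /  \
 *	    lrl  lrr
 */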
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)
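/*
 * link_to_pool() pairs with a preceding lookup() that returned
 * peer_avl_empty: the new node is written into the empty slot left on the
 * stack, and the recorded path is then rebalanced bottom-up.
 */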

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			BUG_ON(*stackptr[-1] != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup_once -> unlink_from_unused
		 *   -> unlink_from_pool -> inet_putpeer (relink to unused
		 *   list) -> cleanup_once (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	p = inet_peer_unused_head;
	if (p != NULL) {
		__u32 delta = (__u32)jiffies - p->dtime;
		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}
		inet_peer_unused_head = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark as not on the list */
		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}
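
/*
 * cleanup_once() returns 0 when it managed to free one least-recently-used
 * entry and -1 when there is nothing suitable to remove (the list is empty
 * or its head is younger than the requested ttl in jiffies).  The entry's
 * dtime stamp is set in inet_putpeer() when the refcount drops to zero.
 */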

/* Called with or without local BH being disabled. */
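/*
 * Lookup strategy: first try an optimistic lookup under the read lock and
 * take a reference if the node is already there.  Otherwise allocate a new
 * node outside of any lock, then retake the pool lock for writing and look
 * up again, since another CPU may have inserted the same address in the
 * meantime; the loser of that race frees its preallocated node.
 */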
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found. */
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	atomic_set(&n->rid, 0);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	n->unused_prevp = NULL; /* not on the list */
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one least-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl;

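	/*
	 * The effective ttl interpolates linearly between inet_peer_maxttl
	 * (empty pool) and inet_peer_minttl (pool at the threshold):
	 * ttl = maxttl - (maxttl - minttl) * peer_total / threshold.
	 * Dividing by HZ before multiplying by peer_total, and restoring the
	 * HZ factor at the end, presumably keeps the intermediate product
	 * from overflowing a 32-bit int.
	 */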
	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Re-arm the timer after an interval between inet_peer_gc_mintime and
	 * inet_peer_gc_maxtime, depending on the total number of entries
	 * (more entries, shorter interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

void inet_putpeer(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (atomic_dec_and_test(&p->refcnt)) {
		p->unused_prevp = inet_peer_unused_tailp;
		p->unused_next = NULL;
		*inet_peer_unused_tailp = p;
		inet_peer_unused_tailp = &p->unused_next;
		p->dtime = (__u32)jiffies;
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}