xref: /linux/net/ipv4/inetpeer.c (revision 12871a0bd67dd4db4418e1daafcd46e9d329ef10)
1 /*
2  *		INETPEER - A storage for permanent information about peers
3  *
4  *  This source is covered by the GNU GPL, the same as all kernel sources.
5  *
6  *  Authors:	Andrey V. Savochkin <saw@msu.ru>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/interrupt.h>
13 #include <linux/spinlock.h>
14 #include <linux/random.h>
15 #include <linux/timer.h>
16 #include <linux/time.h>
17 #include <linux/kernel.h>
18 #include <linux/mm.h>
19 #include <linux/net.h>
20 #include <net/ip.h>
21 #include <net/inetpeer.h>
22 
23 /*
24  *  Theory of operations.
25  *  We keep one entry for each peer IP address.  Each node contains
26  *  long-living information about the peer which doesn't depend on routes.
27  *  At this moment this information consists only of the ID field for the
28  *  next outgoing IP packet.  This field is incremented with each packet as
29  *  encoded in the inet_getid() function (include/net/inetpeer.h).
30  *  At the time of writing these notes, the identifier of IP packets is
31  *  generated to be unpredictable using this code only for packets subjected
32  *  (actually or potentially) to defragmentation.  I.e. DF packets smaller
33  *  than the PMTU use a constant ID and do not use this code (see
34  *  ip_select_ident() in include/net/ip.h).
35  *
36  *  Route cache entries hold references to our nodes.
37  *  New cache entries get references via lookup by destination IP address in
38  *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
39  *  when we try to output an IP packet which needs an unpredictable ID (see
40  *  __ip_select_ident() in net/ipv4/route.c).
41  *  Nodes are removed only when their reference counter goes to 0.
42  *  Once that has happened, the node may be removed when a sufficient amount
43  *  of time has passed since its last use.  The less-recently-used entry can
44  *  also be removed if the pool is overloaded, i.e. if the total number of
45  *  entries is greater than or equal to the threshold.
46  *
47  *  The node pool is organised as an AVL tree.
48  *  Such an implementation has been chosen not just for fun.  It's a way to
49  *  prevent easy and efficient DoS attacks based on creating hash collisions.
50  *  A huge number of long-living nodes in a single hash slot would
51  *  significantly delay lookups performed with BHs disabled.
52  *
53  *  Serialisation issues.
54  *  1.  Nodes may appear in the tree only with the pool lock held.
55  *  2.  Nodes may disappear from the tree only with the pool lock held
56  *      AND reference count being 0.
57  *  3.  Nodes appear on and disappear from the unused node list only
58  *      under the unused list lock (unused_peers.lock).
59  *  4.  Global variable peer_total is modified under the pool lock.
60  *  5.  struct inet_peer fields modification:
61  *		avl_left, avl_right, avl_parent, avl_height: pool lock
62  *		unused: unused node list lock
63  *		refcnt: atomically against modifications on other CPU;
64  *		   usually under some other lock to prevent node disappearing
65  *		dtime: unused node list lock
66  *		daddr: unchangeable
67  *		ip_id_count: atomic value (no lock needed)
68  */
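/* A minimal caller sketch (illustrative only, not part of this file),
 * assuming an IPv4 destination held in a __be32 variable "ip":
 *
 *	struct inetpeer_addr a = { .family = AF_INET };
 *	struct inet_peer *peer;
 *
 *	a.addr.a4 = ip;
 *	peer = inet_getpeer(&a, 1);	(create the node if missing, GFP_ATOMIC)
 *	if (peer) {
 *		... read or update per-peer state, e.g. peer->rate_tokens ...
 *		inet_putpeer(peer);	(drop the reference when done)
 *	}
 */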
69 
70 static struct kmem_cache *peer_cachep __read_mostly;
71 
72 #define node_height(x) x->avl_height
73 
74 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
75 #define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
76 static const struct inet_peer peer_fake_node = {
77 	.avl_left	= peer_avl_empty_rcu,
78 	.avl_right	= peer_avl_empty_rcu,
79 	.avl_height	= 0
80 };
81 
82 struct inet_peer_base {
83 	struct inet_peer __rcu *root;
84 	seqlock_t	lock;
85 	int		total;
86 };
87 
88 static struct inet_peer_base v4_peers = {
89 	.root		= peer_avl_empty_rcu,
90 	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
91 	.total		= 0,
92 };
93 
94 static struct inet_peer_base v6_peers = {
95 	.root		= peer_avl_empty_rcu,
96 	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
97 	.total		= 0,
98 };
99 
100 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
101 
102 /* Exported for sysctl_net_ipv4.  */
103 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
104 					 * aggressively at this stage */
105 int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
106 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
107 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
108 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
109 
110 static struct {
111 	struct list_head	list;
112 	spinlock_t		lock;
113 } unused_peers = {
114 	.list			= LIST_HEAD_INIT(unused_peers.list),
115 	.lock			= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
116 };
117 
118 static void peer_check_expire(unsigned long dummy);
119 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
120 
121 
122 /* Called from ip_output.c:ip_init  */
123 void __init inet_initpeers(void)
124 {
125 	struct sysinfo si;
126 
127 	/* Use the direct interface to memory information. */
128 	si_meminfo(&si);
129 	/* The values below were suggested by Alexey Kuznetsov
130 	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
131 	 * myself.  --SAW
132 	 */
133 	if (si.totalram <= (32768*1024)/PAGE_SIZE)
134 		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
135 	if (si.totalram <= (16384*1024)/PAGE_SIZE)
136 		inet_peer_threshold >>= 1; /* about 512KB */
137 	if (si.totalram <= (8192*1024)/PAGE_SIZE)
138 		inet_peer_threshold >>= 2; /* about 128KB */
139 
140 	peer_cachep = kmem_cache_create("inet_peer_cache",
141 			sizeof(struct inet_peer),
142 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
143 			NULL);
144 
145 	/* All the timers started at system startup tend
146 	   to synchronize.  Perturb this one a bit.
147 	 */
148 	peer_periodic_timer.expires = jiffies
149 		+ net_random() % inet_peer_gc_maxtime
150 		+ inet_peer_gc_maxtime;
151 	add_timer(&peer_periodic_timer);
152 }
153 
154 /* Called with or without local BH being disabled. */
155 static void unlink_from_unused(struct inet_peer *p)
156 {
157 	spin_lock_bh(&unused_peers.lock);
158 	list_del_init(&p->unused);
159 	spin_unlock_bh(&unused_peers.lock);
160 }
161 
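/* Compare two peer addresses as arrays of 32-bit words: one word for
 * AF_INET, four for AF_INET6.  Returns -1, 0 or 1; the resulting order
 * only needs to be consistent, which is all the AVL tree requires.
 */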
162 static int addr_compare(const struct inetpeer_addr *a,
163 			const struct inetpeer_addr *b)
164 {
165 	int i, n = (a->family == AF_INET ? 1 : 4);
166 
167 	for (i = 0; i < n; i++) {
168 		if (a->addr.a6[i] == b->addr.a6[i])
169 			continue;
170 		if (a->addr.a6[i] < b->addr.a6[i])
171 			return -1;
172 		return 1;
173 	}
174 
175 	return 0;
176 }
177 
178 #define rcu_deref_locked(X, BASE)				\
179 	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
180 
181 /*
182  * Called with local BH disabled and the pool lock held.
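 *  Walks the tree looking for _daddr, recording the address of each link
 *  followed (starting with &_base->root) in _stack; the caller-provided
 *  "stackptr" cursor is left one past the last recorded slot, which is
 *  where link_to_pool() and peer_avl_rebalance() pick up.
 *  Evaluates to the matching node, or peer_avl_empty if there is none.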
183  */
184 #define lookup(_daddr, _stack, _base)				\
185 ({								\
186 	struct inet_peer *u;					\
187 	struct inet_peer __rcu **v;				\
188 								\
189 	stackptr = _stack;					\
190 	*stackptr++ = &_base->root;				\
191 	for (u = rcu_deref_locked(_base->root, _base);		\
192 	     u != peer_avl_empty; ) {				\
193 		int cmp = addr_compare(_daddr, &u->daddr);	\
194 		if (cmp == 0)					\
195 			break;					\
196 		if (cmp == -1)					\
197 			v = &u->avl_left;			\
198 		else						\
199 			v = &u->avl_right;			\
200 		*stackptr++ = v;				\
201 		u = rcu_deref_locked(*v, _base);		\
202 	}							\
203 	u;							\
204 })
205 
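/* Like atomic_add_unless(): adds "a" to *ptr unless *ptr equals "u".
 * Returns true if the add was performed, in which case the value that
 * was written is also reported through *newv.
 */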
206 static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
207 {
208 	int cur, old = atomic_read(ptr);
209 
210 	while (old != u) {
211 		*newv = old + a;
212 		cur = atomic_cmpxchg(ptr, old, *newv);
213 		if (cur == old)
214 			return true;
215 		old = cur;
216 	}
217 	return false;
218 }
219 
220 /*
221  * Called with rcu_read_lock()
222  * Because we hold no lock against a writer, it's quite possible we could
223  * fall into an endless loop.
224  * But every pointer we follow is guaranteed to be valid thanks to RCU.
225  * We exit from this function if the number of links followed exceeds PEER_MAXDEPTH.
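 * On success the node's refcount has already been taken and the new
 * refcount value is reported through *newrefcnt; NULL is returned when
 * no live node was found or a concurrently deleted node was hit.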
226  */
227 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
228 				    struct inet_peer_base *base,
229 				    int *newrefcnt)
230 {
231 	struct inet_peer *u = rcu_dereference(base->root);
232 	int count = 0;
233 
234 	while (u != peer_avl_empty) {
235 		int cmp = addr_compare(daddr, &u->daddr);
236 		if (cmp == 0) {
237 			/* Before taking a reference, check if this entry was
238 			 * deleted: unlink_from_pool() sets refcnt=-1 to
239 			 * distinguish between an unused entry (refcnt=0) and
240 			 * a freed one.
241 			 */
242 			if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
243 				u = NULL;
244 			return u;
245 		}
246 		if (cmp == -1)
247 			u = rcu_dereference(u->avl_left);
248 		else
249 			u = rcu_dereference(u->avl_right);
250 		if (unlikely(++count == PEER_MAXDEPTH))
251 			break;
252 	}
253 	return NULL;
254 }
255 
256 /* Called with local BH disabled and the pool lock held. */
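/* Finds the rightmost (largest) node in start's left subtree, i.e. the
 * in-order predecessor of start, recording the traversed links on the
 * stack just like lookup() does.
 */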
257 #define lookup_rightempty(start, base)				\
258 ({								\
259 	struct inet_peer *u;					\
260 	struct inet_peer __rcu **v;				\
261 	*stackptr++ = &start->avl_left;				\
262 	v = &start->avl_left;					\
263 	for (u = rcu_deref_locked(*v, base);			\
264 	     u->avl_right != peer_avl_empty_rcu; ) {		\
265 		v = &u->avl_right;				\
266 		*stackptr++ = v;				\
267 		u = rcu_deref_locked(*v, base);			\
268 	}							\
269 	u;							\
270 })
271 
272 /* Called with local BH disabled and the pool lock held.
273  * The variable names serve as the proof of correctness of the operations.
274  * Look into mm/map_avl.c for a more detailed description of the ideas.
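 * Walks back up the recorded path from stackend towards stack, recomputing
 * heights and applying single or double rotations wherever the AVL
 * invariant (sibling heights differ by at most 1) has been violated.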
275  */
276 static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
277 			       struct inet_peer __rcu ***stackend,
278 			       struct inet_peer_base *base)
279 {
280 	struct inet_peer __rcu **nodep;
281 	struct inet_peer *node, *l, *r;
282 	int lh, rh;
283 
284 	while (stackend > stack) {
285 		nodep = *--stackend;
286 		node = rcu_deref_locked(*nodep, base);
287 		l = rcu_deref_locked(node->avl_left, base);
288 		r = rcu_deref_locked(node->avl_right, base);
289 		lh = node_height(l);
290 		rh = node_height(r);
291 		if (lh > rh + 1) { /* l: RH+2 */
292 			struct inet_peer *ll, *lr, *lrl, *lrr;
293 			int lrh;
294 			ll = rcu_deref_locked(l->avl_left, base);
295 			lr = rcu_deref_locked(l->avl_right, base);
296 			lrh = node_height(lr);
297 			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
298 				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
299 				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
300 				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
301 				RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
302 				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
303 				l->avl_height = node->avl_height + 1;
304 				RCU_INIT_POINTER(*nodep, l);
305 			} else { /* ll: RH, lr: RH+1 */
306 				lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
307 				lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
308 				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
309 				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
310 				node->avl_height = rh + 1; /* node: RH+1 */
311 				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
312 				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
313 				l->avl_height = rh + 1;	/* l: RH+1 */
314 				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
315 				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
316 				lr->avl_height = rh + 2;
317 				RCU_INIT_POINTER(*nodep, lr);
318 			}
319 		} else if (rh > lh + 1) { /* r: LH+2 */
320 			struct inet_peer *rr, *rl, *rlr, *rll;
321 			int rlh;
322 			rr = rcu_deref_locked(r->avl_right, base);
323 			rl = rcu_deref_locked(r->avl_left, base);
324 			rlh = node_height(rl);
325 			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
326 				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
327 				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
328 				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
329 				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
330 				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
331 				r->avl_height = node->avl_height + 1;
332 				RCU_INIT_POINTER(*nodep, r);
333 			} else { /* rr: LH, rl: LH+1 */
334 				rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
335 				rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
336 				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
337 				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
338 				node->avl_height = lh + 1; /* node: LH+1 */
339 				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
340 				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
341 				r->avl_height = lh + 1;	/* r: LH+1 */
342 				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
343 				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
344 				rl->avl_height = lh + 2;
345 				RCU_INIT_POINTER(*nodep, rl);
346 			}
347 		} else {
348 			node->avl_height = (lh > rh ? lh : rh) + 1;
349 		}
350 	}
351 }
352 
353 /* Called with local BH disabled and the pool lock held. */
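/* Links node n into the slot recorded at the top of the lookup() stack
 * (which points at a peer_avl_empty slot) and rebalances the path back
 * up to the root.
 */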
354 #define link_to_pool(n, base)					\
355 do {								\
356 	n->avl_height = 1;					\
357 	n->avl_left = peer_avl_empty_rcu;			\
358 	n->avl_right = peer_avl_empty_rcu;			\
359 	/* lockless readers can catch us now */			\
360 	rcu_assign_pointer(**--stackptr, n);			\
361 	peer_avl_rebalance(stack, stackptr, base);		\
362 } while (0)
363 
364 static void inetpeer_free_rcu(struct rcu_head *head)
365 {
366 	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
367 }
368 
369 /* May be called with local BH enabled. */
370 static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
371 			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
372 {
373 	int do_free;
374 
375 	do_free = 0;
376 
377 	write_seqlock_bh(&base->lock);
378 	/* Check the reference counter.  It was artificially incremented by 1
379 	 * in cleanup_once() to prevent the node from suddenly disappearing.
380 	 * If we can atomically (because of lockless readers) take this last
381 	 * reference, it's safe to remove the node and free it later.
382 	 * We use refcnt=-1 to alert lockless readers that this entry is deleted.
383 	 */
384 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
385 		struct inet_peer __rcu ***stackptr, ***delp;
386 		if (lookup(&p->daddr, stack, base) != p)
387 			BUG();
388 		delp = stackptr - 1; /* *delp[0] == p */
389 		if (p->avl_left == peer_avl_empty_rcu) {
390 			*delp[0] = p->avl_right;
391 			--stackptr;
392 		} else {
393 			/* look for a node to insert instead of p */
394 			struct inet_peer *t;
395 			t = lookup_rightempty(p, base);
396 			BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
397 			**--stackptr = t->avl_left;
398 			/* t is removed, t->daddr > x->daddr for any
399 			 * x in p->avl_left subtree.
400 			 * Put t in the old place of p. */
401 			RCU_INIT_POINTER(*delp[0], t);
402 			t->avl_left = p->avl_left;
403 			t->avl_right = p->avl_right;
404 			t->avl_height = p->avl_height;
405 			BUG_ON(delp[1] != &p->avl_left);
406 			delp[1] = &t->avl_left; /* was &p->avl_left */
407 		}
408 		peer_avl_rebalance(stack, stackptr, base);
409 		base->total--;
410 		do_free = 1;
411 	}
412 	write_sequnlock_bh(&base->lock);
413 
414 	if (do_free)
415 		call_rcu(&p->rcu, inetpeer_free_rcu);
416 	else
417 		/* The node is in use again.  Decrease the reference counter
418 		 * back.  The loop "cleanup -> unlink_from_unused
419 		 *   -> unlink_from_pool -> putpeer -> link_to_unused
420 		 *   -> cleanup (for the same node)"
421 		 * doesn't really exist because the entry will have a
422 		 * recent deletion time and will not be cleaned again soon.
423 		 */
424 		inet_putpeer(p);
425 }
426 
427 static struct inet_peer_base *family_to_base(int family)
428 {
429 	return (family == AF_INET ? &v4_peers : &v6_peers);
430 }
431 
432 static struct inet_peer_base *peer_to_base(struct inet_peer *p)
433 {
434 	return family_to_base(p->daddr.family);
435 }
436 
437 /* May be called with local BH enabled. */
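/* Reclaims at most one entry from the unused list, but only if it has been
 * idle for at least "ttl" jiffies.  Returns 0 if a candidate was handed to
 * unlink_from_pool(), -1 if the unused list was empty or its oldest entry
 * is still younger than ttl.
 */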
438 static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
439 {
440 	struct inet_peer *p = NULL;
441 
442 	/* Remove the first entry from the list of unused nodes. */
443 	spin_lock_bh(&unused_peers.lock);
444 	if (!list_empty(&unused_peers.list)) {
445 		__u32 delta;
446 
447 		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
448 		delta = (__u32)jiffies - p->dtime;
449 
450 		if (delta < ttl) {
451 			/* Do not prune fresh entries. */
452 			spin_unlock_bh(&unused_peers.lock);
453 			return -1;
454 		}
455 
456 		list_del_init(&p->unused);
457 
458 		/* Grab an extra reference to prevent the node from disappearing
459 		 * before the unlink_from_pool() call. */
460 		atomic_inc(&p->refcnt);
461 	}
462 	spin_unlock_bh(&unused_peers.lock);
463 
464 	if (p == NULL)
465 		/* It means that the total number of USED entries has
466 		 * grown over inet_peer_threshold.  It shouldn't really
467 		 * happen because of entry limits in the route cache. */
468 		return -1;
469 
470 	unlink_from_pool(p, peer_to_base(p), stack);
471 	return 0;
472 }
473 
474 /* Called with or without local BH being disabled. */
475 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
476 {
477 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
478 	struct inet_peer_base *base = family_to_base(daddr->family);
479 	struct inet_peer *p;
480 	unsigned int sequence;
481 	int invalidated, newrefcnt = 0;
482 
483 	/* Look up the address quickly, locklessly.
484 	 * Because of a concurrent writer, we might not find an existing entry.
485 	 */
486 	rcu_read_lock();
487 	sequence = read_seqbegin(&base->lock);
488 	p = lookup_rcu(daddr, base, &newrefcnt);
489 	invalidated = read_seqretry(&base->lock, sequence);
490 	rcu_read_unlock();
491 
492 	if (p) {
493 found:		/* The existing node has been found.
494 		 * Remove the entry from the unused list if it was there.
495 		 */
496 		if (newrefcnt == 1)
497 			unlink_from_unused(p);
498 		return p;
499 	}
500 
501 	/* If no writer did a change during our lookup, we can return early. */
502 	if (!create && !invalidated)
503 		return NULL;
504 
505 	/* Retry an exact lookup, this time taking the lock first.
506 	 * At least the nodes should be hot in our cache.
507 	 */
508 	write_seqlock_bh(&base->lock);
509 	p = lookup(daddr, stack, base);
510 	if (p != peer_avl_empty) {
511 		newrefcnt = atomic_inc_return(&p->refcnt);
512 		write_sequnlock_bh(&base->lock);
513 		goto found;
514 	}
515 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
516 	if (p) {
517 		p->daddr = *daddr;
518 		atomic_set(&p->refcnt, 1);
519 		atomic_set(&p->rid, 0);
520 		atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
521 		p->tcp_ts_stamp = 0;
522 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
523 		p->rate_tokens = 0;
524 		p->rate_last = 0;
525 		p->pmtu_expires = 0;
526 		p->pmtu_orig = 0;
527 		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
528 		INIT_LIST_HEAD(&p->unused);
529 
530 
531 		/* Link the node. */
532 		link_to_pool(p, base);
533 		base->total++;
534 	}
535 	write_sequnlock_bh(&base->lock);
536 
537 	if (base->total >= inet_peer_threshold)
538 		/* Remove one less-recently-used entry. */
539 		cleanup_once(0, stack);
540 
541 	return p;
542 }
543 
544 static int compute_total(void)
545 {
546 	return v4_peers.total + v6_peers.total;
547 }
548 EXPORT_SYMBOL_GPL(inet_getpeer);
549 
550 /* Called with local BH disabled. */
551 static void peer_check_expire(unsigned long dummy)
552 {
553 	unsigned long now = jiffies;
554 	int ttl, total;
555 	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
556 
557 	total = compute_total();
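	/* Scale the TTL linearly with pool occupancy: an empty pool uses
	 * inet_peer_maxttl, a full one (total >= threshold) inet_peer_minttl.
	 */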
558 	if (total >= inet_peer_threshold)
559 		ttl = inet_peer_minttl;
560 	else
561 		ttl = inet_peer_maxttl
562 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
563 					total / inet_peer_threshold * HZ;
564 	while (!cleanup_once(ttl, stack)) {
565 		if (jiffies != now)
566 			break;
567 	}
568 
569 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
570 	 * interval depending on the total number of entries (the more entries,
571 	 * the shorter the interval). */
572 	total = compute_total();
573 	if (total >= inet_peer_threshold)
574 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
575 	else
576 		peer_periodic_timer.expires = jiffies
577 			+ inet_peer_gc_maxtime
578 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
579 				total / inet_peer_threshold * HZ;
580 	add_timer(&peer_periodic_timer);
581 }
582 
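/* Drop a reference on p.  When the last reference goes away the node is
 * appended to the unused list and timestamped, so that the periodic
 * garbage collector (peer_check_expire) or cleanup_once() can reclaim it
 * later.
 */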
583 void inet_putpeer(struct inet_peer *p)
584 {
585 	local_bh_disable();
586 
587 	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
588 		list_add_tail(&p->unused, &unused_peers.list);
589 		p->dtime = (__u32)jiffies;
590 		spin_unlock(&unused_peers.lock);
591 	}
592 
593 	local_bh_enable();
594 }
595 EXPORT_SYMBOL_GPL(inet_putpeer);
596 
597 /*
598  *	Check transmit rate limitation for given message.
599  *	The rate information is held in the inet_peer entries now.
600  *	This function is generic and could be used for other purposes
601  *	too.  It uses a token bucket filter, as suggested by Alexey Kuznetsov.
602  *
603  *	Note that the same inet_peer fields are modified by functions in
604  *	route.c too, but these work for packet destinations while xrlim_allow
605  *	works for icmp destinations. This means the rate limiting information
606  *	for one "ip object" is shared - and these ICMPs are twice limited:
607  *	for one "ip object" is shared - and these ICMPs are limited twice:
608  *
609  *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
610  *			  SHOULD allow setting of rate limits
611  *
612  * 	Shared between ICMPv4 and ICMPv6.
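 *
 * 	Example: with timeout == HZ, tokens accrue at one per jiffy and each
 * 	allowed message consumes HZ of them, so transmissions average out to
 * 	one per second, with bursts of up to XRLIM_BURST_FACTOR messages.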
613  */
614 #define XRLIM_BURST_FACTOR 6
615 bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
616 {
617 	unsigned long now, token;
618 	bool rc = false;
619 
620 	if (!peer)
621 		return true;
622 
623 	token = peer->rate_tokens;
624 	now = jiffies;
625 	token += now - peer->rate_last;
626 	peer->rate_last = now;
627 	if (token > XRLIM_BURST_FACTOR * timeout)
628 		token = XRLIM_BURST_FACTOR * timeout;
629 	if (token >= timeout) {
630 		token -= timeout;
631 		rc = true;
632 	}
633 	peer->rate_tokens = token;
634 	return rc;
635 }
636 EXPORT_SYMBOL(inet_peer_xrlim_allow);
637