xref: /linux/net/ipv4/inetpeer.c (revision 77de28cd7cf172e782319a144bf64e693794d78b)
// SPDX-License-Identifier: GPL-2.0
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain
 *  long-lived information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference count drops to 0.
 *  Once that has happened, a node may be removed after a sufficient
 *  amount of time has passed since its last use.  A less-recently-used
 *  entry can also be removed if the pool is overloaded, i.e. if the total
 *  number of entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long-lived nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND their reference count being 0.
 *  3.  The per-base entry counter (base->total) is modified under the
 *      pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent the node disappearing
 *		daddr: unchangeable
 */

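/* Rules 1 and 2 in practice: inet_getpeer() below links new nodes only
 * under write_seqlock_bh(&base->lock), and inet_peer_gc() unlinks a node
 * only while holding that lock AND after refcount_dec_if_one() has taken
 * the reference count from 1 (its base reference) down to 0.
 */
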
static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_IPV6_MOD_GPL(inet_peer_base_init);

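/* A minimal sketch of how a per-netns base is typically set up, modeled
 * on ipv4_inetpeer_init() in net/ipv4/route.c (illustrative only; the
 * net->ipv4.peers field name is an assumption drawn from that caller):
 *
 *	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
 *
 *	if (!bp)
 *		return -ENOMEM;
 *	inet_peer_base_init(bp);
 *	net->ipv4.peers = bp;
 */
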
#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}

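/* Worked example (the struct size is illustrative): on a 4 GiB machine
 * with a 192-byte cache-aligned struct inet_peer, nr_entries is
 * 2^32 / (100 * 192) ~= 223,696, which clamp_val() caps at
 * 65536 + 128 = 65664; small systems are floored at 4096 entries.
 */
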
/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;
	u32 now;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			now = jiffies;
			if (READ_ONCE(p->dtime) != now)
				WRITE_ONCE(p->dtime, now);
			return p;
		}
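		/* On a miss, the locked pass (gc_stack != NULL) records up
		 * to PEER_MAX_GC garbage-collection candidates, while the
		 * lockless pass bails out as soon as a concurrent writer
		 * has touched the tree, since the pointers being chased
		 * may already be stale.
		 */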
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

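	/* The TTL falls linearly with pool occupancy, from peer_maxttl on
	 * an empty pool down toward peer_minttl as base->total approaches
	 * the threshold; dividing by HZ before multiplying presumably keeps
	 * the intermediate product within 32 bits.  E.g. with the default
	 * 600s/120s bounds and a half-full pool, entries idle for more
	 * than ~360s become eligible.
	 */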
	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			kfree_rcu(p, rcu);
		}
	}
}

/* Must be called under RCU: no refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);

	if (p)
		return p;

	/* Retry the exact lookup, this time holding the lock.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 1);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_IPV6_MOD_GPL(inet_getpeer);

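/* Typical caller pattern (an illustrative sketch modeled on the ICMP
 * rate limiter; the inet_getpeer_v4() wrapper and the net->ipv4.peers
 * base are assumptions drawn from elsewhere in the tree):
 *
 *	rcu_read_lock();
 *	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif);
 *	allow = inet_peer_xrlim_allow(peer, timeout);
 *	rcu_read_unlock();
 *
 * No reference is taken: the peer is only valid inside the RCU read-side
 * critical section unless the caller elevates p->refcnt itself.
 */
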
void inet_putpeer(struct inet_peer *p)
{
	if (refcount_dec_and_test(&p->refcnt))
		kfree_rcu(p, rcu);
}

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is now held in the inet_peer entries.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for ICMP destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token, otoken, delta;
	bool rc = false;

	if (!peer)
		return true;

	token = otoken = READ_ONCE(peer->rate_tokens);
	now = jiffies;
	delta = now - READ_ONCE(peer->rate_last);
	if (delta) {
		WRITE_ONCE(peer->rate_last, now);
		token += delta;
		if (token > XRLIM_BURST_FACTOR * timeout)
			token = XRLIM_BURST_FACTOR * timeout;
	}
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	if (token != otoken)
		WRITE_ONCE(peer->rate_tokens, token);
	return rc;
}
EXPORT_IPV6_MOD(inet_peer_xrlim_allow);

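/* Worked example: with timeout = HZ (one message per second) and an
 * idle peer, rate_tokens saturates at XRLIM_BURST_FACTOR * HZ, so a
 * burst of up to six messages is allowed before the limiter settles at
 * one message per elapsed second (each allowed message spends `timeout`
 * tokens; each elapsed jiffy earns one).
 */
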
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_IPV6_MOD(inetpeer_invalidate_tree);
289