// SPDX-License-Identifier: GPL-2.0
/*
 * INETPEER - A storage for permanent information about peers
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address. Each node contains long-lived
 * information about the peer which doesn't depend on routes.
 *
 * Nodes are removed only when the reference counter goes to 0.
 * Once that happens, the node may be removed once a sufficient amount of
 * time has passed since its last use. The less-recently-used entry can
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an RB tree.
 * Such an implementation has been chosen not just for fun. It's a way to
 * prevent easy and efficient DoS attacks by creating hash collisions. A huge
 * number of long-lived nodes in a single hash slot would significantly delay
 * lookups performed with disabled BHs.
 *
 * Serialisation issues.
 * 1. Nodes may appear in the tree only with the pool lock held.
 * 2. Nodes may disappear from the tree only with the pool lock held
 *    AND the reference count being 0.
 * 3. The per-base total counter (base->total) is modified under the pool lock.
 * 4. struct inet_peer fields modification:
 *	rb_node: pool lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	daddr: unchangeable
 */

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}

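/*
 * A base is embedded in a larger per-user structure and initialised once at
 * setup time. A minimal sketch of a pernet user, modelled on the IPv4
 * stack's ipv4_inetpeer_init() in net/ipv4/route.c (shown here only as an
 * illustration, not code from this file):
 *
 *	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
 *
 *	if (!bp)
 *		return -ENOMEM;
 *	inet_peer_base_init(bp);
 *	net->ipv4.peers = bp;
 */
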
#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}

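/*
 * Worked example for the sizing above (illustrative numbers only): on a
 * machine with 4 GiB of RAM, assuming
 * L1_CACHE_ALIGN(sizeof(struct inet_peer)) == 192 on this build,
 *
 *	nr_entries = 2^32 / (100 * 192) ~= 223696
 *
 * which clamp_val() caps at 65536 + 128 = 65664 entries. In other words,
 * the threshold only scales with RAM on small machines.
 */
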
/* Called with rcu_read_lock() or base->lock held. */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;
	u32 now;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			now = jiffies;
			if (READ_ONCE(p->dtime) != now)
				WRITE_ONCE(p->dtime, now);
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
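
	/*
	 * The expression above interpolates the TTL linearly between
	 * peer_maxttl (empty pool) and peer_minttl (pool at threshold).
	 * Worked example, assuming the defaults (minttl = 120 * HZ,
	 * maxttl = 600 * HZ) and a threshold at the clamp ceiling of
	 * 65536 + 128 = 65664:
	 *
	 *	base->total == 32832 (half the threshold)
	 *	ttl = 600 * HZ - (480 * 32832 / 65664) * HZ
	 *	    = 600 * HZ - 240 * HZ, i.e. 360 seconds in jiffies
	 */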
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			kfree_rcu(p, rcu);
		}
	}
}

/* Must be called under RCU; no refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);

	/* Make sure the tree was not modified during our lookup. */
	if (p && !read_seqretry(&base->lock, seq))
		return p;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 1);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough that the
			 * first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}

void inet_putpeer(struct inet_peer *p)
{
	if (refcount_dec_and_test(&p->refcnt))
		kfree_rcu(p, rcu);
}

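/*
 * Illustrative caller pattern (a sketch under an assumed caller context;
 * "base", "daddr", "allow" and "timeout" belong to the caller): because
 * inet_getpeer() takes no reference, a reader such as the ICMP rate
 * limiter stays inside one RCU section and never calls inet_putpeer():
 *
 *	rcu_read_lock();
 *	peer = inet_getpeer(base, &daddr);
 *	allow = inet_peer_xrlim_allow(peer, timeout);
 *	rcu_read_unlock();
 *
 * inet_putpeer() is only for dropping a reference that was actually taken,
 * such as the tree's own reference in inetpeer_invalidate_tree() below.
 */
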
/*
 * Check the transmit rate limitation for a given message.
 * The rate information is now held in the inet_peer entries.
 * This function is generic and could be used for other purposes
 * too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 * Note that the same inet_peer fields are modified by functions in
 * route.c too, but those work for packet destinations while xrlim_allow
 * works for icmp destinations. This means the rate limiting information
 * for one "ip object" is shared, and these ICMPs are limited twice:
 * by source and by destination.
 *
 * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			SHOULD allow setting of rate limits
 *
 * Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
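/* Worked example of the bucket arithmetic below (illustrative numbers):
 * with timeout == HZ, tokens accrue at one per jiffy and are capped at
 * XRLIM_BURST_FACTOR * HZ, so an idle peer may burst up to six messages
 * back to back (each allowed send spends HZ tokens), then settles at one
 * message per second.
 */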
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token, otoken, delta;
	bool rc = false;

	if (!peer)
		return true;

	token = otoken = READ_ONCE(peer->rate_tokens);
	now = jiffies;
	delta = now - READ_ONCE(peer->rate_last);
	if (delta) {
		WRITE_ONCE(peer->rate_last, now);
		token += delta;
		if (token > XRLIM_BURST_FACTOR * timeout)
			token = XRLIM_BURST_FACTOR * timeout;
	}
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	if (token != otoken)
		WRITE_ONCE(peer->rate_tokens, token);
	return rc;
}

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}