/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which does not depend on routes.
 *  At this moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time these notes were written, the IP packet identifier is made
 *  unpredictable by this code only for packets subjected (actually or
 *  potentially) to defragmentation.  I.e. DF packets smaller than the PMTU
 *  use a constant ID and do not use this code (see ip_select_ident() in
 *  include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the avl tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The least-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  Global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_parent, avl_height: pool lock
 *	unused_next, unused_prevp: unused node list lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	dtime: unused node list lock
 *	v4daddr: unchangeable
 *	ip_id_count: idlock
 */

/* Exported for the inet_getid inline function.  */
DEFINE_SPINLOCK(inet_peer_idlock);
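/* The per-packet ID increment mentioned in the theory-of-operations comment
 * lives in inet_getid() in include/net/inetpeer.h.  The sketch below is
 * illustrative only (it assumes the lock-based variant of that helper and is
 * not compiled here); see the header for the authoritative definition. */
#if 0
static inline __u16 inet_getid(struct inet_peer *p, int more)
{
	__u16 id;

	spin_lock_bh(&inet_peer_idlock);
	id = p->ip_id_count;
	p->ip_id_count += 1 + more;	/* one ID per (potentially) fragmentable packet */
	spin_unlock_bh(&inet_peer_idlock);
	return id;
}
#endif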

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */

static struct inet_peer *inet_peer_unused_head;
static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
static DEFINE_SPINLOCK(inet_peer_unused_lock);

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);

/* Exported for sysctl_net_ipv4.  */
int inet_peer_gc_mintime = 10 * HZ,
    inet_peer_gc_maxtime = 120 * HZ;

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
			NULL, NULL);

	/* All the timers started at system startup tend
	   to synchronize.  Perturb it a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (p->unused_prevp != NULL) {
		/* On unused list. */
		*p->unused_prevp = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark it as removed */
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}

/* Called with local BH disabled and the pool lock held. */
#define lookup(daddr) 						\
({								\
	struct inet_peer *u, **v;				\
	stackptr = stack;					\
	*stackptr++ = &peer_root;				\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
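
/* lookup() expects the caller to provide the `stack'/`stackptr' variables;
 * the macro records the search path in stack[] so the tree can later be
 * rebalanced along that path.  An illustrative (not compiled) sketch of the
 * read-side pattern used by inet_getpeer() below: */
#if 0
	struct inet_peer *p;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr);			/* fills stack[]/stackptr */
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);		/* grab a reference while locked */
	read_unlock_bh(&peer_pool_lock);
#endif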

/* Called with local BH disabled and the pool write lock held. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.  */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)
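
/* Insertion uses the same path-recording trick on the write side: lookup()
 * leaves the empty slot on top of the stack, link_to_pool() hooks the new
 * node in there and rebalances along the recorded path (at most
 * PEER_MAXDEPTH entries).  An illustrative (not compiled) sketch mirroring
 * inet_getpeer() below; `n' stands for a node allocated by the caller: */
#if 0
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	write_lock_bh(&peer_pool_lock);
	if (lookup(daddr) == peer_avl_empty) {	/* not in the pool yet */
		link_to_pool(n);		/* n: preallocated node */
		peer_total++;
	}
	write_unlock_bh(&peer_pool_lock);
#endif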

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			BUG_ON(*stackptr[-1] != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	p = inet_peer_unused_head;
	if (p != NULL) {
		__u32 delta = (__u32)jiffies - p->dtime;
		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}
		inet_peer_unused_head = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark as not on the list */
		/* Grab an extra reference to prevent the node disappearing
		 * before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}
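
/* Callers obtain nodes through inet_getpeer() below and must balance every
 * successful call with inet_putpeer(); only when the last reference is
 * dropped does the node go onto the unused list where cleanup_once() can
 * reclaim it.  An illustrative (not compiled) usage sketch: */
#if 0
	struct inet_peer *peer;

	peer = inet_getpeer(daddr, 1);		/* create the entry if needed */
	if (peer != NULL) {
		__u16 id = inet_getid(peer, 0);	/* e.g. pick the next IP ID */

		/* ... use id ... */
		inet_putpeer(peer);		/* drop the reference */
	}
#endif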

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found. */
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	atomic_set(&n->rid, 0);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	n->unused_prevp = NULL; /* not on the list */
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one least-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl;

	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after an inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (more entries,
	 * shorter interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

void inet_putpeer(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (atomic_dec_and_test(&p->refcnt)) {
		p->unused_prevp = inet_peer_unused_tailp;
		p->unused_next = NULL;
		*inet_peer_unused_tailp = p;
		inet_peer_unused_tailp = &p->unused_next;
		p->dtime = (__u32)jiffies;
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}
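
/* The consumer referenced in the theory-of-operations comment is
 * __ip_select_ident() in net/ipv4/route.c.  The sketch below is a rough,
 * not-compiled illustration of such a caller; the rt->peer field and the
 * rt_bind_peer() helper belong to the route cache side and are assumptions
 * here, shown only to make the reference-grabbing step concrete. */
#if 0
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *)dst;

	if (rt && rt->peer == NULL)
		rt_bind_peer(rt, 1);		/* ends up in inet_getpeer() */

	if (rt && rt->peer) {
		/* unpredictable, per-destination ID stream */
		iph->id = htons(inet_getid(rt->peer, more));
		return;
	}

	/* no peer available: fall back to another ID selection scheme */
}
#endif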