/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/inet_frag.h>

static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
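
/*
 * Illustrative registration sketch (not part of this file): a protocol
 * embeds struct inet_frag_queue at the start of its own queue structure
 * and fills in the callbacks before calling inet_frags_init().  The
 * names below are modelled on ipv4/ip_fragment.c and are assumptions
 * here, not definitions made by this file:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	ip4_frags.hashfn = ip4_hashfn;
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor = ip4_frag_free;
 *	ip4_frags.skb_free = NULL;
 *	ip4_frags.qsize = sizeof(struct ipq);
 *	ip4_frags.match = ip4_frag_match;
 *	ip4_frags.frag_expire = ip_expire;
 *	ip4_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&ip4_frags);
 */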

void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);
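
/*
 * Note on locking (added for clarity): f->lock protects the hash chains,
 * the per-namespace LRU list and nf->nqueues.  Writers such as fq_unlink()
 * and inet_frag_intern() take it exclusively; lookup and eviction only
 * need the read side.
 */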

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
		int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
	BUG_TRAP(del_timer(&q->timer) == 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
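
/*
 * Illustrative caller (assumed, modelled on ipv4 reassembly): the evictor
 * is run when the per-namespace memory counter crosses its high watermark,
 * and keeps killing the oldest queues until usage drops below low_thresh:
 *
 *	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
 *		evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
 */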

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we stayed without the lock another CPU could have updated
	 * the rnd seed, so we need to recalculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because such an entry
	 * could have been created on another cpu while we promoted the
	 * read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}
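
/*
 * Note on reference counting (added for clarity): after a successful
 * intern the queue holds one reference for the caller (taken in alloc),
 * one for the hash table and one for the pending expire timer.
 */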

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	q->net = nf;

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);
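
/*
 * Illustrative lookup pattern (assumed, modelled on ipv4's ip_find()):
 * the caller takes f->lock for reading and computes the hash under it,
 * so a concurrent secret rebuild cannot invalidate it; inet_frag_find()
 * then drops the read lock on every path:
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (q == NULL)
 *		goto out_nomem;
 */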