xref: /linux/net/ipv4/inet_fragment.c (revision 5719b296fb81502d0dbbb4e87b3235e5bdcdfc6b)
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR of the ECN values of all fragments, apply RFC 3168 5.3
 * requirements.
 * Value: 0xff if the frame should be dropped.
 *        0 or INET_ECN_CE value, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
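
/* Usage sketch (illustrative, not part of this file): a reassembler ORs
 * together the IPFRAG_ECN_* flag of every fragment it accepts and consults
 * the table once the datagram is complete.  Assuming a helper like ipv4's
 * ip4_frag_ecn(), which maps a TOS byte to a single IPFRAG_ECN_* flag:
 *
 *	u8 ecn = 0;
 *
 *	// per accepted fragment:
 *	ecn |= ip4_frag_ecn(frag_iph->tos);
 *
 *	// at reassembly time:
 *	u8 ecn_res = ip_frag_ecn_table[ecn];
 *	if (ecn_res == 0xff)
 *		goto drop;		// invalid mix, RFC 3168 5.3
 *	reasm_iph->tos |= ecn_res;	// 0 or INET_ECN_CE
 */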

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock
				 * below: if it's already locked, it will be
				 * released soon, since the other holder
				 * cannot be waiting for the hb lock that
				 * we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}
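
/* Hashing is keyed by f->rnd, so a lookup that computed its bucket from a
 * stale secret must retry: get_frag_bucket_locked() below pairs
 * read_seqbegin()/read_seqretry() against the write_seqlock_bh() taken
 * here while chains are relinked.
 */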

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		fq->flags |= INET_FRAG_EVICTED;
		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) fq);

	return evicted;
}
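
/* A queue is evicted only if del_timer() succeeds: whoever deletes the
 * pending timer owns the expiry, so a queue whose timer is already firing
 * is left to its timer callback rather than expired twice.  The worker
 * below spreads eviction out: each run scans at most
 * INETFRAGS_EVICT_BUCKETS buckets and evicts at most INETFRAGS_EVICT_MAX
 * queues before recording f->next_bucket and yielding, so softirqs are
 * never disabled for long in one go.
 */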

static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}
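
/* The work_pending() check keeps at most one eviction run queued.  It is
 * racy by design: a schedule lost to the race merely delays eviction until
 * the next allocation or lookup that finds the namespace over threshold.
 */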

int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);
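
/* Setup sketch (hedged; mirrors what ipv4's ipfrag_init() does, with
 * hypothetical "my_" names standing in for a protocol's own helpers):
 *
 *	static struct inet_frags my_frags;
 *
 *	my_frags.hashfn		  = my_hashfn;
 *	my_frags.constructor	  = my_frag_init;
 *	my_frags.destructor	  = my_frag_free;
 *	my_frags.qsize		  = sizeof(struct my_frag_queue);
 *	my_frags.match		  = my_frag_match;
 *	my_frags.frag_expire	  = my_frag_expire;
 *	my_frags.frags_cache_name = "my-frags";
 *	if (inet_frags_init(&my_frags))
 *		panic("Cannot init my_frags cache\n");
 *
 * qsize and frags_cache_name must be set before the call, since they feed
 * kmem_cache_create() above.
 */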

void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    percpu_counter_sum(&nf->mem))
		goto evict_again;

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
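
/* Why the retry loop above: zeroing low_thresh makes every queue eligible
 * for eviction, but a concurrent secret rebuild can move queues into
 * buckets that were already scanned, and memory may still be charged to
 * timers that beat us to del_timer() and are expiring queues themselves.
 * Looping until the seqlock is stable and the counter reads zero ensures
 * the namespace exits with nothing left to free.
 */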

static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);
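
/* Reference counting: a live queue holds one reference for its expiry
 * timer and one for its hash chain, in addition to the caller's own.
 * inet_frag_kill() drops the first two; the caller must still release its
 * own reference with inet_frag_put().
 */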

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
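
/* Memory accounting is symmetric: inet_frag_alloc() charges f->qsize and
 * the protocol's enqueue path is expected to charge each fragment's
 * skb->truesize, so releasing sum_truesize + f->qsize here returns the
 * queue's entire footprint to the per-namespace counter.
 */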

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash chain, because an equivalent
	 * entry could have been created on another cpu before we acquired
	 * the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}
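
/* On the successful path a freshly interned queue leaves with a refcount
 * of 3: the initial reference from inet_frag_alloc() (the caller's), one
 * taken when arming the timer, and one for the hash chain.
 */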

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
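
/* Lookup sketch (hedged; modelled on ipv4's ip_find(), with hypothetical
 * "my_" names):
 *
 *	struct inet_frag_queue *q;
 *	unsigned int hash = my_key_hashfn(&key);
 *
 *	q = inet_frag_find(nf, &my_frags, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct my_frag_queue, q);
 *
 * NULL means allocation failed or the namespace is over high_thresh;
 * ERR_PTR(-ENOBUFS) means the chain grew past INETFRAGS_MAXDEPTH.
 */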

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
455