xref: /linux/net/ipv4/inet_fragment.c (revision d1fe19444d82e399e38c1594c71b850eca8e9de0)
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

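/* Map a queue to its bucket index.  f->hashfn() mixes in the per-table
 * secret f->rnd, so a queue's bucket changes after every secret rebuild.
 */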
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

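/* Pick a new hash secret and move every queue to the bucket it now
 * hashes to.  Writers of f->rnd are serialized by rnd_seqlock; lookups
 * that raced with us notice the seqlock change and retry.
 */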
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock
				 * below: if it is already locked, it will
				 * be released soon, since the other caller
				 * cannot be waiting for the hb lock that
				 * we have taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

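/* A queue may be evicted once its namespace is at or above the low
 * memory watermark; low_thresh == 0 (namespace teardown) makes every
 * queue evictable.
 */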
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

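/* Walk one hash bucket, collect evictable queues on a private list,
 * then expire them outside the chain lock.  If a queue's timer is
 * already running, take a reference, wait for it, and rescan.
 */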
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

evict_again:
	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer)) {
			/* The timer is firing right now: take a reference
			 * so fq cannot be freed under us, wait for the
			 * handler to finish, then drop the reference and
			 * rescan the bucket.
			 */
			atomic_inc(&fq->refcnt);
			spin_unlock(&hb->chain_lock);
			del_timer_sync(&fq->timer);
			inet_frag_put(fq, f);
			goto evict_again;
		}

		fq->flags |= INET_FRAG_EVICTED;
		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) fq);

	return evicted;
}

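/* Deferred eviction: scan up to INETFRAGS_EVICT_BUCKETS buckets per
 * run, stopping early once more than INETFRAGS_EVICT_MAX queues were
 * evicted, and remember where to resume.  Also performs a pending
 * secret rebuild, if one was requested and is allowed.
 */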
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

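/* Kick the eviction worker unless a run is already pending. */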
static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

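/* One-time setup of a protocol's fragment table: hash buckets, the
 * secret-rebuild seqlock, and the kmem cache backing its queues.  The
 * caller must have filled in qsize and frags_cache_name beforehand.
 */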
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

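/*
 * Typical wiring, for orientation only: a minimal sketch modelled on
 * how ipv4/ip_fragment.c registers itself around this revision.  The
 * names my_frags, my_queue, my_hashfn, my_match, my_ctor and my_expire
 * are illustrative, not part of this API.
 *
 *	static struct inet_frags my_frags;
 *
 *	static int __init my_proto_frag_init(void)
 *	{
 *		my_frags.hashfn = my_hashfn;
 *		my_frags.constructor = my_ctor;
 *		my_frags.destructor = NULL;
 *		my_frags.skb_free = NULL;
 *		my_frags.qsize = sizeof(struct my_queue);
 *		my_frags.match = my_match;
 *		my_frags.frag_expire = my_expire;
 *		my_frags.frags_cache_name = "my-frags";
 *		return inet_frags_init(&my_frags);
 *	}
 *
 * Each namespace then sets its thresholds and timeout and calls
 * inet_frags_init_net(); teardown is inet_frags_exit_net() per
 * namespace, followed by inet_frags_fini() at module exit.
 */
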
void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

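/* Namespace teardown: zero low_thresh so every queue becomes evictable
 * and no new one can be created, then evict all buckets.  Retry if a
 * secret rebuild rehashed queues under us, so none are left behind.
 */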
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;
	local_bh_disable();

evict_again:
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	if (read_seqretry(&f->rnd_seqlock, seq))
		goto evict_again;

	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

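/* Return the queue's current bucket with its chain_lock held.  The
 * seqlock retry loop guarantees the bucket was computed with the hash
 * secret that is still in effect once we hold the lock.
 */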
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);
}

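/* Take a queue out of service: stop its timer and unlink it from the
 * hash, dropping the references both of them held, and mark it
 * complete so this happens only once.
 */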
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->flags |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

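/* Final teardown, called once the last reference is gone: free every
 * queued fragment, return the accounted memory (skb truesizes plus the
 * queue object itself), and release the queue.
 */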
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}
EXPORT_SYMBOL(inet_frag_destroy);

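/* Insert a freshly allocated queue into the hash.  If another CPU
 * created a matching queue while we were allocating, return that one
 * instead and dispose of ours.
 */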
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash chain, because a matching
	 * entry could have been created on another CPU before we
	 * acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

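/* Allocate and initialize a new queue, charging f->qsize against the
 * namespace.  Refuse (and kick the evictor) when the namespace is
 * already above its high watermark.
 */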
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

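/* Look up the queue matching @key in the bucket selected by @hash,
 * taking a reference, or create one on a miss.  A chain longer than
 * INETFRAGS_MAXDEPTH suggests hash abuse: fail with -ENOBUFS and
 * request a secret rebuild instead of growing the chain further.
 */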
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);