xref: /linux/net/ipv4/inet_fragment.c (revision ba7a46f16dd29f93303daeb1fee8af316c5a07f4)
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR of the ECN bits of all fragments, apply the RFC 3168
 * section 5.3 requirements.
 * Value: 0xff if the frame should be dropped,
 *        otherwise 0 or INET_ECN_CE, to be ORed into the final
 *        iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations: drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
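
/* Illustrative sketch (not part of this file): a reassembly path is
 * expected to OR each fragment's IPFRAG_ECN_* value into the queue's
 * ecn accumulator, then consult the table once reassembly completes,
 * roughly as ipv4/ip_fragment.c does; the caller-side names here are
 * assumptions:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;		// invalid combination: drop frame
 *	iph->tos |= ecn;		// propagate CE if any fragment had it
 */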

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  Since rebuilds never run concurrently,
				 * we cannot deadlock on the hb_dest lock
				 * below: if it is already held, it will be
				 * released soon, because its holder cannot be
				 * waiting on the hb lock we took above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

evict_again:
	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer)) {
			/* The timer is firing right now: take a reference so
			 * fq won't be freed under us, drop the chain lock,
			 * wait until the timer handler has finished, then
			 * destroy the queue.
			 */
			atomic_inc(&fq->refcnt);
			spin_unlock(&hb->chain_lock);
			del_timer_sync(&fq->timer);
			inet_frag_put(fq, f);
			goto evict_again;
		}

		fq->flags |= INET_FRAG_EVICTED;
		hlist_del(&fq->list);
		hlist_add_head(&fq->list, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list)
		f->frag_expire((unsigned long) fq);

	return evicted;
}

static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}
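
/* Note: schedule_work() is itself a no-op when the work item is already
 * queued, so the work_pending() test above is only an optimization that
 * skips the call in the common already-scheduled case.
 */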

int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);
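
/* Illustrative sketch (not part of this file): a protocol registers its
 * reassembly engine by filling in the struct inet_frags callbacks and
 * calling inet_frags_init() once; the field values below are assumptions
 * modelled on ipv4/ip_fragment.c of the same era:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	ip4_frags.hashfn = ip4_hashfn;
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor = ip4_frag_free;
 *	ip4_frags.qsize = sizeof(struct ipq);
 *	ip4_frags.match = ip4_frag_match;
 *	ip4_frags.frag_expire = ip_expire;
 *	ip4_frags.frags_cache_name = "ip4-frags";
 *	if (inet_frags_init(&ip4_frags))
 *		panic("IP: failed to allocate ip4_frags cache\n");
 */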

void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);
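
/* Illustrative sketch (not part of this file): each network namespace is
 * expected to seed its thresholds and timeout before calling
 * inet_frags_init_net(); the values shown are assumptions, not
 * requirements:
 *
 *	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *	net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *	inet_frags_init_net(&net->ipv4.frags);
 */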

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;
	local_bh_disable();

evict_again:
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	if (read_seqretry(&f->rnd_seqlock, seq))
		goto evict_again;

	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	if (!(fq->flags & INET_FRAG_EVICTED))
		hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->flags |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
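
/* Illustrative sketch (not part of this file): inet_frag_kill() is
 * normally invoked from the protocol's frag_expire timer callback with
 * the queue lock held; the names are assumptions modelled on ip_expire():
 *
 *	spin_lock(&qp->q.lock);
 *	if (qp->q.flags & INET_FRAG_COMPLETE)
 *		goto out;		// already reassembled or killed
 *	inet_frag_kill(&qp->q, &ip4_frags);
 *	...
 * out:
 *	spin_unlock(&qp->q.lock);
 *	inet_frag_put(&qp->q, &ip4_frags);	// drop the timer's reference
 */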

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}
EXPORT_SYMBOL(inet_frag_destroy);
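
/* Illustrative sketch (not part of this file): callers do not invoke
 * inet_frag_destroy() directly; they drop references via inet_frag_put(),
 * which (as a static inline in include/net/inet_frag.h) boils down to:
 *
 *	if (atomic_dec_and_test(&q->refcnt))
 *		inet_frag_destroy(q, f);
 */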

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP races, we have to recheck the hash table: such an
	 * entry could have been created on another CPU before we
	 * acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
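
/* Illustrative sketch (not part of this file): a lookup helper in the
 * protocol code typically wraps inet_frag_find() together with the
 * overflow warning below; the names are assumptions modelled on
 * ip_find() in ipv4/ip_fragment.c:
 *
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */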

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
464