xref: /linux/net/ipv4/inet_fragment.c (revision 78802011fbe34331bdef6f2dfb1634011f0e4c32)
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

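/* Map a queue to one of the INETFRAGS_HASHSZ buckets.  The per-family
 * f->hashfn is expected to depend on the f->rnd secret (regenerated in
 * inet_frag_secret_rebuild() below); masking with INETFRAGS_HASHSZ - 1
 * assumes the table size is a power of two.
 */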
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

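/* Rate-limit secret rebuilds to at most one per
 * INETFRAGS_MIN_REBUILD_INTERVAL jiffies.
 */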
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

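/* Pick a fresh hash secret and rehash every queue into its new bucket.
 * Runs under the rnd_seqlock write side, so readers such as
 * get_frag_bucket_locked() can detect a concurrent rebuild and retry.
 */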
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock
				 * below: if it is already locked, it will be
				 * released soon, since the other holder
				 * cannot be waiting for the hb lock that
				 * we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

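/* A queue is eligible for eviction once fragment memory in its netns
 * reaches low_thresh; a zero low_thresh (as set by
 * inet_frags_exit_net()) makes every queue eligible.
 */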
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

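/* Evict eligible queues from one hash bucket.  Queues whose timer could
 * be deactivated are collected on a private list under the chain lock,
 * then expired via f->frag_expire() outside it; the reference that the
 * pending timer held covers that call.  Returns the number evicted.
 */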
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

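/* Eviction work, run from the system workqueue.  Scans up to
 * INETFRAGS_EVICT_BUCKETS buckets per run, resuming where the previous
 * run stopped, and bails out once more than INETFRAGS_EVICT_MAX queues
 * have been evicted.  Also performs a requested secret rebuild if the
 * rate limit allows it.
 */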
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

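/* Kick the eviction worker unless it is already queued. */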
static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

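/* One-time setup of a protocol's inet_frags: the eviction work item,
 * the hash buckets with their chain locks, the secret seqlock, and the
 * kmem cache that queue objects are allocated from.
 */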
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

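/* Counterpart of inet_frags_init(); any queued eviction work must
 * finish before the kmem cache is destroyed.
 */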
void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

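/* Flush all queues charged to a dying netns.  Zeroing low_thresh makes
 * inet_fragq_should_evict() accept every queue; the sweep is repeated
 * until no fragment memory remains charged, and also when a secret
 * rebuild raced with it.
 */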
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);

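/* Return fq's bucket with its chain lock held.  The rnd_seqlock read
 * section detects a secret rebuild racing with the hash computation;
 * in that case the lock is dropped and the lookup restarted against
 * the new secret.
 */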
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

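/* Unhash a queue and mark it complete so no new lookup can find it;
 * references already held by others remain valid.
 */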
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

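/* Take a queue out of service: drop the reference held by a still
 * pending timer, then unhash the queue and drop the hash chain's
 * reference.  The caller's own reference is released separately,
 * normally via inet_frag_put().
 */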
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		refcount_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

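/* Final teardown once the last reference is gone: free all queued
 * skbs, run the per-protocol destructor, return the object to the
 * kmem cache, and uncharge the accumulated truesize plus f->qsize
 * from the netns fragment memory accounting.
 */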
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

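/* Publish a freshly allocated queue in the hash table.  On SMP, a
 * matching queue may have been inserted by another cpu while we were
 * allocating; in that case the existing queue is returned and qp_in is
 * discarded.  The inserted queue ends up with references for the
 * caller, the pending timer, and the hash chain.
 */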
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because such an
	 * entry could have been created on another cpu before we
	 * acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

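/* Allocate and initialise a new queue.  If the netns is already above
 * high_thresh (or reassembly is disabled via a zero high_thresh), give
 * up and schedule the eviction worker instead, so reassembly backs off
 * under memory pressure.
 */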
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

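/* Slow path of inet_frag_find(): allocate a queue described by @arg
 * and insert it into the hash table via inet_frag_intern().
 */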
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

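/* Find the queue matching @key in bucket @hash and take a reference,
 * creating the queue if it does not exist.  If the chain has grown
 * past INETFRAGS_MAXDEPTH entries, the fragment is dropped instead:
 * ERR_PTR(-ENOBUFS) is returned and a secret rebuild is requested,
 * since such a long chain suggests the hash distribution has
 * degenerated (e.g. under a hash-flooding attack).
 */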
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

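/* Ratelimited debug message for the inet_frag_find() chain-overflow
 * drop above.
 */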
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);