/* xref: /linux/net/ipv4/inet_fragment.c (revision 2638eb8b50cfc16240e0bb080b9afbf541a9b39d) */
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
};
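
/* Illustrative layout (offsets invented for the example):
 *
 *	rb_fragments:	[   0..1399]	run head, frag_run_len = 2800
 *			  `-> [1400..2799]	chained via FRAG_CB()->next_frag
 *			[4200..5599]	separate run (preceded by a hole)
 */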

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements.
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

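/* Register a fragment queue "family" (IPv4, IPv6, netfilter reassembly, ...):
 * create the slab cache its queue objects are allocated from.  The
 * refcnt/completion pair lets inet_frags_fini() wait for the last fqdir
 * using this family to go away.
 */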
int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

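/* Unregister a family: drop the initial reference and wait for all pending
 * fqdir teardowns to finish before destroying the slab cache.
 */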
void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at fqdir dismantle */
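/* Each queue found here may still hold several references: drop one if its
 * timer was pending, one for the hash table if the queue was not yet marked
 * complete, and one if inet_frag_kill() deferred the unlink to us by setting
 * INET_FRAG_HASH_DEAD.
 */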
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}

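/* Deferred fqdir teardown, run after an RCU grace period (see fqdir_exit()):
 * free every queue left in the hash table, wait for in-flight
 * inet_frag_destroy_rcu() callbacks, then release the fqdir itself.
 */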
static void fqdir_rwork_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(to_rcu_work(work),
					   struct fqdir, destroy_rwork);
	struct inet_frags *f = fqdir->f;

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * callbacks have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	kfree(fqdir);
}

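/* Allocate and initialise the per-namespace fragment directory for one
 * family, taking a reference on @f so the family cannot be unregistered
 * while this fqdir is alive.
 */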
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);

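/* Begin dismantling a fragment directory: stop new queues from being
 * created, mark the directory dead, and defer the hash table destruction
 * to fqdir_rwork_fn() after an RCU grace period.
 */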
void fqdir_exit(struct fqdir *fqdir)
{
	fqdir->high_thresh = 0; /* prevent creation of new frags */

	fqdir->dead = true;

	/* call_rcu is supposed to provide memory barrier semantics,
	 * separating the setting of fqdir->dead from the destruction
	 * work.  This implicit barrier is paired with inet_frag_kill().
	 */

	INIT_RCU_WORK(&fqdir->destroy_rwork, fqdir_rwork_fn);
	queue_rcu_work(system_wq, &fqdir->destroy_rwork);
}
EXPORT_SYMBOL(fqdir_exit);

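/* Mark a queue dead: stop its timer and unlink it from the hash table,
 * dropping the references those held.  If the fqdir is already being
 * dismantled, set INET_FRAG_HASH_DEAD and leave the final unlink to
 * inet_frags_free_cb().
 */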
void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock.  Paired with fqdir_exit().
		 */
		if (!fqdir->dead) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			refcount_dec(&fq->refcnt);
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

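/* Free every skb held in the fragment rb-tree, following the next_frag chain
 * inside each run.  Returns the total truesize released so the caller can
 * adjust the memory accounting.
 */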
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

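/* Final destruction of a dead (INET_FRAG_COMPLETE) queue: purge its
 * fragments, give back the accounted memory, and free the queue object
 * itself after an RCU grace period so concurrent lockless lookups stay safe.
 */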
void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct fqdir *fqdir;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

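/* Allocate and initialise a new queue.  The initial refcount of 3 accounts
 * for the references that will be held by the hash table, by the expiration
 * timer and by the caller of inet_frag_find().
 */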
static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 3);

	return q;
}

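/* Allocate a queue for @arg and insert it into the hash table.  If another
 * CPU raced us and inserted a queue with the same key first, back out and
 * report that existing queue through @prev instead.
 */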
static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
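/* Look up the reassembly queue matching @key, creating it if it does not
 * exist yet.  Returns NULL when over the memory limit, on allocation failure
 * or when the queue is concurrently being freed; otherwise the caller owns
 * a reference on the returned queue.
 */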
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	struct inet_frag_queue *fq = NULL, *prev;

	if (!fqdir->high_thresh || frag_mem_limit(fqdir) > fqdir->high_thresh)
		return NULL;

	rcu_read_lock();

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (prev && !IS_ERR(prev)) {
		fq = prev;
		if (!refcount_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);

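/* Insert one fragment covering [offset, end) into the queue.  Returns
 * IPFRAG_OK on success, IPFRAG_DUP if the data is already fully covered by
 * an existing run (the skb can simply be dropped), or IPFRAG_OVERLAP if it
 * conflicts with already received data (see the RFC 5722 note below).
 */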
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more of its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (last->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < last->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == last->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = curr->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= curr->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= curr->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	skb->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

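/* Prepare reassembly around @skb, the fragment that completed the datagram.
 * If @skb is not the first fragment, the first fragment's contents are
 * morphed into @skb and a clone of @skb takes its old place in the tree, so
 * that the buffer the caller already owns becomes the packet head.  Returns
 * an opaque cursor for inet_frag_reasm_finish() (where the frag_list will be
 * chained), or NULL on allocation failure.
 */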
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	struct sk_buff **nextp;
	int delta;

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			return NULL;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(head->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return NULL;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			return NULL;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

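/* Complete reassembly: walk the rb-tree of runs in order, chain every
 * remaining fragment onto @head's frag_list via the cursor returned by
 * inet_frag_reasm_prepare(), and update length, checksum, truesize and
 * memory accounting accordingly.
 */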
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data)
{
	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			*nextp = fp;
			nextp = &fp->next;
			fp->prev = NULL;
			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
			fp->sk = NULL;
			head->data_len += fp->len;
			head->len += fp->len;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);
			head->truesize += fp->truesize;
			fp = FRAG_CB(fp)->next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, head->truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

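/* Detach and return the first fragment of the queue.  If it heads a run,
 * the next fragment of that run is promoted into its place in the rb-tree.
 * The returned skb is no longer accounted against the queue.
 */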
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head, *skb;

	head = skb_rb_first(&q->rb_fragments);
	if (!head)
		return NULL;
	skb = FRAG_CB(head)->next_frag;
	if (skb)
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
	else
		rb_erase(&head->rbnode, &q->rb_fragments);
	memset(&head->rbnode, 0, sizeof(head->rbnode));
	barrier();

	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->fqdir, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);
567