/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Version:	$Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima          :       ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

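/* Maximum "distance" (see ip_frag_too_far() below) between fragments of
 * one datagram before the queue is presumed stale; 0 disables the check.
 */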
int sysctl_ipfrag_max_dist __read_mostly = 64;

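/* Per-fragment state kept in skb->cb: the offset of this fragment's
 * payload within the original datagram.
 */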
struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb*)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;	/* defrag context of the caller */
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	int		iif;	/* interface the fragments arrived on */
	unsigned int	rid;	/* peer->rid snapshot, for ip_frag_too_far() */
	struct inet_peer *peer;
};

struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	.high_thresh	 = 256 * 1024,
	.low_thresh	 = 192 * 1024,

	/*
	 * Important NOTE! The fragment queue must be destroyed before the MSL
	 * expires.  RFC 791 is wrong in proposing to prolong the timer by the
	 * TTL on each fragment arrival.
	 */
	.timeout	 = IP_FRAG_TIME,
	.secret_interval = 10 * 60 * HZ,
};

static struct inet_frags ip4_frags;

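/* Read-only accessors for the global reassembly statistics (queue count
 * and memory charged), e.g. for reporting via /proc.
 */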
int ip_frag_nqueues(void)
{
	return ip4_frags.nqueues;
}

int ip_frag_mem(void)
{
	return atomic_read(&ip4_frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

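/* Hash a queue's identity (IP id, protocol, source and destination
 * address) into a bucket index.  ip4_frags.rnd is a random seed,
 * re-keyed every secret_interval, which makes bucket placement hard
 * for an attacker to predict.
 */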
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip4_frags.mem);
	kfree_skb(skb);
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
	kfree(qp);
}

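/* Allocate a queue entry and charge it against the global memory limit. */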
static __inline__ struct ipq *frag_alloc_queue(void)
{
	struct ipq *qp = kzalloc(sizeof(struct ipq), GFP_ATOMIC);

	if (!qp)
		return NULL;
	atomic_add(sizeof(struct ipq), &ip4_frags.mem);
	return qp;
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(void)
{
	int evicted;

	evicted = inet_frag_evictor(&ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp = (struct ipq *) arg;

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->q.lock);
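	/* Drop the reference that the expire timer held on the queue. */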
	ipq_put(qp);
}

/* Creation primitives. */

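/* Insert a freshly created queue into the hash table, unless another CPU
 * has already inserted an equivalent one, in which case that entry wins
 * and qp_in is discarded.
 */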
static struct ipq *ip_frag_intern(struct ipq *qp_in)
{
	struct ipq *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&ip4_frags.lock);
	hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
			 qp_in->protocol);
#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash table, because an equivalent
	 * entry could have been created on another CPU between our
	 * dropping the read lock and taking the write lock.
	 */
	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
		if (qp->id == qp_in->id &&
		    qp->saddr == qp_in->saddr &&
		    qp->daddr == qp_in->daddr &&
		    qp->protocol == qp_in->protocol &&
		    qp->user == qp_in->user) {
			atomic_inc(&qp->q.refcnt);
			write_unlock(&ip4_frags.lock);
			qp_in->q.last_in |= COMPLETE;
			ipq_put(qp_in);
			return qp;
		}
	}
#endif
	qp = qp_in;

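	/* Arm the timeout.  mod_timer() returns 0 when the timer was not
	 * already pending; in that case the timer needs its own reference.
	 */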
	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
		atomic_inc(&qp->q.refcnt);

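	/* One more reference for the hash table. */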
	atomic_inc(&qp->q.refcnt);
	hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
	INIT_LIST_HEAD(&qp->q.lru_list);
	list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
	ip4_frags.nqueues++;
	write_unlock(&ip4_frags.lock);
	return qp;
}

/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
{
	struct ipq *qp;

	if ((qp = frag_alloc_queue()) == NULL)
		goto out_nomem;

	qp->protocol = iph->protocol;
	qp->id = iph->id;
	qp->saddr = iph->saddr;
	qp->daddr = iph->daddr;
	qp->user = user;
	qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL;

	/* Initialize a timer for this entry. */
	init_timer(&qp->q.timer);
	qp->q.timer.data = (unsigned long) qp;	/* pointer to queue	*/
	qp->q.timer.function = ip_expire;	/* expire function	*/
	spin_lock_init(&qp->q.lock);
	atomic_set(&qp->q.refcnt, 1);

	return ip_frag_intern(qp);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left!\n");
	return NULL;
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
	__be16 id = iph->id;
	__be32 saddr = iph->saddr;
	__be32 daddr = iph->daddr;
	__u8 protocol = iph->protocol;
	unsigned int hash;
	struct ipq *qp;
	struct hlist_node *n;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(id, saddr, daddr, protocol);
	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
		if (qp->id == id &&
		    qp->saddr == saddr &&
		    qp->daddr == daddr &&
		    qp->protocol == protocol &&
		    qp->user == user) {
			atomic_inc(&qp->q.refcnt);
			read_unlock(&ip4_frags.lock);
			return qp;
		}
	}
	read_unlock(&ip4_frags.lock);

	return ip_frag_create(iph, user);
}

/* Is the fragment too far ahead to be part of ipq? */
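/*
 * Every inet_peer carries a counter (rid) that is bumped for each
 * fragment received from that source; qp->rid remembers the value seen
 * when this queue last accepted a fragment.  If more than
 * sysctl_ipfrag_max_dist fragments from the same source arrived in
 * between, the datagram is unlikely to ever complete, so give up on it.
 */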
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

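/* Restart the queue from scratch: re-arm its timer and drop all the
 * fragments collected so far (used when ip_frag_too_far() triggers).
 */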
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(fp, NULL);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	if (qp->q.last_in & COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
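	/* A fragment that adds no data is invalid. */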
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &ip4_frags.mem);
	if (offset == 0)
		qp->q.last_in |= FIRST_IN;

	if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

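		/* skb_morph() makes "head" take over the identity of the
		 * old head skb while keeping its own data.
		 */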
		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG_CB(head)->offset == 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	err = -ENOMEM;
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first holding the data and paged part,
	 * and the second holding only the fragment list. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip4_frags.mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &ip4_frags.mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip4_frags.mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO
			"Oversized IP packet from %d.%d.%d.%d.\n",
			NIPQUAD(qp->saddr));
out_fail:
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
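/*
 * "user" identifies the reassembly context (local delivery, netfilter,
 * ...); fragments are matched only against queues created with the same
 * user value, so different contexts never share a queue.
 */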
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;

	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
		ip_evictor();

	/* Lookup (or create) queue header */
	if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}

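/* Hook the IPv4-specific callbacks and limits into the generic
 * inet_frag machinery at boot time.
 */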
void __init ipfrag_init(void)
{
	ip4_frags.ctl = &ip4_frags_ctl;
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);