xref: /linux/net/ipv6/reassembly.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /*
2  *	IPv6 fragment reassembly
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
9  *
10  *	Based on: net/ipv4/ip_fragment.c
11  *
12  *	This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17 
18 /*
19  *	Fixes:
20  *	Andi Kleen	Make it work with multiple hosts.
21  *			More RFC compliance.
22  *
23  *      Horst von Brand Add missing #include <linux/string.h>
24  *	Alexey Kuznetsov	SMP races, threading, cleanup.
25  *	Patrick McHardy		LRU queue of frag heads for evictor.
26  *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
27  *	David Stevens and
28  *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
29  *				calculate ICV correctly.
30  */
31 #include <linux/errno.h>
32 #include <linux/types.h>
33 #include <linux/string.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/jiffies.h>
37 #include <linux/net.h>
38 #include <linux/list.h>
39 #include <linux/netdevice.h>
40 #include <linux/in6.h>
41 #include <linux/ipv6.h>
42 #include <linux/icmpv6.h>
43 #include <linux/random.h>
44 #include <linux/jhash.h>
45 
46 #include <net/sock.h>
47 #include <net/snmp.h>
48 
49 #include <net/ipv6.h>
50 #include <net/protocol.h>
51 #include <net/transp_v6.h>
52 #include <net/rawv6.h>
53 #include <net/ndisc.h>
54 #include <net/addrconf.h>
55 
56 int sysctl_ip6frag_high_thresh = 256*1024;
57 int sysctl_ip6frag_low_thresh = 192*1024;
58 
59 int sysctl_ip6frag_time = IPV6_FRAG_TIMEOUT;
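/*
 * Reassembly memory limits and per-queue lifetime.  When the total memory
 * charged to ip6_frag_mem exceeds the high threshold, ip6_evictor() drops
 * the oldest queues until usage falls below the low threshold.  These are
 * the values behind the ip6frag_high_thresh / ip6frag_low_thresh /
 * ip6frag_time sysctls (registration assumed to live in sysctl_net_ipv6.c,
 * not in this file), e.g.:
 *
 *	sysctl -w net.ipv6.ip6frag_high_thresh=262144
 */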
60 
61 struct ip6frag_skb_cb
62 {
63 	struct inet6_skb_parm	h;
64 	int			offset;
65 };
66 
67 #define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))
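/*
 * FRAG6_CB() overlays skb->cb: the inet6 parameter block must stay first
 * so IP6CB() keeps working, and 'offset' records where this fragment's
 * payload starts within the datagram being reassembled.
 */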
68 
69 
70 /*
71  *	Equivalent of ipv4 struct ipq
72  */
73 
74 struct frag_queue
75 {
76 	struct hlist_node	list;
77 	struct list_head lru_list;		/* lru list member	*/
78 
79 	__u32			id;		/* fragment id		*/
80 	struct in6_addr		saddr;
81 	struct in6_addr		daddr;
82 
83 	spinlock_t		lock;
84 	atomic_t		refcnt;
85 	struct timer_list	timer;		/* expire timer		*/
86 	struct sk_buff		*fragments;
87 	int			len;
88 	int			meat;
89 	int			iif;
90 	struct timeval		stamp;
91 	unsigned int		csum;
92 	__u8			last_in;	/* has first/last segment arrived? */
93 #define COMPLETE		4
94 #define FIRST_IN		2
95 #define LAST_IN			1
96 	__u16			nhoffset;
97 };
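/*
 * One frag_queue exists per (id, saddr, daddr) while a datagram is being
 * reassembled.  It is linked into ip6_frag_hash for lookup and into
 * ip6_frag_lru_list for the evictor (both under ip6_frag_lock); per-queue
 * state is serialized by fq->lock and its lifetime by fq->refcnt.
 */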
98 
99 /* Hash table. */
100 
101 #define IP6Q_HASHSZ	64
102 
103 static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
104 static DEFINE_RWLOCK(ip6_frag_lock);
105 static u32 ip6_frag_hash_rnd;
106 static LIST_HEAD(ip6_frag_lru_list);
107 int ip6_frag_nqueues = 0;
108 
109 static __inline__ void __fq_unlink(struct frag_queue *fq)
110 {
111 	hlist_del(&fq->list);
112 	list_del(&fq->lru_list);
113 	ip6_frag_nqueues--;
114 }
115 
116 static __inline__ void fq_unlink(struct frag_queue *fq)
117 {
118 	write_lock(&ip6_frag_lock);
119 	__fq_unlink(fq);
120 	write_unlock(&ip6_frag_lock);
121 }
122 
123 /*
124  * Callers should be careful not to use the hash value outside the ip6_frag_lock,
125  * as doing so could race with ip6_frag_hash_rnd being recalculated.
126  */
127 static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
128 			       struct in6_addr *daddr)
129 {
130 	u32 a, b, c;
131 
132 	a = saddr->s6_addr32[0];
133 	b = saddr->s6_addr32[1];
134 	c = saddr->s6_addr32[2];
135 
136 	a += JHASH_GOLDEN_RATIO;
137 	b += JHASH_GOLDEN_RATIO;
138 	c += ip6_frag_hash_rnd;
139 	__jhash_mix(a, b, c);
140 
141 	a += saddr->s6_addr32[3];
142 	b += daddr->s6_addr32[0];
143 	c += daddr->s6_addr32[1];
144 	__jhash_mix(a, b, c);
145 
146 	a += daddr->s6_addr32[2];
147 	b += daddr->s6_addr32[3];
148 	c += id;
149 	__jhash_mix(a, b, c);
150 
151 	return c & (IP6Q_HASHSZ - 1);
152 }
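/*
 * ip6qhashfn() is an open-coded jhash pass over both addresses and the
 * fragment id, seeded with ip6_frag_hash_rnd so remote senders cannot
 * predict bucket placement and deliberately build long hash chains.
 */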
153 
154 static struct timer_list ip6_frag_secret_timer;
155 int sysctl_ip6frag_secret_interval = 10 * 60 * HZ;
156 
157 static void ip6_frag_secret_rebuild(unsigned long dummy)
158 {
159 	unsigned long now = jiffies;
160 	int i;
161 
162 	write_lock(&ip6_frag_lock);
163 	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
164 	for (i = 0; i < IP6Q_HASHSZ; i++) {
165 		struct frag_queue *q;
166 		struct hlist_node *p, *n;
167 
168 		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
169 			unsigned int hval = ip6qhashfn(q->id,
170 						       &q->saddr,
171 						       &q->daddr);
172 
173 			if (hval != i) {
174 				hlist_del(&q->list);
175 
176 				/* Relink to new hash chain. */
177 				hlist_add_head(&q->list,
178 					       &ip6_frag_hash[hval]);
179 
180 			}
181 		}
182 	}
183 	write_unlock(&ip6_frag_lock);
184 
185 	mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval);
186 }
187 
188 atomic_t ip6_frag_mem = ATOMIC_INIT(0);
189 
190 /* Memory Tracking Functions. */
191 static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
192 {
193 	if (work)
194 		*work -= skb->truesize;
195 	atomic_sub(skb->truesize, &ip6_frag_mem);
196 	kfree_skb(skb);
197 }
198 
199 static inline void frag_free_queue(struct frag_queue *fq, int *work)
200 {
201 	if (work)
202 		*work -= sizeof(struct frag_queue);
203 	atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
204 	kfree(fq);
205 }
206 
207 static inline struct frag_queue *frag_alloc_queue(void)
208 {
209 	struct frag_queue *fq = kzalloc(sizeof(struct frag_queue), GFP_ATOMIC);
210 
211 	if(!fq)
212 		return NULL;
213 	atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
214 	return fq;
215 }
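/*
 * Every byte of reassembly state (queue structs and queued skbs) is
 * charged to ip6_frag_mem by the helpers above and released through
 * frag_kfree_skb()/frag_free_queue(), so the evictor only has to compare
 * one counter against the sysctl thresholds.
 */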
216 
217 /* Destruction primitives. */
218 
219 /* Complete destruction of fq. */
220 static void ip6_frag_destroy(struct frag_queue *fq, int *work)
221 {
222 	struct sk_buff *fp;
223 
224 	BUG_TRAP(fq->last_in&COMPLETE);
225 	BUG_TRAP(del_timer(&fq->timer) == 0);
226 
227 	/* Release all fragment data. */
228 	fp = fq->fragments;
229 	while (fp) {
230 		struct sk_buff *xp = fp->next;
231 
232 		frag_kfree_skb(fp, work);
233 		fp = xp;
234 	}
235 
236 	frag_free_queue(fq, work);
237 }
238 
239 static __inline__ void fq_put(struct frag_queue *fq, int *work)
240 {
241 	if (atomic_dec_and_test(&fq->refcnt))
242 		ip6_frag_destroy(fq, work);
243 }
244 
245 /* Kill fq entry. It is not destroyed immediately,
246  * because the caller (and possibly others) still holds a reference.
247  */
248 static __inline__ void fq_kill(struct frag_queue *fq)
249 {
250 	if (del_timer(&fq->timer))
251 		atomic_dec(&fq->refcnt);
252 
253 	if (!(fq->last_in & COMPLETE)) {
254 		fq_unlink(fq);
255 		atomic_dec(&fq->refcnt);
256 		fq->last_in |= COMPLETE;
257 	}
258 }
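/*
 * fq_kill() drops up to two references: the one held by a still-pending
 * timer and the one held by the hash table/LRU.  The caller keeps its own
 * reference and releases it later with fq_put().
 */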
259 
260 static void ip6_evictor(void)
261 {
262 	struct frag_queue *fq;
263 	struct list_head *tmp;
264 	int work;
265 
266 	work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
267 	if (work <= 0)
268 		return;
269 
270 	while(work > 0) {
271 		read_lock(&ip6_frag_lock);
272 		if (list_empty(&ip6_frag_lru_list)) {
273 			read_unlock(&ip6_frag_lock);
274 			return;
275 		}
276 		tmp = ip6_frag_lru_list.next;
277 		fq = list_entry(tmp, struct frag_queue, lru_list);
278 		atomic_inc(&fq->refcnt);
279 		read_unlock(&ip6_frag_lock);
280 
281 		spin_lock(&fq->lock);
282 		if (!(fq->last_in&COMPLETE))
283 			fq_kill(fq);
284 		spin_unlock(&fq->lock);
285 
286 		fq_put(fq, &work);
287 		IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
288 	}
289 }
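/*
 * The evictor frees from the head of the LRU list (the oldest queues);
 * 'work' starts as the number of bytes above the low threshold and is
 * decremented inside fq_put() -> ip6_frag_destroy() as skbs and queue
 * structures are released.
 */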
290 
291 static void ip6_frag_expire(unsigned long data)
292 {
293 	struct frag_queue *fq = (struct frag_queue *) data;
294 	struct net_device *dev;
295 
296 	spin_lock(&fq->lock);
297 
298 	if (fq->last_in & COMPLETE)
299 		goto out;
300 
301 	fq_kill(fq);
302 
303 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
304 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
305 
306 	/* Don't send error if the first segment did not arrive. */
307 	if (!(fq->last_in&FIRST_IN) || !fq->fragments)
308 		goto out;
309 
310 	dev = dev_get_by_index(fq->iif);
311 	if (!dev)
312 		goto out;
313 
314 	/*
315 	 * Use as source the device on which the LAST fragment arrived.
316 	 * Only the ifindex (fq->iif) is stored and the device is looked
317 	 * up here, because a cached device pointer might already have disappeared.
318 	 */
319 	fq->fragments->dev = dev;
320 	icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
321 	dev_put(dev);
322 out:
323 	spin_unlock(&fq->lock);
324 	fq_put(fq, NULL);
325 }
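/*
 * On timeout the queue is killed and, if the first fragment has arrived,
 * an ICMPv6 Time Exceeded (fragment reassembly time exceeded) error is
 * returned to the source, as RFC 2460 requires.
 */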
326 
327 /* Creation primitives. */
328 
329 
330 static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
331 {
332 	struct frag_queue *fq;
333 	unsigned int hash;
334 #ifdef CONFIG_SMP
335 	struct hlist_node *n;
336 #endif
337 
338 	write_lock(&ip6_frag_lock);
339 	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
340 #ifdef CONFIG_SMP
341 	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
342 		if (fq->id == fq_in->id &&
343 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
344 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
345 			atomic_inc(&fq->refcnt);
346 			write_unlock(&ip6_frag_lock);
347 			fq_in->last_in |= COMPLETE;
348 			fq_put(fq_in, NULL);
349 			return fq;
350 		}
351 	}
352 #endif
353 	fq = fq_in;
354 
355 	if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
356 		atomic_inc(&fq->refcnt);
357 
358 	atomic_inc(&fq->refcnt);
359 	hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
360 	INIT_LIST_HEAD(&fq->lru_list);
361 	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
362 	ip6_frag_nqueues++;
363 	write_unlock(&ip6_frag_lock);
364 	return fq;
365 }
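/*
 * The re-check of the hash chain above (SMP only) handles two CPUs racing
 * to create a queue for the same (id, saddr, daddr): the loser marks its
 * new queue COMPLETE, drops it via fq_put() and returns the winner.
 */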
366 
367 
368 static struct frag_queue *
369 ip6_frag_create(u32 id, struct in6_addr *src, struct in6_addr *dst)
370 {
371 	struct frag_queue *fq;
372 
373 	if ((fq = frag_alloc_queue()) == NULL)
374 		goto oom;
375 
376 	fq->id = id;
377 	ipv6_addr_copy(&fq->saddr, src);
378 	ipv6_addr_copy(&fq->daddr, dst);
379 
380 	init_timer(&fq->timer);
381 	fq->timer.function = ip6_frag_expire;
382 	fq->timer.data = (long) fq;
383 	spin_lock_init(&fq->lock);
384 	atomic_set(&fq->refcnt, 1);
385 
386 	return ip6_frag_intern(fq);
387 
388 oom:
389 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
390 	return NULL;
391 }
392 
393 static __inline__ struct frag_queue *
394 fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
395 {
396 	struct frag_queue *fq;
397 	struct hlist_node *n;
398 	unsigned int hash;
399 
400 	read_lock(&ip6_frag_lock);
401 	hash = ip6qhashfn(id, src, dst);
402 	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
403 		if (fq->id == id &&
404 		    ipv6_addr_equal(src, &fq->saddr) &&
405 		    ipv6_addr_equal(dst, &fq->daddr)) {
406 			atomic_inc(&fq->refcnt);
407 			read_unlock(&ip6_frag_lock);
408 			return fq;
409 		}
410 	}
411 	read_unlock(&ip6_frag_lock);
412 
413 	return ip6_frag_create(id, src, dst);
414 }
415 
416 
417 static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
418 			   struct frag_hdr *fhdr, int nhoff)
419 {
420 	struct sk_buff *prev, *next;
421 	int offset, end;
422 
423 	if (fq->last_in & COMPLETE)
424 		goto err;
425 
426 	offset = ntohs(fhdr->frag_off) & ~0x7;
427 	end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
428 			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
429 
430 	if ((unsigned int)end > IPV6_MAXPLEN) {
431 		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
432  		icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
433  		return;
434 	}
435 
436  	if (skb->ip_summed == CHECKSUM_HW)
437  		skb->csum = csum_sub(skb->csum,
438  				     csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));
439 
440 	/* Is this the final fragment? */
441 	if (!(fhdr->frag_off & htons(IP6_MF))) {
442 		/* If we already have data beyond this end, or the
443 		 * recorded end differs, the datagram is corrupted.
444 		 */
445 		if (end < fq->len ||
446 		    ((fq->last_in & LAST_IN) && end != fq->len))
447 			goto err;
448 		fq->last_in |= LAST_IN;
449 		fq->len = end;
450 	} else {
451 		/* Check that the fragment length is a multiple of 8 bytes,
452 		 * as RFC 2460 requires for every fragment but the last.
453 		 */
454 		if (end & 0x7) {
455 			/* RFC2460 says always send parameter problem in
456 			 * this case. -DaveM
457 			 */
458 			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
459 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
460 					  offsetof(struct ipv6hdr, payload_len));
461 			return;
462 		}
463 		if (end > fq->len) {
464 			/* Some bits beyond end -> corruption. */
465 			if (fq->last_in & LAST_IN)
466 				goto err;
467 			fq->len = end;
468 		}
469 	}
470 
471 	if (end == offset)
472 		goto err;
473 
474 	/* Point into the IP datagram 'data' part. */
475 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
476 		goto err;
477 
478 	if (pskb_trim_rcsum(skb, end - offset))
479 		goto err;
480 
481 	/* Find out which fragments are in front and at the back of us
482 	 * in the chain of fragments so far.  We must know where to put
483 	 * this fragment, right?
484 	 */
485 	prev = NULL;
486 	for(next = fq->fragments; next != NULL; next = next->next) {
487 		if (FRAG6_CB(next)->offset >= offset)
488 			break;	/* bingo! */
489 		prev = next;
490 	}
491 
492 	/* We found where to put this one.  Check for overlap with
493 	 * preceding fragment, and, if needed, align things so that
494 	 * any overlaps are eliminated.
495 	 */
496 	if (prev) {
497 		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
498 
499 		if (i > 0) {
500 			offset += i;
501 			if (end <= offset)
502 				goto err;
503 			if (!pskb_pull(skb, i))
504 				goto err;
505 			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
506 				skb->ip_summed = CHECKSUM_NONE;
507 		}
508 	}
509 
510 	/* Look for overlap with succeeding segments.
511 	 * If we can merge fragments, do it.
512 	 */
513 	while (next && FRAG6_CB(next)->offset < end) {
514 		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
515 
516 		if (i < next->len) {
517 			/* Eat head of the next overlapped fragment
518 			 * and leave the loop. The next ones cannot overlap.
519 			 */
520 			if (!pskb_pull(next, i))
521 				goto err;
522 			FRAG6_CB(next)->offset += i;	/* next fragment */
523 			fq->meat -= i;
524 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
525 				next->ip_summed = CHECKSUM_NONE;
526 			break;
527 		} else {
528 			struct sk_buff *free_it = next;
529 
530 			/* Old fragment is completely overridden by
531 			 * the new one; drop it.
532 			 */
533 			next = next->next;
534 
535 			if (prev)
536 				prev->next = next;
537 			else
538 				fq->fragments = next;
539 
540 			fq->meat -= free_it->len;
541 			frag_kfree_skb(free_it, NULL);
542 		}
543 	}
544 
545 	FRAG6_CB(skb)->offset = offset;
546 
547 	/* Insert this fragment in the chain of fragments. */
548 	skb->next = next;
549 	if (prev)
550 		prev->next = skb;
551 	else
552 		fq->fragments = skb;
553 
554 	if (skb->dev)
555 		fq->iif = skb->dev->ifindex;
556 	skb->dev = NULL;
557 	skb_get_timestamp(skb, &fq->stamp);
558 	fq->meat += skb->len;
559 	atomic_add(skb->truesize, &ip6_frag_mem);
560 
561 	/* The first fragment.
562 	 * nhoffset is obtained from the first fragment, of course.
563 	 */
564 	if (offset == 0) {
565 		fq->nhoffset = nhoff;
566 		fq->last_in |= FIRST_IN;
567 	}
568 	write_lock(&ip6_frag_lock);
569 	list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
570 	write_unlock(&ip6_frag_lock);
571 	return;
572 
573 err:
574 	IP6_INC_STATS(IPSTATS_MIB_REASMFAILS);
575 	kfree_skb(skb);
576 }
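/*
 * Overlap policy used above: the new fragment is trimmed where it overlaps
 * the preceding queued fragment, while succeeding fragments have their
 * overlapping head trimmed away (or are dropped entirely if the new data
 * covers them completely).
 */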
577 
578 /*
579  *	Check if this packet is complete.
580  *	Returns -1 on failure and 1 on success; on success *skb_in is
581  *	replaced by the reassembled frame and IP6CB(skb)->nhoff is set.
582  *
583  *	It is called with the fq lock held, and the caller must check that
584  *	the queue is eligible for reassembly, i.e. it is not COMPLETE,
585  *	the first and last fragments have arrived and all the bytes are here.
586  */
587 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
588 			  struct net_device *dev)
589 {
590 	struct sk_buff *fp, *head = fq->fragments;
591 	int    payload_len;
592 	unsigned int nhoff;
593 
594 	fq_kill(fq);
595 
596 	BUG_TRAP(head != NULL);
597 	BUG_TRAP(FRAG6_CB(head)->offset == 0);
598 
599 	/* Unfragmented part is taken from the first segment. */
600 	payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
601 	if (payload_len > IPV6_MAXPLEN)
602 		goto out_oversize;
603 
604 	/* Head of list must not be cloned. */
605 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
606 		goto out_oom;
607 
608 	/* If the first fragment is fragmented itself, split it into
609 	 * two chunks: the first holding the linear and paged data,
610 	 * and the second holding only the fragment list. */
611 	if (skb_shinfo(head)->frag_list) {
612 		struct sk_buff *clone;
613 		int i, plen = 0;
614 
615 		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
616 			goto out_oom;
617 		clone->next = head->next;
618 		head->next = clone;
619 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
620 		skb_shinfo(head)->frag_list = NULL;
621 		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
622 			plen += skb_shinfo(head)->frags[i].size;
623 		clone->len = clone->data_len = head->data_len - plen;
624 		head->data_len -= clone->len;
625 		head->len -= clone->len;
626 		clone->csum = 0;
627 		clone->ip_summed = head->ip_summed;
628 		atomic_add(clone->truesize, &ip6_frag_mem);
629 	}
630 
631 	/* We have to remove the fragment header from the datagram and shift
632 	 * the preceding headers in order to calculate the ICV correctly. */
633 	nhoff = fq->nhoffset;
634 	head->nh.raw[nhoff] = head->h.raw[0];
635 	memmove(head->head + sizeof(struct frag_hdr), head->head,
636 		(head->data - head->head) - sizeof(struct frag_hdr));
637 	head->mac.raw += sizeof(struct frag_hdr);
638 	head->nh.raw += sizeof(struct frag_hdr);
639 
640 	skb_shinfo(head)->frag_list = head->next;
641 	head->h.raw = head->data;
642 	skb_push(head, head->data - head->nh.raw);
643 	atomic_sub(head->truesize, &ip6_frag_mem);
644 
645 	for (fp=head->next; fp; fp = fp->next) {
646 		head->data_len += fp->len;
647 		head->len += fp->len;
648 		if (head->ip_summed != fp->ip_summed)
649 			head->ip_summed = CHECKSUM_NONE;
650 		else if (head->ip_summed == CHECKSUM_HW)
651 			head->csum = csum_add(head->csum, fp->csum);
652 		head->truesize += fp->truesize;
653 		atomic_sub(fp->truesize, &ip6_frag_mem);
654 	}
655 
656 	head->next = NULL;
657 	head->dev = dev;
658 	skb_set_timestamp(head, &fq->stamp);
659 	head->nh.ipv6h->payload_len = htons(payload_len);
660 	IP6CB(head)->nhoff = nhoff;
661 
662 	*skb_in = head;
663 
664 	/* Yes, and fold redundant checksum back. 8) */
665 	if (head->ip_summed == CHECKSUM_HW)
666 		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
667 
668 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
669 	fq->fragments = NULL;
670 	return 1;
671 
672 out_oversize:
673 	if (net_ratelimit())
674 		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
675 	goto out_fail;
676 out_oom:
677 	if (net_ratelimit())
678 		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
679 out_fail:
680 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
681 	return -1;
682 }
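/*
 * After reassembly the fragment header is gone: the Next Header byte at
 * nhoff in the preceding header has been rewritten to the reassembled
 * payload's type, payload_len covers the whole datagram, and IP6CB->nhoff
 * lets the caller resume the normal extension-header walk.
 */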
683 
684 static int ipv6_frag_rcv(struct sk_buff **skbp)
685 {
686 	struct sk_buff *skb = *skbp;
687 	struct net_device *dev = skb->dev;
688 	struct frag_hdr *fhdr;
689 	struct frag_queue *fq;
690 	struct ipv6hdr *hdr;
691 
692 	hdr = skb->nh.ipv6h;
693 
694 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
695 
696 	/* Jumbo payload inhibits frag. header */
697 	if (hdr->payload_len==0) {
698 		IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
699 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
700 		return -1;
701 	}
702 	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
703 		IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
704 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
705 		return -1;
706 	}
707 
708 	hdr = skb->nh.ipv6h;
709 	fhdr = (struct frag_hdr *)skb->h.raw;
710 
711 	if (!(fhdr->frag_off & htons(0xFFF9))) {
712 		/* It is not a fragmented frame */
713 		skb->h.raw += sizeof(struct frag_hdr);
714 		IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
715 
716 		IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw;
717 		return 1;
718 	}
719 
720 	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
721 		ip6_evictor();
722 
723 	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
724 		int ret = -1;
725 
726 		spin_lock(&fq->lock);
727 
728 		ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
729 
730 		if (fq->last_in == (FIRST_IN|LAST_IN) &&
731 		    fq->meat == fq->len)
732 			ret = ip6_frag_reasm(fq, skbp, dev);
733 
734 		spin_unlock(&fq->lock);
735 		fq_put(fq, NULL);
736 		return ret;
737 	}
738 
739 	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
740 	kfree_skb(skb);
741 	return -1;
742 }
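/*
 * Return convention of ipv6_frag_rcv(): a positive value tells the caller
 * to continue parsing this skb at IP6CB(skb)->nhoff (it was either not a
 * real fragment or reassembly just completed), while -1 means the skb was
 * consumed here: queued, dropped, or answered with an ICMP error.
 */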
743 
744 static struct inet6_protocol frag_protocol =
745 {
746 	.handler	=	ipv6_frag_rcv,
747 	.flags		=	INET6_PROTO_NOPOLICY,
748 };
749 
750 void __init ipv6_frag_init(void)
751 {
752 	if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
753 		printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");
754 
755 	ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
756 				   (jiffies ^ (jiffies >> 6)));
757 
758 	init_timer(&ip6_frag_secret_timer);
759 	ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
760 	ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
761 	add_timer(&ip6_frag_secret_timer);
762 }
763