xref: /linux/net/ipv4/ip_fragment.c (revision b233b28eac0cc37d07c2d007ea08c86c778c5af4)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
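/* skb->cb is scratch space private to the layer currently holding the
 * skb, so while a fragment sits on a reassembly queue we borrow it to
 * remember the fragment's data offset within the original datagram.
 */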

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

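/* Hash the (id, protocol, saddr, daddr) tuple into the reassembly table.
 * ip4_frags.rnd is a random seed, rekeyed every secret_interval, which
 * is presumably meant to keep remote senders from predicting bucket
 * placement and building hash-collision chains.
 */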
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

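/* Two fragments belong to the same datagram only if the RFC 791 tuple
 * (id, saddr, daddr, protocol) matches AND they were queued by the same
 * class of user (see the ip_defrag() call sites), so e.g. netfilter and
 * local delivery never share a reassembly context.
 */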
static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return (qp->id == arg->iph->id &&
			qp->saddr == arg->iph->saddr &&
			qp->daddr == arg->iph->daddr &&
			qp->protocol == arg->iph->protocol &&
			qp->user == arg->user);
}

/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct netns_frags *nf,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer(arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
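/* Eviction is LRU per namespace: ip_frag_queue() moves a queue to the
 * tail of net->ipv4.frags.lru_list whenever a fragment arrives, so the
 * generic evictor reaps from the head, i.e. the queue idle longest.
 */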
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, creating a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q == NULL)
		goto out_nomem;

	return container_of(q, struct ipq, q);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
	return NULL;
}

/* Is the fragment too far ahead to be part of ipq? */
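/* Every fragment from a given source bumps the per-peer counter
 * peer->rid; qp->rid remembers its value as of this queue's previous
 * fragment.  If more than sysctl_ipfrag_max_dist fragments from the
 * same source arrived in between, they all went to other queues and
 * this one is likely the leftover of a lost datagram, so give up on
 * it (a heuristic guard against mis-association when the 16-bit IP id
 * wraps quickly).
 */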
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

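/* Restart a queue that ip_frag_too_far() judged stale: rearm its timer
 * and drop everything collected so far, so reassembly begins afresh
 * with the fragment in hand.  If mod_timer() returns 0 the timer had
 * already expired and ip_expire() owns the teardown, so we only take
 * an extra reference (keeping the subsequent kill/put accounting
 * balanced) and report -ETIMEDOUT.
 */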
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp, NULL);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

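	/* iph->frag_off packs three flag bits above a 13-bit fragment
	 * offset expressed in 8-byte units; IP_OFFSET masks the offset
	 * and the <<= 3 below converts it to bytes.  For example,
	 * frag_off == 0x2003 means "more fragments" with byte offset 24.
	 */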
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * the preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

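	/* The new fragment wins any overlap with fragments behind it:
	 * successors it covers completely are unlinked and freed, and a
	 * partially covered successor has its head trimmed.  Only the
	 * first partial overlap needs handling, since fragments behind
	 * it start even later.
	 */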
	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

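	/* The datagram is complete once the first and last fragments
	 * have been seen and the byte count collected so far (meat)
	 * exactly fills the expected total length.
	 */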
	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
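	/* The caller of ip_defrag() keeps using the skb pointer it
	 * passed in, so the reassembled datagram must be built in that
	 * buffer: park a clone of the newcomer in its old slot, then
	 * skb_morph() the old head's data onto the caller's skb so it
	 * becomes the new queue head.
	 */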
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
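	/* (head's frag_list slot is reused below to chain the remaining
	 * fragments, so any list it already carries has to be parked in
	 * a fresh zero-length skb linked in right after it.)
	 */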
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &qp->q.net->mem);

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &qp->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
			&qp->saddr);
out_fail:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
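/* Returns 0 when skb now holds the fully reassembled datagram,
 * -EINPROGRESS when the fragment was queued to wait for its siblings,
 * and another negative errno on failure (in which case the skb has
 * been consumed).
 */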
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
		ip_evictor(net);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.ctl_name	= NET_IPV4_IPFRAG_HIGH_THRESH,
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.ctl_name	= NET_IPV4_IPFRAG_LOW_THRESH,
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.ctl_name	= NET_IPV4_IPFRAG_TIME,
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies
	},
	{ }
};

static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV4_IPFRAG_SECRET_INTERVAL,
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

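/* The per-namespace table above is kmemdup()ed for every netns other
 * than init_net, with each .data pointer redirected to that
 * namespace's own fields; the global knobs in ip4_frags_ctl_table are
 * registered once via register_net_sysctl_rotable() and show up
 * read-only outside the initial namespace.
 */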
static int ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void ip4_frags_ctl_register(void)
{
	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int ipv4_frags_init_net(struct net *net)
{
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	net->ipv4.frags.high_thresh = 256 * 1024;
	net->ipv4.frags.low_thresh = 192 * 1024;
	/*
	 * Important NOTE! The fragment queue must be destroyed before the MSL
	 * expires. RFC 791 is wrong in proposing to prolong the timer by the
	 * TTL on each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

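/* Called once at boot (from inet_init()): plug the IPv4-specific
 * callbacks into the generic inet_frag engine, register the sysctls
 * and the per-namespace init/exit hooks, then start the engine, which
 * also arms the rekeying timer for the hash seed.
 */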
void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);