/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen              Make it work with multiple hosts.
 *                              More RFC compliance.
 *
 *      Horst von Brand         Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb {
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue {
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id */
        u32                     user;
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
        return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
        return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

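/* Hash a fragment's (id, saddr, daddr) tuple into the reassembly hash
 * table.  This is essentially an open-coded jhash over the two IPv6
 * addresses and the fragment id, seeded with the per-instance random
 * value (rnd) so that remote hosts cannot predict bucket placement.
 * The result is masked down to one of INETFRAGS_HASHSZ (a power of
 * two) buckets.
 */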
/*
 * Callers should be careful not to use the hash value outside the ipfrag_lock,
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
                             const struct in6_addr *daddr, u32 rnd)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return (fq->id == arg->id && fq->user == arg->user &&
                ipv6_addr_equal(&fq->saddr, arg->src) &&
                ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
                                  struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &nf->mem);
        kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        fq->user = arg->user;
        ipv6_addr_copy(&fq->saddr, arg->src);
        ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry.  It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net_device *dev = NULL;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        fq_kill(fq);

        net = container_of(fq->q.net, struct net, ipv6.frags);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, fq->iif);
        if (!dev)
                goto out_rcu_unlock;

        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

        /* Don't send an error if the first segment did not arrive. */
        if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
                goto out_rcu_unlock;

        /* Use the device on which the LAST ARRIVED segment was received
         * as the source device, and do not use the fq->dev pointer
         * directly: the device might already have disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
        rcu_read_unlock();
out:
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}

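/* Look up the reassembly queue for (id, src, dst), creating one if it
 * does not exist yet.  inet_frag_find() returns the queue with a
 * reference held (dropped later via fq_put()) and releases
 * ip6_frags.lock taken below.  The IP6_DEFRAG_LOCAL_DELIVER tag keeps
 * local-delivery reassembly from matching queues owned by other users,
 * e.g. netfilter defragmentation.
 */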
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.user = IP6_DEFRAG_LOCAL_DELIVER;
        arg.src = src;
        arg.dst = dst;

        read_lock(&ip6_frags.lock);
        hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (q == NULL)
                return NULL;

        return container_of(q, struct frag_queue, q);
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                          struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;
        struct net *net = dev_net(skb_dst(skb)->dev);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says to always send a parameter problem
                         * in this case. -DaveM
                         */
                        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front of and behind us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * the preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
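        /* A minimal worked example of the trim below: if the previous
         * fragment sits at offset 0 with len 24 and this one claims
         * offset 16, then i = (0 + 24) - 16 = 8, so the first 8 bytes
         * of this fragment duplicate data we already hold; we advance
         * offset by 8 and pull 8 bytes off the head before linking it in.
         */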
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat the head of the next overlapped fragment
                         * and leave the loop.  The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->q.fragments = next;

                        fq->q.meat -= free_it->len;
                        frag_kfree_skb(fq->q.net, free_it, NULL);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &fq->q.net->mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);

        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 *      Check if this packet is complete.
 *
 *      Returns 1 when the datagram was successfully reassembled (with
 *      IP6CB(head)->nhoff set to the offset of the current nexthdr field
 *      in the reassembled frame) and -1 on failure for any reason.
 *
 *      It is called with the fq locked, and the caller must check that the
 *      queue is eligible for reassembly, i.e. it is not COMPLETE and both
 *      the last and the first frames have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
        struct sk_buff *fp, *head = fq->q.fragments;
        int payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG6_CB(head)->offset != 0);

        /* The unfragmentable part is taken from the first segment. */
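        /* A rough sketch of the arithmetic below: (head->data minus the
         * network header) covers the IPv6 header, any extension headers
         * that preceded the fragment header, and the fragment header
         * itself (head->data was pulled past it in ip6_frag_queue()).
         * Subtracting the IPv6 and fragment header sizes leaves just the
         * per-fragment extension headers, to which fq->q.len, the total
         * length of the reassembled fragmentable part, is added.  With
         * no other extension headers this is simply fq->q.len.
         */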
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frags(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (clone == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &fq->q.net->mem);
        }

        /* We have to remove the fragment header from the datagram and
         * relocate the header in order to calculate the ICV correctly.
         */
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &fq->q.net->mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &fq->q.net->mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold the redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n",
                       payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

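/* Entry point for IPPROTO_FRAGMENT.  Returns 1 when the skb holds a
 * complete datagram (reassembled, or never really fragmented), so that
 * ip6_input resubmits it and continues parsing at IP6CB(skb)->nhoff,
 * and -1 when the skb was consumed: queued for reassembly, dropped, or
 * answered with an ICMPv6 error.
 */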
static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_REASMREQDS);

        /* A Jumbo Payload option inhibits the fragment header. */
        if (hdr->payload_len == 0)
                goto fail_hdr;

        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr))))
                goto fail_hdr;

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame: the offset bits (0xFFF8)
                 * and the M flag (0x0001) are all zero.
                 */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }

        if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
                ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;

fail_hdr:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
        return -1;
}

static const struct inet6_protocol frag_protocol = {
        .handler        = ipv6_frag_rcv,
        .flags          = INET6_PROTO_NOPOLICY,
};

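/* Reassembly is tunable via sysctl: the per-namespace
 * ip6frag_high_thresh / ip6frag_low_thresh pair bounds the memory used
 * by incomplete fragment queues (above the high watermark the evictor
 * frees queues, oldest first, until usage drops below the low one),
 * ip6frag_time sets how long a queue may stay incomplete before it
 * expires, and the global ip6frag_secret_interval controls how often
 * the hash seed is rekeyed.
 */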
#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
        {
                .procname       = "ip6frag_high_thresh",
                .data           = &init_net.ipv6.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_low_thresh",
                .data           = &init_net.ipv6.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_time",
                .data           = &init_net.ipv6.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table ip6_frags_ctl_table[] = {
        {
                .procname       = "ip6frag_secret_interval",
                .data           = &ip6_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip6_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table),
                                GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv6.frags.high_thresh;
                table[1].data = &net->ipv6.frags.low_thresh;
                table[2].data = &net->ipv6.frags.timeout;
        }

        hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv6.sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
        ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
                                                     ip6_frags_ctl_table);
        return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
        return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
        net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

        inet_frags_init_net(&net->ipv6.frags);

        return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
        ip6_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
        .init = ipv6_frags_init_net,
        .exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
        int ret;

        ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        if (ret)
                goto out;

        ret = ip6_frags_sysctl_register();
        if (ret)
                goto err_sysctl;

        ret = register_pernet_subsys(&ip6_frags_ops);
        if (ret)
                goto err_pernet;

        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
        ip6_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip6_frags);
out:
        return ret;

err_pernet:
        ip6_frags_sysctl_unregister();
err_sysctl:
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        goto out;
}

void ipv6_frag_exit(void)
{
        inet_frags_fini(&ip6_frags);
        ip6_frags_sysctl_unregister();
        unregister_pernet_subsys(&ip6_frags_ops);
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}