/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
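 *
 * The lookup key for a reassembly queue is the tuple (fragment id, source
 * address, destination address); the per-table random seed (rnd) is mixed in
 * so that the bucket layout is not predictable by remote senders.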
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	dev = dev_get_by_index(net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out;

	/*
	 * But use the device on which the LAST ARRIVED segment was received
	 * as the source device. And do not use the fq->dev pointer directly;
	 * the device might have already disappeared.
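	 *
	 * Per RFC 2460, section 4.5, the ICMPv6 Time Exceeded (fragment
	 * reassembly time exceeded) message is sent to the source only when
	 * the fragment with offset zero has been received, which is exactly
	 * the condition checked above.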
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		goto oom;

	return container_of(q, struct frag_queue, q);

oom:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for(next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
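	 *
	 * Concretely: i = (prev->offset + prev->len) - offset is the number
	 * of bytes at the front of this fragment that prev already covers.
	 * If i > 0, advance offset by i and pskb_pull() those bytes off the
	 * new skb, so the queue never stores the same byte twice on the
	 * leading side.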
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overlapped by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(fq->q.net, free_it, NULL);
		}
	}

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on failure for any reason, and 1 when the frame has been
 *	successfully reassembled; IP6CB(head)->nhoff then holds the offset of
 *	the nexthdr field in the reassembled frame.
 *
 *	It is called with the fq locked; the caller must check that the queue
 *	is eligible for reassembly, i.e. it is not COMPLETE, the first and
 *	last fragments have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment.
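	 *
	 * payload_len below works out to: the extension headers that precede
	 * the (soon to be removed) fragment header, plus fq->q.len, the total
	 * length of the reassembled fragmentable part.  This is the value
	 * written back into the IPv6 payload_len field once reassembly is
	 * done.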
	 */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the header in order to calculate the ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
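	/*
	 * A payload length of zero is only legitimate for jumbograms
	 * (RFC 2675), and RFC 2675 forbids combining the Jumbo Payload
	 * option with a Fragment header, so treat this as a header error.
	 */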
	if (hdr->payload_len==0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb_dst(skb)))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_HIGH_THRESH,
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_LOW_THRESH,
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.ctl_name	= NET_IPV6_IP6FRAG_TIME,
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.ctl_name	= NET_IPV6_IP6FRAG_SECRET_INTERVAL,
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies
	},
	{ }
};

static int ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (net != &init_net)
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
			ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = 256 * 1024;
	net->ipv6.frags.low_thresh = 192 * 1024;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}