// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn;		/* RFC3168 support */
	u16		max_df_size;	/* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev,
			 int *refs);
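
/* Illustrative sketch (editor's addition, not part of the kernel logic;
 * the _example name is invented): ip4_frag_ecn() turns each fragment's
 * ECN codepoint into a single bit, and ip_frag_queue() ORs those bits
 * into qp->ecn so reassembly can later reject invalid ECN mixtures via
 * ip_frag_ecn_table.  E.g. a datagram whose fragments arrived with
 * ECT(0) and CE yields the mask computed below.
 */
static inline u8 ip4_frag_ecn_example(void)
{
	u8 mask = 0;

	mask |= ip4_frag_ecn(INET_ECN_ECT_0);	/* 1 << 2 == 0x04 */
	mask |= ip4_frag_ecn(INET_ECN_CE);	/* 1 << 3 == 0x08 */

	return mask;				/* 0x0c */
}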

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	const struct frag_v4_compare_key *key = a;
	struct net *net = q->fqdir->net;
	struct inet_peer *p = NULL;

	q->key.v4 = *key;
	qp->ecn = 0;
	if (q->fqdir->max_dist) {
		rcu_read_lock();
		p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif);
		if (p && !refcount_inc_not_zero(&p->refcnt))
			p = NULL;
		rcu_read_unlock();
	}
	qp->peer = p;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}

static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT;
	struct inet_frag_queue *frag = timer_container_of(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int refs = 1;

	qp = container_of(frag, struct ipq, q);
	net = qp->q.fqdir->net;

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	qp->q.flags |= INET_FRAG_DROP;
	inet_frag_kill(&qp->q, &refs);

	/* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
	if (READ_ONCE(qp->q.fqdir->dead)) {
		inet_frag_queue_flush(&qp->q, 0);
		goto out;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&qp->q);
	if (!head)
		goto out;
	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	reason = ip_route_input_noref(head, iph->daddr, iph->saddr,
				      ip4h_dscp(iph), head->dev);
	if (reason)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT;
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb_reason(head, reason);
	inet_frag_putn(&qp->q, refs);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(net->ipv4.fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}
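
/* Illustrative sketch (editor's addition; _example name is invented):
 * two fragments land in the same queue iff their whole lookup keys
 * match.  The header-derived part of that key is shown below; the real
 * key additionally carries the defrag "user" and the l3mdev vif, which
 * are not present in the IP header itself.
 */
static inline bool ip4_frag_same_queue_example(const struct iphdr *a,
					       const struct iphdr *b)
{
	return a->saddr == b->saddr && a->daddr == b->daddr &&
	       a->id == b->id && a->protocol == b->protocol;
}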

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.fqdir->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc)
		__IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	if (!mod_timer_pending(&qp->q.timer, jiffies + qp->q.fqdir->timeout))
		return -ETIMEDOUT;

	inet_frag_queue_flush(&qp->q, SKB_DROP_REASON_FRAG_TOO_FAR);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
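
/* Illustrative sketch (editor's addition; _example name is invented):
 * ip_frag_too_far() snapshots the peer-global fragment counter
 * (peer->rid) each time this queue sees a fragment, and declares the
 * queue stale when too many other fragments from the same peer arrived
 * in between.  Unsigned subtraction keeps the distance check correct
 * across u32 wraparound, e.g.:
 */
static inline bool ip4_frag_dist_example(void)
{
	unsigned int start = 0xfffffff0;	/* snapshot before wrap */
	unsigned int end = 0x00000010;		/* counter after wrap */

	return (end - start) > 64;		/* 32 > 64 -> false */
}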

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb, int *refs)
{
	struct net *net = qp->q.fqdir->net;
	int ihl, end, flags, offset;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int err = -ENOENT;
	SKB_DR(reason);
	u8 ecn;

	/* If reassembly is already done, @skb must be a duplicate frag. */
	if (qp->q.flags & INET_FRAG_COMPLETE) {
		SKB_DR_SET(reason, DUP_FRAG);
		goto err;
	}

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		inet_frag_kill(&qp->q, refs);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note: skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Make sure the compiler won't do silly aliasing games. */
	barrier();

	prev_tail = qp->q.fragments_tail;
	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		qp->iif = dev->ifindex;

	qp->q.stamp = skb->tstamp;
	qp->q.tstamp_type = skb->tstamp_type;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.fqdir, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev, refs);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q, refs);
		return err;
	}

	skb_dst_drop(skb);
	skb_orphan(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		SKB_DR_SET(reason, DUP_FRAG);
		err = -EINVAL;
		goto err;
	}
	err = -EINVAL;
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q, refs);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb_reason(skb, reason);
	return err;
}
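
/* Worked example (editor's addition; _example name is invented): the
 * on-wire frag_off field packs the flags into the top three bits and
 * the fragment offset, in 8-byte units, into the remaining thirteen.
 * For the second fragment of a datagram split at 1480-byte boundaries:
 */
static inline int ip4_frag_offset_example(void)
{
	__be16 wire = htons(IP_MF | 185);	/* wire value 0x20b9 */
	int offset = ntohs(wire);
	int flags = offset & ~IP_OFFSET;	/* IP_MF: more to come */

	offset &= IP_OFFSET;			/* 185 8-byte units */
	offset <<= 3;				/* byte offset 1480 */

	return (flags & IP_MF) ? offset : -1;
}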

static bool ip_frag_coalesce_ok(const struct ipq *qp)
{
	return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
}

/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev,
			 int *refs)
{
	struct net *net = qp->q.fqdir->net;
	struct iphdr *iph;
	void *reasm_data;
	int len, err;
	u8 ecn;

	inet_frag_kill(&qp->q, refs);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}

	/* Make the one we just received the head. */
	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
	if (!reasm_data)
		goto out_nomem;

	len = ip_hdrlen(skb) + qp->q.len;
	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	inet_frag_reasm_finish(&qp->q, skb, reasm_data,
			       ip_frag_coalesce_ok(qp));

	skb->dev = dev;
	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(skb);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev;
	struct ipq *qp;
	int vif;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);

	/* Lookup (or create) queue header */
	rcu_read_lock();
	dev = skb->dev ? : skb_dst_dev_rcu(skb);
	vif = l3mdev_master_ifindex_rcu(dev);
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret, refs = 0;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb, &refs);

		spin_unlock(&qp->q.lock);
		rcu_read_unlock();
		inet_frag_putn(&qp->q, refs);
		return ret;
	}
	rcu_read_unlock();

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
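
/* Usage sketch (editor's addition, mirroring what an AF_PACKET-style tap
 * does; the _example name is invented): feed possibly-fragmented skbs
 * through ip_check_defrag() and only process complete datagrams.
 */
static inline struct sk_buff *ip4_defrag_tap_example(struct net *net,
						     struct sk_buff *skb)
{
	/* Non-fragments pass through unchanged.  A fragment is consumed
	 * and NULL is returned while reassembly is in progress (or on
	 * error); once the last fragment arrives, the fully reassembled
	 * datagram comes back as a single skb.
	 */
	return ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
}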

#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ipfrag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ipfrag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;
	}
	table[0].data = &net->ipv4.fqdir->high_thresh;
	table[0].extra1 = &net->ipv4.fqdir->low_thresh;
	table[1].data = &net->ipv4.fqdir->low_thresh;
	table[1].extra2 = &net->ipv4.fqdir->high_thresh;
	table[2].data = &net->ipv4.fqdir->timeout;
	table[3].data = &net->ipv4.fqdir->max_dist;

	hdr = register_net_sysctl_sz(net, "net/ipv4", table,
				     ARRAY_SIZE(ip4_frags_ns_ctl_table));
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	const struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif
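
/* Illustrative sketch (editor's addition; _example name is invented):
 * the per-netns sysctl handlers above write straight into the netns
 * fqdir, so kernel code reads the current limits directly from there,
 * with no sysctl plumbing involved on the fast path.
 */
static inline bool ip4_frag_mem_over_limit_example(const struct net *net,
						   long mem)
{
	return mem > READ_ONCE(net->ipv4.fqdir->high_thresh);
}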
680 */ 681 net->ipv4.fqdir->timeout = IP_FRAG_TIME; 682 683 net->ipv4.fqdir->max_dist = 64; 684 685 res = ip4_frags_ns_ctl_register(net); 686 if (res < 0) 687 fqdir_exit(net->ipv4.fqdir); 688 return res; 689 } 690 691 static void __net_exit ipv4_frags_pre_exit_net(struct net *net) 692 { 693 fqdir_pre_exit(net->ipv4.fqdir); 694 } 695 696 static void __net_exit ipv4_frags_exit_net(struct net *net) 697 { 698 ip4_frags_ns_ctl_unregister(net); 699 fqdir_exit(net->ipv4.fqdir); 700 } 701 702 static struct pernet_operations ip4_frags_ops = { 703 .init = ipv4_frags_init_net, 704 .pre_exit = ipv4_frags_pre_exit_net, 705 .exit = ipv4_frags_exit_net, 706 }; 707 708 709 static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed) 710 { 711 return jhash2(data, 712 sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); 713 } 714 715 static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed) 716 { 717 const struct inet_frag_queue *fq = data; 718 719 return jhash2((const u32 *)&fq->key.v4, 720 sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); 721 } 722 723 static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) 724 { 725 const struct frag_v4_compare_key *key = arg->key; 726 const struct inet_frag_queue *fq = ptr; 727 728 return !!memcmp(&fq->key, key, sizeof(*key)); 729 } 730 731 static const struct rhashtable_params ip4_rhash_params = { 732 .head_offset = offsetof(struct inet_frag_queue, node), 733 .key_offset = offsetof(struct inet_frag_queue, key), 734 .key_len = sizeof(struct frag_v4_compare_key), 735 .hashfn = ip4_key_hashfn, 736 .obj_hashfn = ip4_obj_hashfn, 737 .obj_cmpfn = ip4_obj_cmpfn, 738 .automatic_shrinking = true, 739 }; 740 741 void __init ipfrag_init(void) 742 { 743 ip4_frags.constructor = ip4_frag_init; 744 ip4_frags.destructor = ip4_frag_free; 745 ip4_frags.qsize = sizeof(struct ipq); 746 ip4_frags.frag_expire = ip_expire; 747 ip4_frags.frags_cache_name = ip_frag_cache_name; 748 ip4_frags.rhash_params = ip4_rhash_params; 749 if (inet_frags_init(&ip4_frags)) 750 panic("IP: failed to allocate ip4_frags cache\n"); 751 ip4_frags_ctl_register(); 752 register_pernet_subsys(&ip4_frags_ops); 753 } 754