// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn; /* RFC3168 support */
	u16		max_df_size; /* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev);


static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}
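
/* Teardown happens in two steps: ipq_kill() below only unlinks the
 * queue from the rhashtable and disarms its timer, while ipq_put()
 * merely drops a reference.  The ipq itself is freed once the last
 * reference goes away, which is why every lookup path pairs its
 * ip_find() with an ipq_put().
 */
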
/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}

static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&qp->q);
	if (!head)
		goto out;
	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;


	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb(head);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}
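
/* Note what the compare key implies: fragments are correlated by the
 * (saddr, daddr, id, protocol) tuple from the IP header plus the
 * defragmentation "user" and the VRF/L3 master device index, so e.g.
 * conntrack and local delivery keep separate reassembly state even
 * for copies of the same datagram.
 */
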
/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	int ihl, end, flags, offset;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler won't do silly aliasing games */
	barrier();
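
	/* Overlap handling is strict since the FragmentSmack fixes:
	 * inet_frag_queue_insert() flags an exact duplicate as IPFRAG_DUP
	 * (the skb is then silently dropped), and any other overlap makes
	 * us discard the whole queue; see insert_error below.
	 */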
	prev_tail = qp->q.fragments_tail;
	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		qp->iif = dev->ifindex;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q);
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);
		return -EINVAL;
	}
	err = -EINVAL;
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}

/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	void *reasm_data;
	int len, err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}

	/* Make the one we just received the head. */
	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
	if (!reasm_data)
		goto out_nomem;

	len = ip_hdrlen(skb) + qp->q.len;
	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	inet_frag_reasm_finish(&qp->q, skb, reasm_data);

	skb->dev = dev;
	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(skb);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
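
/* The heavy lifting above is shared with IPv6 reassembly:
 * inet_frag_reasm_prepare() turns the skb that completed the queue
 * into the new head and hands back the remaining fragment chain,
 * while inet_frag_reasm_finish() splices that chain behind the head
 * and fixes up length, truesize and checksum accounting.
 */
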
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
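
/* ip_check_defrag() is the variant used outside the normal receive
 * path (e.g. AF_PACKET fanout and macvlan): it only touches packets
 * that really are fragments, may hand back a different skb after
 * skb_share_check(), and returns NULL once the fragment has been
 * absorbed into a reassembly queue.
 */
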
#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data	= &net->ipv4.frags.high_thresh;
		table[0].extra1	= &net->ipv4.frags.low_thresh;
		table[1].data	= &net->ipv4.frags.low_thresh;
		table[1].extra2	= &net->ipv4.frags.high_thresh;
		table[2].data	= &net->ipv4.frags.timeout;
		table[3].data	= &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif
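
/* Each non-initial namespace gets a kmemdup()ed copy of the table
 * above with the .data/.extra pointers rewired to its own counters,
 * so the extra1/extra2 cross-links keep low_thresh clamped below
 * high_thresh independently per namespace.
 */
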
static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! The fragment queue must be destroyed before the MSL
	 * expires. RFC 791 is wrong in proposing to prolong the timer by the
	 * TTL on each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;
	net->ipv4.frags.f = &ip4_frags;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res < 0)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv4.frags);
	return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};


static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};

void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}
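
/* ipfrag_init() runs once from inet_init(); the ordering matters:
 * the global ip4_frags descriptor (constructor, rhashtable params,
 * slab cache) has to be fully set up before register_pernet_subsys()
 * starts instantiating per-namespace state that points back at it
 * through net->ipv4.frags.f.
 */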