1 /*- 2 * Copyright (c) 1982, 1986, 1988, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_bootp.h" 36 #include "opt_ipfw.h" 37 #include "opt_ipstealth.h" 38 #include "opt_ipsec.h" 39 #include "opt_route.h" 40 #include "opt_rss.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/domain.h> 47 #include <sys/protosw.h> 48 #include <sys/socket.h> 49 #include <sys/time.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/rwlock.h> 53 #include <sys/sdt.h> 54 #include <sys/syslog.h> 55 #include <sys/sysctl.h> 56 57 #include <net/pfil.h> 58 #include <net/if.h> 59 #include <net/if_types.h> 60 #include <net/if_var.h> 61 #include <net/if_dl.h> 62 #include <net/route.h> 63 #include <net/netisr.h> 64 #include <net/vnet.h> 65 66 #include <netinet/in.h> 67 #include <netinet/in_kdtrace.h> 68 #include <netinet/in_systm.h> 69 #include <netinet/in_var.h> 70 #include <netinet/ip.h> 71 #include <netinet/in_pcb.h> 72 #include <netinet/ip_var.h> 73 #include <netinet/ip_fw.h> 74 #include <netinet/ip_icmp.h> 75 #include <netinet/ip_options.h> 76 #include <machine/in_cksum.h> 77 #include <netinet/ip_carp.h> 78 #ifdef IPSEC 79 #include <netinet/ip_ipsec.h> 80 #endif /* IPSEC */ 81 #include <netinet/in_rss.h> 82 83 #include <sys/socketvar.h> 84 85 #include <security/mac/mac_framework.h> 86 87 #ifdef CTASSERT 88 CTASSERT(sizeof(struct ip) == 20); 89 #endif 90 91 struct rwlock in_ifaddr_lock; 92 RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock"); 93 94 VNET_DEFINE(int, rsvp_on); 95 96 VNET_DEFINE(int, ipforwarding); 97 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_VNET | CTLFLAG_RW, 98 &VNET_NAME(ipforwarding), 0, 99 "Enable IP forwarding between interfaces"); 100 101 static VNET_DEFINE(int, ipsendredirects) = 1; /* XXX */ 102 #define V_ipsendredirects VNET(ipsendredirects) 103 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_VNET | CTLFLAG_RW, 104 &VNET_NAME(ipsendredirects), 0, 105 "Enable sending IP redirects"); 106 107 VNET_DEFINE(int, ip_do_randomid); 108 SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_VNET | CTLFLAG_RW, 109 &VNET_NAME(ip_do_randomid), 0, 110 "Assign random ip_id values"); 111 112 /* 113 * XXX - Setting ip_checkinterface mostly implements the receive side of 114 * the Strong ES model described in RFC 1122, but since the routing table 115 * and transmit implementation do not implement the Strong ES model, 116 * setting this to 1 results in an odd hybrid. 117 * 118 * XXX - ip_checkinterface currently must be disabled if you use ipnat 119 * to translate the destination address to another local interface. 120 * 121 * XXX - ip_checkinterface must be disabled if you add IP aliases 122 * to the loopback interface instead of the interface where the 123 * packets for those addresses are received. 124 */ 125 static VNET_DEFINE(int, ip_checkinterface); 126 #define V_ip_checkinterface VNET(ip_checkinterface) 127 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_VNET | CTLFLAG_RW, 128 &VNET_NAME(ip_checkinterface), 0, 129 "Verify packet arrives on correct interface"); 130 131 VNET_DEFINE(struct pfil_head, inet_pfil_hook); /* Packet filter hooks */ 132 133 static struct netisr_handler ip_nh = { 134 .nh_name = "ip", 135 .nh_handler = ip_input, 136 .nh_proto = NETISR_IP, 137 #ifdef RSS 138 .nh_m2cpuid = rss_soft_m2cpuid, 139 .nh_policy = NETISR_POLICY_CPU, 140 .nh_dispatch = NETISR_DISPATCH_HYBRID, 141 #else 142 .nh_policy = NETISR_POLICY_FLOW, 143 #endif 144 }; 145 146 #ifdef RSS 147 /* 148 * Directly dispatched frames are currently assumed 149 * to have a flowid already calculated. 150 * 151 * It should likely have something that assert it 152 * actually has valid flow details. 153 */ 154 static struct netisr_handler ip_direct_nh = { 155 .nh_name = "ip_direct", 156 .nh_handler = ip_direct_input, 157 .nh_proto = NETISR_IP_DIRECT, 158 .nh_m2cpuid = rss_m2cpuid, 159 .nh_policy = NETISR_POLICY_CPU, 160 .nh_dispatch = NETISR_DISPATCH_HYBRID, 161 }; 162 #endif 163 164 extern struct domain inetdomain; 165 extern struct protosw inetsw[]; 166 u_char ip_protox[IPPROTO_MAX]; 167 VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead); /* first inet address */ 168 VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */ 169 VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */ 170 171 static VNET_DEFINE(uma_zone_t, ipq_zone); 172 static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]); 173 static struct mtx ipqlock; 174 175 #define V_ipq_zone VNET(ipq_zone) 176 #define V_ipq VNET(ipq) 177 178 #define IPQ_LOCK() mtx_lock(&ipqlock) 179 #define IPQ_UNLOCK() mtx_unlock(&ipqlock) 180 #define IPQ_LOCK_INIT() mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF) 181 #define IPQ_LOCK_ASSERT() mtx_assert(&ipqlock, MA_OWNED) 182 183 static void maxnipq_update(void); 184 static void ipq_zone_change(void *); 185 static void ip_drain_locked(void); 186 187 static VNET_DEFINE(int, maxnipq); /* Administrative limit on # reass queues. */ 188 static VNET_DEFINE(int, nipq); /* Total # of reass queues */ 189 #define V_maxnipq VNET(maxnipq) 190 #define V_nipq VNET(nipq) 191 SYSCTL_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET | CTLFLAG_RD, 192 &VNET_NAME(nipq), 0, 193 "Current number of IPv4 fragment reassembly queue entries"); 194 195 static VNET_DEFINE(int, maxfragsperpacket); 196 #define V_maxfragsperpacket VNET(maxfragsperpacket) 197 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW, 198 &VNET_NAME(maxfragsperpacket), 0, 199 "Maximum number of IPv4 fragments allowed per packet"); 200 201 #ifdef IPCTL_DEFMTU 202 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW, 203 &ip_mtu, 0, "Default MTU"); 204 #endif 205 206 #ifdef IPSTEALTH 207 VNET_DEFINE(int, ipstealth); 208 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_VNET | CTLFLAG_RW, 209 &VNET_NAME(ipstealth), 0, 210 "IP stealth mode, no TTL decrementation on forwarding"); 211 #endif 212 213 static void ip_freef(struct ipqhead *, struct ipq *); 214 215 /* 216 * IP statistics are stored in the "array" of counter(9)s. 217 */ 218 VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat); 219 VNET_PCPUSTAT_SYSINIT(ipstat); 220 SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat, 221 "IP statistics (struct ipstat, netinet/ip_var.h)"); 222 223 #ifdef VIMAGE 224 VNET_PCPUSTAT_SYSUNINIT(ipstat); 225 #endif /* VIMAGE */ 226 227 /* 228 * Kernel module interface for updating ipstat. The argument is an index 229 * into ipstat treated as an array. 230 */ 231 void 232 kmod_ipstat_inc(int statnum) 233 { 234 235 counter_u64_add(VNET(ipstat)[statnum], 1); 236 } 237 238 void 239 kmod_ipstat_dec(int statnum) 240 { 241 242 counter_u64_add(VNET(ipstat)[statnum], -1); 243 } 244 245 static int 246 sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS) 247 { 248 int error, qlimit; 249 250 netisr_getqlimit(&ip_nh, &qlimit); 251 error = sysctl_handle_int(oidp, &qlimit, 0, req); 252 if (error || !req->newptr) 253 return (error); 254 if (qlimit < 1) 255 return (EINVAL); 256 return (netisr_setqlimit(&ip_nh, qlimit)); 257 } 258 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, 259 CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I", 260 "Maximum size of the IP input queue"); 261 262 static int 263 sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS) 264 { 265 u_int64_t qdrops_long; 266 int error, qdrops; 267 268 netisr_getqdrops(&ip_nh, &qdrops_long); 269 qdrops = qdrops_long; 270 error = sysctl_handle_int(oidp, &qdrops, 0, req); 271 if (error || !req->newptr) 272 return (error); 273 if (qdrops != 0) 274 return (EINVAL); 275 netisr_clearqdrops(&ip_nh); 276 return (0); 277 } 278 279 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, 280 CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I", 281 "Number of packets dropped from the IP input queue"); 282 283 #ifdef RSS 284 static int 285 sysctl_netinet_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS) 286 { 287 int error, qlimit; 288 289 netisr_getqlimit(&ip_direct_nh, &qlimit); 290 error = sysctl_handle_int(oidp, &qlimit, 0, req); 291 if (error || !req->newptr) 292 return (error); 293 if (qlimit < 1) 294 return (EINVAL); 295 return (netisr_setqlimit(&ip_direct_nh, qlimit)); 296 } 297 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_direct_queue_maxlen, 298 CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_direct_queue_maxlen, "I", 299 "Maximum size of the IP direct input queue"); 300 301 static int 302 sysctl_netinet_intr_direct_queue_drops(SYSCTL_HANDLER_ARGS) 303 { 304 u_int64_t qdrops_long; 305 int error, qdrops; 306 307 netisr_getqdrops(&ip_direct_nh, &qdrops_long); 308 qdrops = qdrops_long; 309 error = sysctl_handle_int(oidp, &qdrops, 0, req); 310 if (error || !req->newptr) 311 return (error); 312 if (qdrops != 0) 313 return (EINVAL); 314 netisr_clearqdrops(&ip_direct_nh); 315 return (0); 316 } 317 318 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_direct_queue_drops, 319 CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_direct_queue_drops, "I", 320 "Number of packets dropped from the IP direct input queue"); 321 #endif /* RSS */ 322 323 /* 324 * IP initialization: fill in IP protocol switch table. 325 * All protocols not implemented in kernel go to raw IP protocol handler. 326 */ 327 void 328 ip_init(void) 329 { 330 struct protosw *pr; 331 int i; 332 333 V_ip_id = time_second & 0xffff; 334 335 TAILQ_INIT(&V_in_ifaddrhead); 336 V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask); 337 338 /* Initialize IP reassembly queue. */ 339 for (i = 0; i < IPREASS_NHASH; i++) 340 TAILQ_INIT(&V_ipq[i]); 341 V_maxnipq = nmbclusters / 32; 342 V_maxfragsperpacket = 16; 343 V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL, 344 NULL, UMA_ALIGN_PTR, 0); 345 maxnipq_update(); 346 347 /* Initialize packet filter hooks. */ 348 V_inet_pfil_hook.ph_type = PFIL_TYPE_AF; 349 V_inet_pfil_hook.ph_af = AF_INET; 350 if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0) 351 printf("%s: WARNING: unable to register pfil hook, " 352 "error %d\n", __func__, i); 353 354 /* Skip initialization of globals for non-default instances. */ 355 if (!IS_DEFAULT_VNET(curvnet)) 356 return; 357 358 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 359 if (pr == NULL) 360 panic("ip_init: PF_INET not found"); 361 362 /* Initialize the entire ip_protox[] array to IPPROTO_RAW. */ 363 for (i = 0; i < IPPROTO_MAX; i++) 364 ip_protox[i] = pr - inetsw; 365 /* 366 * Cycle through IP protocols and put them into the appropriate place 367 * in ip_protox[]. 368 */ 369 for (pr = inetdomain.dom_protosw; 370 pr < inetdomain.dom_protoswNPROTOSW; pr++) 371 if (pr->pr_domain->dom_family == PF_INET && 372 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) { 373 /* Be careful to only index valid IP protocols. */ 374 if (pr->pr_protocol < IPPROTO_MAX) 375 ip_protox[pr->pr_protocol] = pr - inetsw; 376 } 377 378 EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change, 379 NULL, EVENTHANDLER_PRI_ANY); 380 381 /* Initialize various other remaining things. */ 382 IPQ_LOCK_INIT(); 383 netisr_register(&ip_nh); 384 #ifdef RSS 385 netisr_register(&ip_direct_nh); 386 #endif 387 } 388 389 #ifdef VIMAGE 390 void 391 ip_destroy(void) 392 { 393 int i; 394 395 if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0) 396 printf("%s: WARNING: unable to unregister pfil hook, " 397 "error %d\n", __func__, i); 398 399 /* Cleanup in_ifaddr hash table; should be empty. */ 400 hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask); 401 402 IPQ_LOCK(); 403 ip_drain_locked(); 404 IPQ_UNLOCK(); 405 406 uma_zdestroy(V_ipq_zone); 407 } 408 #endif 409 410 #ifdef RSS 411 /* 412 * IP direct input routine. 413 * 414 * This is called when reinjecting completed fragments where 415 * all of the previous checking and book-keeping has been done. 416 */ 417 void 418 ip_direct_input(struct mbuf *m) 419 { 420 struct ip *ip; 421 int hlen; 422 423 ip = mtod(m, struct ip *); 424 hlen = ip->ip_hl << 2; 425 426 IPSTAT_INC(ips_delivered); 427 (*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p); 428 return; 429 } 430 #endif 431 432 /* 433 * Ip input routine. Checksum and byte swap header. If fragmented 434 * try to reassemble. Process options. Pass to next level. 435 */ 436 void 437 ip_input(struct mbuf *m) 438 { 439 struct ip *ip = NULL; 440 struct in_ifaddr *ia = NULL; 441 struct ifaddr *ifa; 442 struct ifnet *ifp; 443 int checkif, hlen = 0; 444 uint16_t sum, ip_len; 445 int dchg = 0; /* dest changed after fw */ 446 struct in_addr odst; /* original dst address */ 447 448 M_ASSERTPKTHDR(m); 449 450 if (m->m_flags & M_FASTFWD_OURS) { 451 m->m_flags &= ~M_FASTFWD_OURS; 452 /* Set up some basics that will be used later. */ 453 ip = mtod(m, struct ip *); 454 hlen = ip->ip_hl << 2; 455 ip_len = ntohs(ip->ip_len); 456 goto ours; 457 } 458 459 IPSTAT_INC(ips_total); 460 461 if (m->m_pkthdr.len < sizeof(struct ip)) 462 goto tooshort; 463 464 if (m->m_len < sizeof (struct ip) && 465 (m = m_pullup(m, sizeof (struct ip))) == NULL) { 466 IPSTAT_INC(ips_toosmall); 467 return; 468 } 469 ip = mtod(m, struct ip *); 470 471 if (ip->ip_v != IPVERSION) { 472 IPSTAT_INC(ips_badvers); 473 goto bad; 474 } 475 476 hlen = ip->ip_hl << 2; 477 if (hlen < sizeof(struct ip)) { /* minimum header length */ 478 IPSTAT_INC(ips_badhlen); 479 goto bad; 480 } 481 if (hlen > m->m_len) { 482 if ((m = m_pullup(m, hlen)) == NULL) { 483 IPSTAT_INC(ips_badhlen); 484 return; 485 } 486 ip = mtod(m, struct ip *); 487 } 488 489 IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL); 490 491 /* 127/8 must not appear on wire - RFC1122 */ 492 ifp = m->m_pkthdr.rcvif; 493 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || 494 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { 495 if ((ifp->if_flags & IFF_LOOPBACK) == 0) { 496 IPSTAT_INC(ips_badaddr); 497 goto bad; 498 } 499 } 500 501 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { 502 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); 503 } else { 504 if (hlen == sizeof(struct ip)) { 505 sum = in_cksum_hdr(ip); 506 } else { 507 sum = in_cksum(m, hlen); 508 } 509 } 510 if (sum) { 511 IPSTAT_INC(ips_badsum); 512 goto bad; 513 } 514 515 #ifdef ALTQ 516 if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) 517 /* packet is dropped by traffic conditioner */ 518 return; 519 #endif 520 521 ip_len = ntohs(ip->ip_len); 522 if (ip_len < hlen) { 523 IPSTAT_INC(ips_badlen); 524 goto bad; 525 } 526 527 /* 528 * Check that the amount of data in the buffers 529 * is as at least much as the IP header would have us expect. 530 * Trim mbufs if longer than we expect. 531 * Drop packet if shorter than we expect. 532 */ 533 if (m->m_pkthdr.len < ip_len) { 534 tooshort: 535 IPSTAT_INC(ips_tooshort); 536 goto bad; 537 } 538 if (m->m_pkthdr.len > ip_len) { 539 if (m->m_len == m->m_pkthdr.len) { 540 m->m_len = ip_len; 541 m->m_pkthdr.len = ip_len; 542 } else 543 m_adj(m, ip_len - m->m_pkthdr.len); 544 } 545 546 #ifdef IPSEC 547 /* 548 * Bypass packet filtering for packets previously handled by IPsec. 549 */ 550 if (ip_ipsec_filtertunnel(m)) 551 goto passin; 552 #endif /* IPSEC */ 553 554 /* 555 * Run through list of hooks for input packets. 556 * 557 * NB: Beware of the destination address changing (e.g. 558 * by NAT rewriting). When this happens, tell 559 * ip_forward to do the right thing. 560 */ 561 562 /* Jump over all PFIL processing if hooks are not active. */ 563 if (!PFIL_HOOKED(&V_inet_pfil_hook)) 564 goto passin; 565 566 odst = ip->ip_dst; 567 if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0) 568 return; 569 if (m == NULL) /* consumed by filter */ 570 return; 571 572 ip = mtod(m, struct ip *); 573 dchg = (odst.s_addr != ip->ip_dst.s_addr); 574 ifp = m->m_pkthdr.rcvif; 575 576 if (m->m_flags & M_FASTFWD_OURS) { 577 m->m_flags &= ~M_FASTFWD_OURS; 578 goto ours; 579 } 580 if (m->m_flags & M_IP_NEXTHOP) { 581 dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL); 582 if (dchg != 0) { 583 /* 584 * Directly ship the packet on. This allows 585 * forwarding packets originally destined to us 586 * to some other directly connected host. 587 */ 588 ip_forward(m, 1); 589 return; 590 } 591 } 592 passin: 593 594 /* 595 * Process options and, if not destined for us, 596 * ship it on. ip_dooptions returns 1 when an 597 * error was detected (causing an icmp message 598 * to be sent and the original packet to be freed). 599 */ 600 if (hlen > sizeof (struct ip) && ip_dooptions(m, 0)) 601 return; 602 603 /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no 604 * matter if it is destined to another node, or whether it is 605 * a multicast one, RSVP wants it! and prevents it from being forwarded 606 * anywhere else. Also checks if the rsvp daemon is running before 607 * grabbing the packet. 608 */ 609 if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP) 610 goto ours; 611 612 /* 613 * Check our list of addresses, to see if the packet is for us. 614 * If we don't have any addresses, assume any unicast packet 615 * we receive might be for us (and let the upper layers deal 616 * with it). 617 */ 618 if (TAILQ_EMPTY(&V_in_ifaddrhead) && 619 (m->m_flags & (M_MCAST|M_BCAST)) == 0) 620 goto ours; 621 622 /* 623 * Enable a consistency check between the destination address 624 * and the arrival interface for a unicast packet (the RFC 1122 625 * strong ES model) if IP forwarding is disabled and the packet 626 * is not locally generated and the packet is not subject to 627 * 'ipfw fwd'. 628 * 629 * XXX - Checking also should be disabled if the destination 630 * address is ipnat'ed to a different interface. 631 * 632 * XXX - Checking is incompatible with IP aliases added 633 * to the loopback interface instead of the interface where 634 * the packets are received. 635 * 636 * XXX - This is the case for carp vhost IPs as well so we 637 * insert a workaround. If the packet got here, we already 638 * checked with carp_iamatch() and carp_forus(). 639 */ 640 checkif = V_ip_checkinterface && (V_ipforwarding == 0) && 641 ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) && 642 ifp->if_carp == NULL && (dchg == 0); 643 644 /* 645 * Check for exact addresses in the hash bucket. 646 */ 647 /* IN_IFADDR_RLOCK(); */ 648 LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) { 649 /* 650 * If the address matches, verify that the packet 651 * arrived via the correct interface if checking is 652 * enabled. 653 */ 654 if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr && 655 (!checkif || ia->ia_ifp == ifp)) { 656 counter_u64_add(ia->ia_ifa.ifa_ipackets, 1); 657 counter_u64_add(ia->ia_ifa.ifa_ibytes, 658 m->m_pkthdr.len); 659 /* IN_IFADDR_RUNLOCK(); */ 660 goto ours; 661 } 662 } 663 /* IN_IFADDR_RUNLOCK(); */ 664 665 /* 666 * Check for broadcast addresses. 667 * 668 * Only accept broadcast packets that arrive via the matching 669 * interface. Reception of forwarded directed broadcasts would 670 * be handled via ip_forward() and ether_output() with the loopback 671 * into the stack for SIMPLEX interfaces handled by ether_output(). 672 */ 673 if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) { 674 IF_ADDR_RLOCK(ifp); 675 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 676 if (ifa->ifa_addr->sa_family != AF_INET) 677 continue; 678 ia = ifatoia(ifa); 679 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == 680 ip->ip_dst.s_addr) { 681 counter_u64_add(ia->ia_ifa.ifa_ipackets, 1); 682 counter_u64_add(ia->ia_ifa.ifa_ibytes, 683 m->m_pkthdr.len); 684 IF_ADDR_RUNLOCK(ifp); 685 goto ours; 686 } 687 #ifdef BOOTP_COMPAT 688 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) { 689 counter_u64_add(ia->ia_ifa.ifa_ipackets, 1); 690 counter_u64_add(ia->ia_ifa.ifa_ibytes, 691 m->m_pkthdr.len); 692 IF_ADDR_RUNLOCK(ifp); 693 goto ours; 694 } 695 #endif 696 } 697 IF_ADDR_RUNLOCK(ifp); 698 ia = NULL; 699 } 700 /* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */ 701 if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) { 702 IPSTAT_INC(ips_cantforward); 703 m_freem(m); 704 return; 705 } 706 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 707 if (V_ip_mrouter) { 708 /* 709 * If we are acting as a multicast router, all 710 * incoming multicast packets are passed to the 711 * kernel-level multicast forwarding function. 712 * The packet is returned (relatively) intact; if 713 * ip_mforward() returns a non-zero value, the packet 714 * must be discarded, else it may be accepted below. 715 */ 716 if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) { 717 IPSTAT_INC(ips_cantforward); 718 m_freem(m); 719 return; 720 } 721 722 /* 723 * The process-level routing daemon needs to receive 724 * all multicast IGMP packets, whether or not this 725 * host belongs to their destination groups. 726 */ 727 if (ip->ip_p == IPPROTO_IGMP) 728 goto ours; 729 IPSTAT_INC(ips_forward); 730 } 731 /* 732 * Assume the packet is for us, to avoid prematurely taking 733 * a lock on the in_multi hash. Protocols must perform 734 * their own filtering and update statistics accordingly. 735 */ 736 goto ours; 737 } 738 if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST) 739 goto ours; 740 if (ip->ip_dst.s_addr == INADDR_ANY) 741 goto ours; 742 743 /* 744 * Not for us; forward if possible and desirable. 745 */ 746 if (V_ipforwarding == 0) { 747 IPSTAT_INC(ips_cantforward); 748 m_freem(m); 749 } else { 750 ip_forward(m, dchg); 751 } 752 return; 753 754 ours: 755 #ifdef IPSTEALTH 756 /* 757 * IPSTEALTH: Process non-routing options only 758 * if the packet is destined for us. 759 */ 760 if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) 761 return; 762 #endif /* IPSTEALTH */ 763 764 /* 765 * Attempt reassembly; if it succeeds, proceed. 766 * ip_reass() will return a different mbuf. 767 */ 768 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) { 769 /* XXXGL: shouldn't we save & set m_flags? */ 770 m = ip_reass(m); 771 if (m == NULL) 772 return; 773 ip = mtod(m, struct ip *); 774 /* Get the header length of the reassembled packet */ 775 hlen = ip->ip_hl << 2; 776 } 777 778 #ifdef IPSEC 779 /* 780 * enforce IPsec policy checking if we are seeing last header. 781 * note that we do not visit this with protocols with pcb layer 782 * code - like udp/tcp/raw ip. 783 */ 784 if (ip_ipsec_input(m, ip->ip_p) != 0) 785 goto bad; 786 #endif /* IPSEC */ 787 788 /* 789 * Switch out to protocol's input routine. 790 */ 791 IPSTAT_INC(ips_delivered); 792 793 (*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p); 794 return; 795 bad: 796 m_freem(m); 797 } 798 799 /* 800 * After maxnipq has been updated, propagate the change to UMA. The UMA zone 801 * max has slightly different semantics than the sysctl, for historical 802 * reasons. 803 */ 804 static void 805 maxnipq_update(void) 806 { 807 808 /* 809 * -1 for unlimited allocation. 810 */ 811 if (V_maxnipq < 0) 812 uma_zone_set_max(V_ipq_zone, 0); 813 /* 814 * Positive number for specific bound. 815 */ 816 if (V_maxnipq > 0) 817 uma_zone_set_max(V_ipq_zone, V_maxnipq); 818 /* 819 * Zero specifies no further fragment queue allocation -- set the 820 * bound very low, but rely on implementation elsewhere to actually 821 * prevent allocation and reclaim current queues. 822 */ 823 if (V_maxnipq == 0) 824 uma_zone_set_max(V_ipq_zone, 1); 825 } 826 827 static void 828 ipq_zone_change(void *tag) 829 { 830 831 if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) { 832 V_maxnipq = nmbclusters / 32; 833 maxnipq_update(); 834 } 835 } 836 837 static int 838 sysctl_maxnipq(SYSCTL_HANDLER_ARGS) 839 { 840 int error, i; 841 842 i = V_maxnipq; 843 error = sysctl_handle_int(oidp, &i, 0, req); 844 if (error || !req->newptr) 845 return (error); 846 847 /* 848 * XXXRW: Might be a good idea to sanity check the argument and place 849 * an extreme upper bound. 850 */ 851 if (i < -1) 852 return (EINVAL); 853 V_maxnipq = i; 854 maxnipq_update(); 855 return (0); 856 } 857 858 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW, 859 NULL, 0, sysctl_maxnipq, "I", 860 "Maximum number of IPv4 fragment reassembly queue entries"); 861 862 #define M_IP_FRAG M_PROTO9 863 864 /* 865 * Take incoming datagram fragment and try to reassemble it into 866 * whole datagram. If the argument is the first fragment or one 867 * in between the function will return NULL and store the mbuf 868 * in the fragment chain. If the argument is the last fragment 869 * the packet will be reassembled and the pointer to the new 870 * mbuf returned for further processing. Only m_tags attached 871 * to the first packet/fragment are preserved. 872 * The IP header is *NOT* adjusted out of iplen. 873 */ 874 struct mbuf * 875 ip_reass(struct mbuf *m) 876 { 877 struct ip *ip; 878 struct mbuf *p, *q, *nq, *t; 879 struct ipq *fp = NULL; 880 struct ipqhead *head; 881 int i, hlen, next; 882 u_int8_t ecn, ecn0; 883 u_short hash; 884 #ifdef RSS 885 uint32_t rss_hash, rss_type; 886 #endif 887 888 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */ 889 if (V_maxnipq == 0 || V_maxfragsperpacket == 0) { 890 IPSTAT_INC(ips_fragments); 891 IPSTAT_INC(ips_fragdropped); 892 m_freem(m); 893 return (NULL); 894 } 895 896 ip = mtod(m, struct ip *); 897 hlen = ip->ip_hl << 2; 898 899 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); 900 head = &V_ipq[hash]; 901 IPQ_LOCK(); 902 903 /* 904 * Look for queue of fragments 905 * of this datagram. 906 */ 907 TAILQ_FOREACH(fp, head, ipq_list) 908 if (ip->ip_id == fp->ipq_id && 909 ip->ip_src.s_addr == fp->ipq_src.s_addr && 910 ip->ip_dst.s_addr == fp->ipq_dst.s_addr && 911 #ifdef MAC 912 mac_ipq_match(m, fp) && 913 #endif 914 ip->ip_p == fp->ipq_p) 915 goto found; 916 917 fp = NULL; 918 919 /* 920 * Attempt to trim the number of allocated fragment queues if it 921 * exceeds the administrative limit. 922 */ 923 if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) { 924 /* 925 * drop something from the tail of the current queue 926 * before proceeding further 927 */ 928 struct ipq *q = TAILQ_LAST(head, ipqhead); 929 if (q == NULL) { /* gak */ 930 for (i = 0; i < IPREASS_NHASH; i++) { 931 struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead); 932 if (r) { 933 IPSTAT_ADD(ips_fragtimeout, 934 r->ipq_nfrags); 935 ip_freef(&V_ipq[i], r); 936 break; 937 } 938 } 939 } else { 940 IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags); 941 ip_freef(head, q); 942 } 943 } 944 945 found: 946 /* 947 * Adjust ip_len to not reflect header, 948 * convert offset of this to bytes. 949 */ 950 ip->ip_len = htons(ntohs(ip->ip_len) - hlen); 951 if (ip->ip_off & htons(IP_MF)) { 952 /* 953 * Make sure that fragments have a data length 954 * that's a non-zero multiple of 8 bytes. 955 */ 956 if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) { 957 IPSTAT_INC(ips_toosmall); /* XXX */ 958 goto dropfrag; 959 } 960 m->m_flags |= M_IP_FRAG; 961 } else 962 m->m_flags &= ~M_IP_FRAG; 963 ip->ip_off = htons(ntohs(ip->ip_off) << 3); 964 965 /* 966 * Attempt reassembly; if it succeeds, proceed. 967 * ip_reass() will return a different mbuf. 968 */ 969 IPSTAT_INC(ips_fragments); 970 m->m_pkthdr.PH_loc.ptr = ip; 971 972 /* Previous ip_reass() started here. */ 973 /* 974 * Presence of header sizes in mbufs 975 * would confuse code below. 976 */ 977 m->m_data += hlen; 978 m->m_len -= hlen; 979 980 /* 981 * If first fragment to arrive, create a reassembly queue. 982 */ 983 if (fp == NULL) { 984 fp = uma_zalloc(V_ipq_zone, M_NOWAIT); 985 if (fp == NULL) 986 goto dropfrag; 987 #ifdef MAC 988 if (mac_ipq_init(fp, M_NOWAIT) != 0) { 989 uma_zfree(V_ipq_zone, fp); 990 fp = NULL; 991 goto dropfrag; 992 } 993 mac_ipq_create(m, fp); 994 #endif 995 TAILQ_INSERT_HEAD(head, fp, ipq_list); 996 V_nipq++; 997 fp->ipq_nfrags = 1; 998 fp->ipq_ttl = IPFRAGTTL; 999 fp->ipq_p = ip->ip_p; 1000 fp->ipq_id = ip->ip_id; 1001 fp->ipq_src = ip->ip_src; 1002 fp->ipq_dst = ip->ip_dst; 1003 fp->ipq_frags = m; 1004 m->m_nextpkt = NULL; 1005 goto done; 1006 } else { 1007 fp->ipq_nfrags++; 1008 #ifdef MAC 1009 mac_ipq_update(m, fp); 1010 #endif 1011 } 1012 1013 #define GETIP(m) ((struct ip*)((m)->m_pkthdr.PH_loc.ptr)) 1014 1015 /* 1016 * Handle ECN by comparing this segment with the first one; 1017 * if CE is set, do not lose CE. 1018 * drop if CE and not-ECT are mixed for the same packet. 1019 */ 1020 ecn = ip->ip_tos & IPTOS_ECN_MASK; 1021 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK; 1022 if (ecn == IPTOS_ECN_CE) { 1023 if (ecn0 == IPTOS_ECN_NOTECT) 1024 goto dropfrag; 1025 if (ecn0 != IPTOS_ECN_CE) 1026 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE; 1027 } 1028 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) 1029 goto dropfrag; 1030 1031 /* 1032 * Find a segment which begins after this one does. 1033 */ 1034 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) 1035 if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off)) 1036 break; 1037 1038 /* 1039 * If there is a preceding segment, it may provide some of 1040 * our data already. If so, drop the data from the incoming 1041 * segment. If it provides all of our data, drop us, otherwise 1042 * stick new segment in the proper place. 1043 * 1044 * If some of the data is dropped from the preceding 1045 * segment, then it's checksum is invalidated. 1046 */ 1047 if (p) { 1048 i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) - 1049 ntohs(ip->ip_off); 1050 if (i > 0) { 1051 if (i >= ntohs(ip->ip_len)) 1052 goto dropfrag; 1053 m_adj(m, i); 1054 m->m_pkthdr.csum_flags = 0; 1055 ip->ip_off = htons(ntohs(ip->ip_off) + i); 1056 ip->ip_len = htons(ntohs(ip->ip_len) - i); 1057 } 1058 m->m_nextpkt = p->m_nextpkt; 1059 p->m_nextpkt = m; 1060 } else { 1061 m->m_nextpkt = fp->ipq_frags; 1062 fp->ipq_frags = m; 1063 } 1064 1065 /* 1066 * While we overlap succeeding segments trim them or, 1067 * if they are completely covered, dequeue them. 1068 */ 1069 for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) > 1070 ntohs(GETIP(q)->ip_off); q = nq) { 1071 i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) - 1072 ntohs(GETIP(q)->ip_off); 1073 if (i < ntohs(GETIP(q)->ip_len)) { 1074 GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i); 1075 GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i); 1076 m_adj(q, i); 1077 q->m_pkthdr.csum_flags = 0; 1078 break; 1079 } 1080 nq = q->m_nextpkt; 1081 m->m_nextpkt = nq; 1082 IPSTAT_INC(ips_fragdropped); 1083 fp->ipq_nfrags--; 1084 m_freem(q); 1085 } 1086 1087 /* 1088 * Check for complete reassembly and perform frag per packet 1089 * limiting. 1090 * 1091 * Frag limiting is performed here so that the nth frag has 1092 * a chance to complete the packet before we drop the packet. 1093 * As a result, n+1 frags are actually allowed per packet, but 1094 * only n will ever be stored. (n = maxfragsperpacket.) 1095 * 1096 */ 1097 next = 0; 1098 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { 1099 if (ntohs(GETIP(q)->ip_off) != next) { 1100 if (fp->ipq_nfrags > V_maxfragsperpacket) { 1101 IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags); 1102 ip_freef(head, fp); 1103 } 1104 goto done; 1105 } 1106 next += ntohs(GETIP(q)->ip_len); 1107 } 1108 /* Make sure the last packet didn't have the IP_MF flag */ 1109 if (p->m_flags & M_IP_FRAG) { 1110 if (fp->ipq_nfrags > V_maxfragsperpacket) { 1111 IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags); 1112 ip_freef(head, fp); 1113 } 1114 goto done; 1115 } 1116 1117 /* 1118 * Reassembly is complete. Make sure the packet is a sane size. 1119 */ 1120 q = fp->ipq_frags; 1121 ip = GETIP(q); 1122 if (next + (ip->ip_hl << 2) > IP_MAXPACKET) { 1123 IPSTAT_INC(ips_toolong); 1124 IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags); 1125 ip_freef(head, fp); 1126 goto done; 1127 } 1128 1129 /* 1130 * Concatenate fragments. 1131 */ 1132 m = q; 1133 t = m->m_next; 1134 m->m_next = NULL; 1135 m_cat(m, t); 1136 nq = q->m_nextpkt; 1137 q->m_nextpkt = NULL; 1138 for (q = nq; q != NULL; q = nq) { 1139 nq = q->m_nextpkt; 1140 q->m_nextpkt = NULL; 1141 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags; 1142 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data; 1143 m_cat(m, q); 1144 } 1145 /* 1146 * In order to do checksumming faster we do 'end-around carry' here 1147 * (and not in for{} loop), though it implies we are not going to 1148 * reassemble more than 64k fragments. 1149 */ 1150 while (m->m_pkthdr.csum_data & 0xffff0000) 1151 m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) + 1152 (m->m_pkthdr.csum_data >> 16); 1153 #ifdef MAC 1154 mac_ipq_reassemble(fp, m); 1155 mac_ipq_destroy(fp); 1156 #endif 1157 1158 /* 1159 * Create header for new ip packet by modifying header of first 1160 * packet; dequeue and discard fragment reassembly header. 1161 * Make header visible. 1162 */ 1163 ip->ip_len = htons((ip->ip_hl << 2) + next); 1164 ip->ip_src = fp->ipq_src; 1165 ip->ip_dst = fp->ipq_dst; 1166 TAILQ_REMOVE(head, fp, ipq_list); 1167 V_nipq--; 1168 uma_zfree(V_ipq_zone, fp); 1169 m->m_len += (ip->ip_hl << 2); 1170 m->m_data -= (ip->ip_hl << 2); 1171 /* some debugging cruft by sklower, below, will go away soon */ 1172 if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */ 1173 m_fixhdr(m); 1174 IPSTAT_INC(ips_reassembled); 1175 IPQ_UNLOCK(); 1176 1177 #ifdef RSS 1178 /* 1179 * Query the RSS layer for the flowid / flowtype for the 1180 * mbuf payload. 1181 * 1182 * For now, just assume we have to calculate a new one. 1183 * Later on we should check to see if the assigned flowid matches 1184 * what RSS wants for the given IP protocol and if so, just keep it. 1185 * 1186 * We then queue into the relevant netisr so it can be dispatched 1187 * to the correct CPU. 1188 * 1189 * Note - this may return 1, which means the flowid in the mbuf 1190 * is correct for the configured RSS hash types and can be used. 1191 */ 1192 if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) { 1193 m->m_pkthdr.flowid = rss_hash; 1194 M_HASHTYPE_SET(m, rss_type); 1195 } 1196 1197 /* 1198 * Queue/dispatch for reprocessing. 1199 * 1200 * Note: this is much slower than just handling the frame in the 1201 * current receive context. It's likely worth investigating 1202 * why this is. 1203 */ 1204 netisr_dispatch(NETISR_IP_DIRECT, m); 1205 return (NULL); 1206 #endif 1207 1208 /* Handle in-line */ 1209 return (m); 1210 1211 dropfrag: 1212 IPSTAT_INC(ips_fragdropped); 1213 if (fp != NULL) 1214 fp->ipq_nfrags--; 1215 m_freem(m); 1216 done: 1217 IPQ_UNLOCK(); 1218 return (NULL); 1219 1220 #undef GETIP 1221 } 1222 1223 /* 1224 * Free a fragment reassembly header and all 1225 * associated datagrams. 1226 */ 1227 static void 1228 ip_freef(struct ipqhead *fhp, struct ipq *fp) 1229 { 1230 struct mbuf *q; 1231 1232 IPQ_LOCK_ASSERT(); 1233 1234 while (fp->ipq_frags) { 1235 q = fp->ipq_frags; 1236 fp->ipq_frags = q->m_nextpkt; 1237 m_freem(q); 1238 } 1239 TAILQ_REMOVE(fhp, fp, ipq_list); 1240 uma_zfree(V_ipq_zone, fp); 1241 V_nipq--; 1242 } 1243 1244 /* 1245 * IP timer processing; 1246 * if a timer expires on a reassembly 1247 * queue, discard it. 1248 */ 1249 void 1250 ip_slowtimo(void) 1251 { 1252 VNET_ITERATOR_DECL(vnet_iter); 1253 struct ipq *fp; 1254 int i; 1255 1256 VNET_LIST_RLOCK_NOSLEEP(); 1257 IPQ_LOCK(); 1258 VNET_FOREACH(vnet_iter) { 1259 CURVNET_SET(vnet_iter); 1260 for (i = 0; i < IPREASS_NHASH; i++) { 1261 for(fp = TAILQ_FIRST(&V_ipq[i]); fp;) { 1262 struct ipq *fpp; 1263 1264 fpp = fp; 1265 fp = TAILQ_NEXT(fp, ipq_list); 1266 if(--fpp->ipq_ttl == 0) { 1267 IPSTAT_ADD(ips_fragtimeout, 1268 fpp->ipq_nfrags); 1269 ip_freef(&V_ipq[i], fpp); 1270 } 1271 } 1272 } 1273 /* 1274 * If we are over the maximum number of fragments 1275 * (due to the limit being lowered), drain off 1276 * enough to get down to the new limit. 1277 */ 1278 if (V_maxnipq >= 0 && V_nipq > V_maxnipq) { 1279 for (i = 0; i < IPREASS_NHASH; i++) { 1280 while (V_nipq > V_maxnipq && 1281 !TAILQ_EMPTY(&V_ipq[i])) { 1282 IPSTAT_ADD(ips_fragdropped, 1283 TAILQ_FIRST(&V_ipq[i])->ipq_nfrags); 1284 ip_freef(&V_ipq[i], 1285 TAILQ_FIRST(&V_ipq[i])); 1286 } 1287 } 1288 } 1289 CURVNET_RESTORE(); 1290 } 1291 IPQ_UNLOCK(); 1292 VNET_LIST_RUNLOCK_NOSLEEP(); 1293 } 1294 1295 /* 1296 * Drain off all datagram fragments. 1297 */ 1298 static void 1299 ip_drain_locked(void) 1300 { 1301 int i; 1302 1303 IPQ_LOCK_ASSERT(); 1304 1305 for (i = 0; i < IPREASS_NHASH; i++) { 1306 while(!TAILQ_EMPTY(&V_ipq[i])) { 1307 IPSTAT_ADD(ips_fragdropped, 1308 TAILQ_FIRST(&V_ipq[i])->ipq_nfrags); 1309 ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i])); 1310 } 1311 } 1312 } 1313 1314 void 1315 ip_drain(void) 1316 { 1317 VNET_ITERATOR_DECL(vnet_iter); 1318 1319 VNET_LIST_RLOCK_NOSLEEP(); 1320 IPQ_LOCK(); 1321 VNET_FOREACH(vnet_iter) { 1322 CURVNET_SET(vnet_iter); 1323 ip_drain_locked(); 1324 CURVNET_RESTORE(); 1325 } 1326 IPQ_UNLOCK(); 1327 VNET_LIST_RUNLOCK_NOSLEEP(); 1328 } 1329 1330 /* 1331 * The protocol to be inserted into ip_protox[] must be already registered 1332 * in inetsw[], either statically or through pf_proto_register(). 1333 */ 1334 int 1335 ipproto_register(short ipproto) 1336 { 1337 struct protosw *pr; 1338 1339 /* Sanity checks. */ 1340 if (ipproto <= 0 || ipproto >= IPPROTO_MAX) 1341 return (EPROTONOSUPPORT); 1342 1343 /* 1344 * The protocol slot must not be occupied by another protocol 1345 * already. An index pointing to IPPROTO_RAW is unused. 1346 */ 1347 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 1348 if (pr == NULL) 1349 return (EPFNOSUPPORT); 1350 if (ip_protox[ipproto] != pr - inetsw) /* IPPROTO_RAW */ 1351 return (EEXIST); 1352 1353 /* Find the protocol position in inetsw[] and set the index. */ 1354 for (pr = inetdomain.dom_protosw; 1355 pr < inetdomain.dom_protoswNPROTOSW; pr++) { 1356 if (pr->pr_domain->dom_family == PF_INET && 1357 pr->pr_protocol && pr->pr_protocol == ipproto) { 1358 ip_protox[pr->pr_protocol] = pr - inetsw; 1359 return (0); 1360 } 1361 } 1362 return (EPROTONOSUPPORT); 1363 } 1364 1365 int 1366 ipproto_unregister(short ipproto) 1367 { 1368 struct protosw *pr; 1369 1370 /* Sanity checks. */ 1371 if (ipproto <= 0 || ipproto >= IPPROTO_MAX) 1372 return (EPROTONOSUPPORT); 1373 1374 /* Check if the protocol was indeed registered. */ 1375 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 1376 if (pr == NULL) 1377 return (EPFNOSUPPORT); 1378 if (ip_protox[ipproto] == pr - inetsw) /* IPPROTO_RAW */ 1379 return (ENOENT); 1380 1381 /* Reset the protocol slot to IPPROTO_RAW. */ 1382 ip_protox[ipproto] = pr - inetsw; 1383 return (0); 1384 } 1385 1386 /* 1387 * Given address of next destination (final or next hop), return (referenced) 1388 * internet address info of interface to be used to get there. 1389 */ 1390 struct in_ifaddr * 1391 ip_rtaddr(struct in_addr dst, u_int fibnum) 1392 { 1393 struct route sro; 1394 struct sockaddr_in *sin; 1395 struct in_ifaddr *ia; 1396 1397 bzero(&sro, sizeof(sro)); 1398 sin = (struct sockaddr_in *)&sro.ro_dst; 1399 sin->sin_family = AF_INET; 1400 sin->sin_len = sizeof(*sin); 1401 sin->sin_addr = dst; 1402 in_rtalloc_ign(&sro, 0, fibnum); 1403 1404 if (sro.ro_rt == NULL) 1405 return (NULL); 1406 1407 ia = ifatoia(sro.ro_rt->rt_ifa); 1408 ifa_ref(&ia->ia_ifa); 1409 RTFREE(sro.ro_rt); 1410 return (ia); 1411 } 1412 1413 u_char inetctlerrmap[PRC_NCMDS] = { 1414 0, 0, 0, 0, 1415 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 1416 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 1417 EMSGSIZE, EHOSTUNREACH, 0, 0, 1418 0, 0, EHOSTUNREACH, 0, 1419 ENOPROTOOPT, ECONNREFUSED 1420 }; 1421 1422 /* 1423 * Forward a packet. If some error occurs return the sender 1424 * an icmp packet. Note we can't always generate a meaningful 1425 * icmp message because icmp doesn't have a large enough repertoire 1426 * of codes and types. 1427 * 1428 * If not forwarding, just drop the packet. This could be confusing 1429 * if ipforwarding was zero but some routing protocol was advancing 1430 * us as a gateway to somewhere. However, we must let the routing 1431 * protocol deal with that. 1432 * 1433 * The srcrt parameter indicates whether the packet is being forwarded 1434 * via a source route. 1435 */ 1436 void 1437 ip_forward(struct mbuf *m, int srcrt) 1438 { 1439 struct ip *ip = mtod(m, struct ip *); 1440 struct in_ifaddr *ia; 1441 struct mbuf *mcopy; 1442 struct in_addr dest; 1443 struct route ro; 1444 int error, type = 0, code = 0, mtu = 0; 1445 1446 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { 1447 IPSTAT_INC(ips_cantforward); 1448 m_freem(m); 1449 return; 1450 } 1451 #ifdef IPSEC 1452 if (ip_ipsec_fwd(m) != 0) { 1453 IPSTAT_INC(ips_cantforward); 1454 m_freem(m); 1455 return; 1456 } 1457 #endif /* IPSEC */ 1458 #ifdef IPSTEALTH 1459 if (!V_ipstealth) { 1460 #endif 1461 if (ip->ip_ttl <= IPTTLDEC) { 1462 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 1463 0, 0); 1464 return; 1465 } 1466 #ifdef IPSTEALTH 1467 } 1468 #endif 1469 1470 ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m)); 1471 #ifndef IPSEC 1472 /* 1473 * 'ia' may be NULL if there is no route for this destination. 1474 * In case of IPsec, Don't discard it just yet, but pass it to 1475 * ip_output in case of outgoing IPsec policy. 1476 */ 1477 if (!srcrt && ia == NULL) { 1478 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0); 1479 return; 1480 } 1481 #endif 1482 1483 /* 1484 * Save the IP header and at most 8 bytes of the payload, 1485 * in case we need to generate an ICMP message to the src. 1486 * 1487 * XXX this can be optimized a lot by saving the data in a local 1488 * buffer on the stack (72 bytes at most), and only allocating the 1489 * mbuf if really necessary. The vast majority of the packets 1490 * are forwarded without having to send an ICMP back (either 1491 * because unnecessary, or because rate limited), so we are 1492 * really we are wasting a lot of work here. 1493 * 1494 * We don't use m_copy() because it might return a reference 1495 * to a shared cluster. Both this function and ip_output() 1496 * assume exclusive access to the IP header in `m', so any 1497 * data in a cluster may change before we reach icmp_error(). 1498 */ 1499 mcopy = m_gethdr(M_NOWAIT, m->m_type); 1500 if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) { 1501 /* 1502 * It's probably ok if the pkthdr dup fails (because 1503 * the deep copy of the tag chain failed), but for now 1504 * be conservative and just discard the copy since 1505 * code below may some day want the tags. 1506 */ 1507 m_free(mcopy); 1508 mcopy = NULL; 1509 } 1510 if (mcopy != NULL) { 1511 mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy)); 1512 mcopy->m_pkthdr.len = mcopy->m_len; 1513 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t)); 1514 } 1515 1516 #ifdef IPSTEALTH 1517 if (!V_ipstealth) { 1518 #endif 1519 ip->ip_ttl -= IPTTLDEC; 1520 #ifdef IPSTEALTH 1521 } 1522 #endif 1523 1524 /* 1525 * If forwarding packet using same interface that it came in on, 1526 * perhaps should send a redirect to sender to shortcut a hop. 1527 * Only send redirect if source is sending directly to us, 1528 * and if packet was not source routed (or has any options). 1529 * Also, don't send redirect if forwarding using a default route 1530 * or a route modified by a redirect. 1531 */ 1532 dest.s_addr = 0; 1533 if (!srcrt && V_ipsendredirects && 1534 ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) { 1535 struct sockaddr_in *sin; 1536 struct rtentry *rt; 1537 1538 bzero(&ro, sizeof(ro)); 1539 sin = (struct sockaddr_in *)&ro.ro_dst; 1540 sin->sin_family = AF_INET; 1541 sin->sin_len = sizeof(*sin); 1542 sin->sin_addr = ip->ip_dst; 1543 in_rtalloc_ign(&ro, 0, M_GETFIB(m)); 1544 1545 rt = ro.ro_rt; 1546 1547 if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && 1548 satosin(rt_key(rt))->sin_addr.s_addr != 0) { 1549 #define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa)) 1550 u_long src = ntohl(ip->ip_src.s_addr); 1551 1552 if (RTA(rt) && 1553 (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) { 1554 if (rt->rt_flags & RTF_GATEWAY) 1555 dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr; 1556 else 1557 dest.s_addr = ip->ip_dst.s_addr; 1558 /* Router requirements says to only send host redirects */ 1559 type = ICMP_REDIRECT; 1560 code = ICMP_REDIRECT_HOST; 1561 } 1562 } 1563 if (rt) 1564 RTFREE(rt); 1565 } 1566 1567 /* 1568 * Try to cache the route MTU from ip_output so we can consider it for 1569 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191. 1570 */ 1571 bzero(&ro, sizeof(ro)); 1572 1573 error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL); 1574 1575 if (error == EMSGSIZE && ro.ro_rt) 1576 mtu = ro.ro_rt->rt_mtu; 1577 RO_RTFREE(&ro); 1578 1579 if (error) 1580 IPSTAT_INC(ips_cantforward); 1581 else { 1582 IPSTAT_INC(ips_forward); 1583 if (type) 1584 IPSTAT_INC(ips_redirectsent); 1585 else { 1586 if (mcopy) 1587 m_freem(mcopy); 1588 if (ia != NULL) 1589 ifa_free(&ia->ia_ifa); 1590 return; 1591 } 1592 } 1593 if (mcopy == NULL) { 1594 if (ia != NULL) 1595 ifa_free(&ia->ia_ifa); 1596 return; 1597 } 1598 1599 switch (error) { 1600 1601 case 0: /* forwarded, but need redirect */ 1602 /* type, code set above */ 1603 break; 1604 1605 case ENETUNREACH: 1606 case EHOSTUNREACH: 1607 case ENETDOWN: 1608 case EHOSTDOWN: 1609 default: 1610 type = ICMP_UNREACH; 1611 code = ICMP_UNREACH_HOST; 1612 break; 1613 1614 case EMSGSIZE: 1615 type = ICMP_UNREACH; 1616 code = ICMP_UNREACH_NEEDFRAG; 1617 1618 #ifdef IPSEC 1619 /* 1620 * If IPsec is configured for this path, 1621 * override any possibly mtu value set by ip_output. 1622 */ 1623 mtu = ip_ipsec_mtu(mcopy, mtu); 1624 #endif /* IPSEC */ 1625 /* 1626 * If the MTU was set before make sure we are below the 1627 * interface MTU. 1628 * If the MTU wasn't set before use the interface mtu or 1629 * fall back to the next smaller mtu step compared to the 1630 * current packet size. 1631 */ 1632 if (mtu != 0) { 1633 if (ia != NULL) 1634 mtu = min(mtu, ia->ia_ifp->if_mtu); 1635 } else { 1636 if (ia != NULL) 1637 mtu = ia->ia_ifp->if_mtu; 1638 else 1639 mtu = ip_next_mtu(ntohs(ip->ip_len), 0); 1640 } 1641 IPSTAT_INC(ips_cantfrag); 1642 break; 1643 1644 case ENOBUFS: 1645 case EACCES: /* ipfw denied packet */ 1646 m_freem(mcopy); 1647 if (ia != NULL) 1648 ifa_free(&ia->ia_ifa); 1649 return; 1650 } 1651 if (ia != NULL) 1652 ifa_free(&ia->ia_ifa); 1653 icmp_error(mcopy, type, code, dest.s_addr, mtu); 1654 } 1655 1656 void 1657 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, 1658 struct mbuf *m) 1659 { 1660 1661 if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) { 1662 struct bintime bt; 1663 1664 bintime(&bt); 1665 if (inp->inp_socket->so_options & SO_BINTIME) { 1666 *mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt), 1667 SCM_BINTIME, SOL_SOCKET); 1668 if (*mp) 1669 mp = &(*mp)->m_next; 1670 } 1671 if (inp->inp_socket->so_options & SO_TIMESTAMP) { 1672 struct timeval tv; 1673 1674 bintime2timeval(&bt, &tv); 1675 *mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv), 1676 SCM_TIMESTAMP, SOL_SOCKET); 1677 if (*mp) 1678 mp = &(*mp)->m_next; 1679 } 1680 } 1681 if (inp->inp_flags & INP_RECVDSTADDR) { 1682 *mp = sbcreatecontrol((caddr_t)&ip->ip_dst, 1683 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); 1684 if (*mp) 1685 mp = &(*mp)->m_next; 1686 } 1687 if (inp->inp_flags & INP_RECVTTL) { 1688 *mp = sbcreatecontrol((caddr_t)&ip->ip_ttl, 1689 sizeof(u_char), IP_RECVTTL, IPPROTO_IP); 1690 if (*mp) 1691 mp = &(*mp)->m_next; 1692 } 1693 #ifdef notyet 1694 /* XXX 1695 * Moving these out of udp_input() made them even more broken 1696 * than they already were. 1697 */ 1698 /* options were tossed already */ 1699 if (inp->inp_flags & INP_RECVOPTS) { 1700 *mp = sbcreatecontrol((caddr_t)opts_deleted_above, 1701 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); 1702 if (*mp) 1703 mp = &(*mp)->m_next; 1704 } 1705 /* ip_srcroute doesn't do what we want here, need to fix */ 1706 if (inp->inp_flags & INP_RECVRETOPTS) { 1707 *mp = sbcreatecontrol((caddr_t)ip_srcroute(m), 1708 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); 1709 if (*mp) 1710 mp = &(*mp)->m_next; 1711 } 1712 #endif 1713 if (inp->inp_flags & INP_RECVIF) { 1714 struct ifnet *ifp; 1715 struct sdlbuf { 1716 struct sockaddr_dl sdl; 1717 u_char pad[32]; 1718 } sdlbuf; 1719 struct sockaddr_dl *sdp; 1720 struct sockaddr_dl *sdl2 = &sdlbuf.sdl; 1721 1722 if ((ifp = m->m_pkthdr.rcvif) && 1723 ifp->if_index && ifp->if_index <= V_if_index) { 1724 sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr; 1725 /* 1726 * Change our mind and don't try copy. 1727 */ 1728 if (sdp->sdl_family != AF_LINK || 1729 sdp->sdl_len > sizeof(sdlbuf)) { 1730 goto makedummy; 1731 } 1732 bcopy(sdp, sdl2, sdp->sdl_len); 1733 } else { 1734 makedummy: 1735 sdl2->sdl_len = 1736 offsetof(struct sockaddr_dl, sdl_data[0]); 1737 sdl2->sdl_family = AF_LINK; 1738 sdl2->sdl_index = 0; 1739 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0; 1740 } 1741 *mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len, 1742 IP_RECVIF, IPPROTO_IP); 1743 if (*mp) 1744 mp = &(*mp)->m_next; 1745 } 1746 if (inp->inp_flags & INP_RECVTOS) { 1747 *mp = sbcreatecontrol((caddr_t)&ip->ip_tos, 1748 sizeof(u_char), IP_RECVTOS, IPPROTO_IP); 1749 if (*mp) 1750 mp = &(*mp)->m_next; 1751 } 1752 1753 if (inp->inp_flags2 & INP_RECVFLOWID) { 1754 uint32_t flowid, flow_type; 1755 1756 flowid = m->m_pkthdr.flowid; 1757 flow_type = M_HASHTYPE_GET(m); 1758 1759 /* 1760 * XXX should handle the failure of one or the 1761 * other - don't populate both? 1762 */ 1763 *mp = sbcreatecontrol((caddr_t) &flowid, 1764 sizeof(uint32_t), IP_FLOWID, IPPROTO_IP); 1765 if (*mp) 1766 mp = &(*mp)->m_next; 1767 *mp = sbcreatecontrol((caddr_t) &flow_type, 1768 sizeof(uint32_t), IP_FLOWTYPE, IPPROTO_IP); 1769 if (*mp) 1770 mp = &(*mp)->m_next; 1771 } 1772 1773 #ifdef RSS 1774 if (inp->inp_flags2 & INP_RECVRSSBUCKETID) { 1775 uint32_t flowid, flow_type; 1776 uint32_t rss_bucketid; 1777 1778 flowid = m->m_pkthdr.flowid; 1779 flow_type = M_HASHTYPE_GET(m); 1780 1781 if (rss_hash2bucket(flowid, flow_type, &rss_bucketid) == 0) { 1782 *mp = sbcreatecontrol((caddr_t) &rss_bucketid, 1783 sizeof(uint32_t), IP_RSSBUCKETID, IPPROTO_IP); 1784 if (*mp) 1785 mp = &(*mp)->m_next; 1786 } 1787 } 1788 #endif 1789 } 1790 1791 /* 1792 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the 1793 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on 1794 * locking. This code remains in ip_input.c as ip_mroute.c is optionally 1795 * compiled. 1796 */ 1797 static VNET_DEFINE(int, ip_rsvp_on); 1798 VNET_DEFINE(struct socket *, ip_rsvpd); 1799 1800 #define V_ip_rsvp_on VNET(ip_rsvp_on) 1801 1802 int 1803 ip_rsvp_init(struct socket *so) 1804 { 1805 1806 if (so->so_type != SOCK_RAW || 1807 so->so_proto->pr_protocol != IPPROTO_RSVP) 1808 return EOPNOTSUPP; 1809 1810 if (V_ip_rsvpd != NULL) 1811 return EADDRINUSE; 1812 1813 V_ip_rsvpd = so; 1814 /* 1815 * This may seem silly, but we need to be sure we don't over-increment 1816 * the RSVP counter, in case something slips up. 1817 */ 1818 if (!V_ip_rsvp_on) { 1819 V_ip_rsvp_on = 1; 1820 V_rsvp_on++; 1821 } 1822 1823 return 0; 1824 } 1825 1826 int 1827 ip_rsvp_done(void) 1828 { 1829 1830 V_ip_rsvpd = NULL; 1831 /* 1832 * This may seem silly, but we need to be sure we don't over-decrement 1833 * the RSVP counter, in case something slips up. 1834 */ 1835 if (V_ip_rsvp_on) { 1836 V_ip_rsvp_on = 0; 1837 V_rsvp_on--; 1838 } 1839 return 0; 1840 } 1841 1842 int 1843 rsvp_input(struct mbuf **mp, int *offp, int proto) 1844 { 1845 struct mbuf *m; 1846 1847 m = *mp; 1848 *mp = NULL; 1849 1850 if (rsvp_input_p) { /* call the real one if loaded */ 1851 *mp = m; 1852 rsvp_input_p(mp, offp, proto); 1853 return (IPPROTO_DONE); 1854 } 1855 1856 /* Can still get packets with rsvp_on = 0 if there is a local member 1857 * of the group to which the RSVP packet is addressed. But in this 1858 * case we want to throw the packet away. 1859 */ 1860 1861 if (!V_rsvp_on) { 1862 m_freem(m); 1863 return (IPPROTO_DONE); 1864 } 1865 1866 if (V_ip_rsvpd != NULL) { 1867 *mp = m; 1868 rip_input(mp, offp, proto); 1869 return (IPPROTO_DONE); 1870 } 1871 /* Drop the packet */ 1872 m_freem(m); 1873 return (IPPROTO_DONE); 1874 } 1875