/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
static VNET_DEFINE(int, ip_checkinterface);
static VNET_DEFINE(int, ip_keepfaith);
static VNET_DEFINE(int, ip_sendsourcequench);

#define	V_ipsendredirects	VNET(ipsendredirects)
#define	V_ip_checkinterface	VNET(ip_checkinterface)
#define	V_ip_keepfaith		VNET(ip_keepfaith)
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
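/*
 * Illustrative sketch only: each VNET_DEFINE() above creates one instance
 * of the variable per virtual network stack, and the V_foo macro resolves
 * it relative to the current vnet.  A hypothetical per-vnet knob would
 * follow the same pattern:
 *
 *	static VNET_DEFINE(int, ip_example_knob) = 0;
 *	#define V_ip_example_knob VNET(ip_example_knob)
 *
 * after which V_ip_example_knob reads the instance owned by curvnet.
 */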
VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
VNET_DEFINE(int, ip_do_randomid);
VNET_DEFINE(int, ipforwarding);

VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);  /* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */
VNET_DEFINE(struct ipstat, ipstat);

static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);
VNET_DEFINE(int, rsvp_on);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
static VNET_DEFINE(int, maxfragsperpacket);
static VNET_DEFINE(int, nipq);			/* Total # of reass queues */

#define	V_ipq			VNET(ipq)
#define	V_maxnipq		VNET(maxnipq)
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
#define	V_nipq			VNET(nipq)

VNET_DEFINE(int, ipstealth);

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");
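/*
 * Illustrative sketch only, using hypothetical interfaces em0/em1: with
 * net.inet.ip.check_interface=1, a unicast packet addressed to an address
 * configured on em0 but arriving on em1 fails the interface match in
 * ip_input() below and is treated as "not for us" -- the receive half of
 * the RFC 1122 Strong ES model mentioned above.
 */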
VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];

SYSCTL_VNET_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(ipstat), ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

static VNET_DEFINE(uma_zone_t, ipq_zone);
#define	V_ipq_zone		VNET(ipq_zone)

static struct mtx ipqlock;

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

struct callout	ipport_tick_callout;

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

VNET_DEFINE(int, fw_one_pass) = 1;

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array of u_long.  While this encodes the
 * general layout of ipstat into the caller, it doesn't encode its
 * location, so that future changes to add, for example, per-CPU stats
 * support won't cause binary compatibility problems for kernel modules.
 */
void
kmod_ipstat_inc(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))++;
}

void
kmod_ipstat_dec(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))--;
}
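/*
 * Illustrative sketch only: a module would derive the index from the
 * field's offset within struct ipstat rather than hard-coding it, e.g.
 *
 *	kmod_ipstat_inc(offsetof(struct ipstat, ips_total) /
 *	    sizeof(u_long));
 *
 * which keeps working when fields are added to the structure.
 */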
static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR,
	    &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size);
	V_ip_ft = flowtable_alloc(V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}
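	/*
	 * Illustrative note: after this loop, ip_protox[] maps each IP
	 * protocol number to an inetsw[] slot, so e.g.
	 * ip_protox[IPPROTO_UDP] names the entry whose pr_input is UDP's
	 * input routine, and any unclaimed protocol still points at the
	 * raw IP slot.  ip_input() dispatches with:
	 *
	 *	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	 */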
	/* Start ipport_tick. */
	callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
	callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

#ifdef VIMAGE
void
ip_destroy(void)
{

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

void
ip_fini(void *xtp)
{

	callout_stop(&ipport_tick_callout);
}

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	u_short sum;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		/*
		 * Firewall or NAT changed destination to local.
		 * We expect ip_len and ip_off to be in host byte order.
		 */
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	if (ip->ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}
	ip->ip_off = ntohs(ip->ip_off);

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
	}
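	/*
	 * Note on the trim above: ip->ip_len - m->m_pkthdr.len is negative
	 * there, and m_adj() with a negative count trims that many bytes
	 * from the tail of the chain (e.g. link-layer padding on short
	 * Ethernet frames).
	 */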
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets from a tunnel (gif).
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

#ifdef IPFIREWALL_FORWARD
	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
		/*
		 * Directly ship the packet on.  This allows forwarding
		 * packets originally destined to us to some other directly
		 * connected host.
		 */
		ip_forward(m, dchg);
		return;
	}
#endif /* IPFIREWALL_FORWARD */

passin:
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined to another node or is a multicast
	 * one; RSVP wants it and prevents it from being forwarded anywhere
	 * else.  Also check that the RSVP daemon is running before grabbing
	 * the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
#ifdef DEV_CARP
	    !ifp->if_carp &&
#endif
	    (dchg == 0);
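	/*
	 * Illustrative note: when checkif is set, the hash lookup below
	 * additionally requires ia->ia_ifp == ifp, so a packet for an
	 * address configured on one interface that arrives on another
	 * falls through to the forward-or-drop path at the bottom of
	 * this function.
	 */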
	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			ifa_ref(&ia->ia_ifa);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_UNLOCK(ifp);
				goto ours;
			}
			if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_UNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				ifa_ref(ifa);
				IF_ADDR_UNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_UNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;
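	/*
	 * Everything from the "ours" label on handles packets the checks
	 * above accepted as locally destined: exact unicast matches,
	 * matching broadcasts, multicast, limited broadcast/INADDR_ANY,
	 * and FAITH captures all land here.
	 */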
ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
		ifa_free(&ia->ia_ifa);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#ifdef IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA
 * zone max has slightly different semantics than the sysctl, for
 * historical reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
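/*
 * Illustrative sketch only: fragment offsets travel on the wire in units
 * of 8 bytes, so a 4000-byte payload sent over a 1500-byte MTU typically
 * arrives as three fragments with (wire offset, byte offset, data length)
 * of (0, 0, 1480), (185, 1480, 1480) and (370, 2960, 1040), the first two
 * carrying IP_MF.  ip_reass() below converts wire offsets to bytes with
 * "ip->ip_off <<= 3" and requires every non-final fragment to have a
 * non-zero data length that is a multiple of 8.
 */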
/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len -= hlen;
	if (ip->ip_off & IP_MF) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off <<= 3;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;
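	/*
	 * Illustrative note: each queued fragment now points past its own
	 * IP header (m_data/m_len were advanced above), while the header
	 * pointer is stashed in m_pkthdr.header so the GETIP() macro below
	 * can still reach ip_off/ip_len for overlap trimming.
	 */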
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (GETIP(q)->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	    q = nq) {
		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
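	/*
	 * Illustrative note: the scan below walks the sorted fragment
	 * list accumulating the expected next byte offset in "next"; any
	 * gap, or a trailing fragment still marked M_FRAG, means the
	 * datagram is incomplete and we keep waiting.
	 */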
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = (ip->ip_hl << 2) + next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}
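/*
 * Illustrative sketch only: the csum_data folding in ip_reass() above is
 * the standard ones-complement "end-around carry".  Summing per-fragment
 * partial checksums can overflow 16 bits, e.g.
 *
 *	0x1fffe  ->  (0x1fffe & 0xffff) + (0x1fffe >> 16)
 *	         =   0xfffe + 0x1  =  0xffff
 *
 * which folds the carry back into the low 16 bits, as the Internet
 * checksum (RFC 1071) requires.
 */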
/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr - inetsw;
				return (0);
			} else
				return (EINVAL);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}
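/*
 * Illustrative sketch only (hypothetical module code, not part of this
 * file): a protocol module typically adds its protosw entry -- here a
 * made-up "my_protosw" with pr_protocol set to its IP protocol number --
 * and then claims the ip_protox[] slot:
 *
 *	error = pf_proto_register(PF_INET, &my_protosw);
 *	if (error == 0)
 *		error = ipproto_register(my_protosw.pr_protocol);
 *
 * and undoes this on unload with ipproto_unregister() followed by
 * pf_proto_unregister().
 */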
/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	MGETHDR(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}
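	/*
	 * Illustrative note: RFC 792 requires an ICMP error to quote the
	 * offending datagram's IP header plus at least the first 8 bytes
	 * of its payload (enough to cover the transport port numbers),
	 * which is why only a small prefix is saved in mcopy above.
	 */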
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) ==
			    RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/*
				 * Router requirements say to only send
				 * host redirects.
				 */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	if (ro.ro_rt)
		RTFREE(ro.ro_rt);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path, override any
		 * MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(m, mtu);
#endif /* IPSEC */
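		/*
		 * Illustrative note: when no usable MTU is known,
		 * ip_next_mtu() below picks the next smaller value from
		 * the table of common "plateau" MTUs suggested by
		 * RFC 1191, so the sender converges in a few steps
		 * instead of probing byte by byte.
		 */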
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ip->ip_len, 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * RFC 1812 (Requirements for IP Version 4 Routers) says a
		 * router should not generate ICMP_SOURCEQUENCH.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t) &bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
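	/*
	 * Illustrative note: throughout this function "mp" trails the end
	 * of the control-mbuf chain; after each successful
	 * sbcreatecontrol() the "mp = &(*mp)->m_next" step advances the
	 * tail pointer so the next control message is appended rather
	 * than overwriting the chain.
	 */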
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char	pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but
 * the ip_rsvp and ip_rsvp_on variables need to be interlocked with
 * rsvp_on locking.  This code remains in ip_input.c as ip_mroute.c is
 * optionally compiled.
 */
int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) { /* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}