/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/vimage.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

/* XXX: Temporary until ipfw_ether and ipfw_bridge are converted.
 */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

#ifdef VIMAGE_GLOBALS
static int	ipsendredirects;
static int	ip_checkinterface;
static int	ip_keepfaith;
static int	ip_sendsourcequench;
int	ip_defttl;
int	ip_do_randomid;
int	ipforwarding;
struct	in_ifaddrhead in_ifaddrhead;		/* first inet address */
struct	in_ifaddrhashhead *in_ifaddrhashtbl;	/* inet addr hash table */
u_long	in_ifaddrhmask;				/* mask for hash table */
struct	ipstat ipstat;
static int	ip_rsvp_on;
struct	socket *ip_rsvpd;
int	rsvp_on;
static struct ipqhead ipq[IPREASS_NHASH];
static int	maxnipq;	/* Administrative limit on # reass queues. */
static int	maxfragsperpacket;
int	ipstealth;
static int	nipq;		/* Total # of reass queues */
#endif

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_FORWARDING,
    forwarding, CTLFLAG_RW, ipforwarding, 0,
    "Enable IP forwarding between interfaces");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_SENDREDIRECTS,
    redirect, CTLFLAG_RW, ipsendredirects, 0,
    "Enable sending IP redirects");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_DEFTTL,
    ttl, CTLFLAG_RW, ip_defttl, 0, "Maximum TTL on IP packets");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_KEEPFAITH,
    keepfaith, CTLFLAG_RW, ip_keepfaith, 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO,
    sendsourcequench, CTLFLAG_RW, ip_sendsourcequench, 0,
    "Enable the transmission of source quench packets");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, random_id,
    CTLFLAG_RW, ip_do_randomid, 0, "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
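 *
 * (See the checkif computation in ip_input() for where these
 * caveats apply.)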
 */
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO,
    check_interface, CTLFLAG_RW, ip_checkinterface, 0,
    "Verify packet arrives on correct interface");

struct pfil_head inet_pfil_hook;	/* Packet filter hooks */

static struct	ifqueue ipintrq;
static int	ipqmaxlen = IFQ_MAXLEN;

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];

SYSCTL_INT(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, CTLFLAG_RW,
    &ipintrq.ifq_maxlen, 0, "Maximum size of the IP input queue");
SYSCTL_INT(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, CTLFLAG_RD,
    &ipintrq.ifq_drops, 0,
    "Number of packets dropped from the IP input queue");

SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
    ipstat, ipstat, "IP statistics (struct ipstat, netinet/ip_var.h)");

static uma_zone_t ipq_zone;
static struct mtx ipqlock;

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, fragpackets,
    CTLFLAG_RD, nipq, 0,
    "Current number of IPv4 fragment reassembly queue entries");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, maxfragsperpacket,
    CTLFLAG_RW, maxfragsperpacket, 0,
    "Maximum number of IPv4 fragments allowed per packet");

struct callout	ipport_tick_callout;

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    ipstealth, 0, "IP stealth mode, no TTL decrementation on forwarding");
#endif

/*
 * ipfw_ether and ipfw_bridge hooks.
 * XXX: Temporary until those are converted to pfil_hooks as well.
 */
ip_fw_chk_t *ip_fw_chk_ptr = NULL;
ip_dn_io_t *ip_dn_io_ptr = NULL;
int fw_one_pass = 1;

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
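 * (Their packets are then delivered to any matching raw sockets by
 * rip_input().)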
 */
void
ip_init(void)
{
	INIT_VNET_INET(curvnet);
	struct protosw *pr;
	int i;

	V_ipsendredirects = 1; /* XXX */
	V_ip_checkinterface = 0;
	V_ip_keepfaith = 0;
	V_ip_sendsourcequench = 0;
	V_rsvp_on = 0;
	V_ip_defttl = IPDEFTTL;
	V_ip_do_randomid = 0;
	V_ipforwarding = 0;
	V_ipstealth = 0;
	V_nipq = 0;	/* Total # of reass queues */

	V_ipport_lowfirstauto = IPPORT_RESERVED - 1;	/* 1023 */
	V_ipport_lowlastauto = IPPORT_RESERVEDSTART;	/* 600 */
	V_ipport_firstauto = IPPORT_EPHEMERALFIRST;	/* 10000 */
	V_ipport_lastauto = IPPORT_EPHEMERALLAST;	/* 65535 */
	V_ipport_hifirstauto = IPPORT_HIFIRSTAUTO;	/* 49152 */
	V_ipport_hilastauto = IPPORT_HILASTAUTO;	/* 65535 */
	V_ipport_reservedhigh = IPPORT_RESERVED - 1;	/* 1023 */
	V_ipport_reservedlow = 0;
	V_ipport_randomized = 1;	/* user controlled via sysctl */
	V_ipport_randomcps = 10;	/* user controlled via sysctl */
	V_ipport_randomtime = 45;	/* user controlled via sysctl */
	V_ipport_stoprandom = 0;	/* toggled by ipport_tick */

#ifdef NOTYET
	/* XXX global static but not instantiated in this file */
	V_ipfastforward_active = 0;
	V_subnetsarelocal = 0;
	V_sameprefixcarponly = 0;
#endif

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR,
	    &V_in_ifaddrhmask);
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	/* Initialize packet filter hooks. */
	inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

	/* Initialize IP reassembly queue. */
	IPQ_LOCK_INIT();
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Start ipport_tick. */
	callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
	ipport_tick(NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	V_ip_id = time_second & 0xffff;
	ipintrq.ifq_maxlen = ipqmaxlen;
	mtx_init(&ipintrq.ifq_mtx, "ip_inq", NULL, MTX_DEF);
	netisr_register(NETISR_IP, ip_input, &ipintrq, 0);
}

void
ip_fini(void *xtp)
{

	callout_stop(&ipport_tick_callout);
}

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	int checkif, hlen = 0;
	u_short sum;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		/*
		 * Firewall or NAT changed destination to local.
		 * We expect ip_len and ip_off to be in host byte order.
		 */
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		goto ours;
	}

	V_ipstat.ips_total++;

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		V_ipstat.ips_toosmall++;
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		V_ipstat.ips_badvers++;
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		V_ipstat.ips_badhlen++;
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			V_ipstat.ips_badhlen++;
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) {
			V_ipstat.ips_badaddr++;
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		V_ipstat.ips_badsum++;
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	if (ip->ip_len < hlen) {
		V_ipstat.ips_badlen++;
		goto bad;
	}
	ip->ip_off = ntohs(ip->ip_off);

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
tooshort:
		V_ipstat.ips_tooshort++;
		goto bad;
	}
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets from a tunnel (gif).
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
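	/* (PFIL_HOOKED() is a simple flag test, so the no-hooks case
	 * stays cheap.) */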
	if (!PFIL_HOOKED(&inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif,
	    PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);

#ifdef IPFIREWALL_FORWARD
	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
		/*
		 * Directly ship the packet on.  This allows forwarding
		 * packets that were destined for us to some other directly
		 * connected host.
		 */
		ip_forward(m, dchg);
		return;
	}
#endif /* IPFIREWALL_FORWARD */

passin:
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet; RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check that the RSVP daemon is
	 * running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    m->m_pkthdr.rcvif != NULL &&
	    ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) &&
#ifdef DEV_CARP
	    !m->m_pkthdr.rcvif->if_carp &&
#endif
	    (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
			goto ours;
	}
	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (m->m_pkthdr.rcvif != NULL &&
	    m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
		TAILQ_FOREACH(ifa, &m->m_pkthdr.rcvif->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr)
				goto ours;
			if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr)
				goto ours;
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
				goto ours;
#endif
		}
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		V_ipstat.ips_cantforward++;
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		struct in_multi *inm;
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward &&
			    ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) {
				V_ipstat.ips_cantforward++;
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			V_ipstat.ips_forward++;
		}
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		IN_MULTI_LOCK();
		IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
		IN_MULTI_UNLOCK();
		if (inm == NULL) {
			V_ipstat.ips_notmember++;
			m_freem(m);
			return;
		}
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		V_ipstat.ips_cantforward++;
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) &&
	    ip_dooptions(m, 1))
		return;
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#ifdef IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	V_ipstat.ips_delivered++;

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{
	INIT_VNET_INET(curvnet);

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{
	INIT_VNET_INET(curvnet);

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	INIT_VNET_INET(curvnet);
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		V_ipstat.ips_fragments++;
		V_ipstat.ips_fragdropped++;
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
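	/* Queues match on the (id, src, dst, protocol) tuple, plus the
	 * MAC label when MAC is compiled in. */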
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					V_ipstat.ips_fragtimeout +=
					    r->ipq_nfrags;
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			V_ipstat.ips_fragtimeout += q->ipq_nfrags;
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len -= hlen;
	if (ip->ip_off & IP_MF) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
			V_ipstat.ips_toosmall++; /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off <<= 3;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	V_ipstat.ips_fragments++;
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (GETIP(q)->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
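	 * (Front overlap is resolved in favor of data already queued;
	 * the loop further below trims or drops queued fragments that
	 * overlap the tail of this one.)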
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	    q = nq) {
		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		V_ipstat.ips_fragdropped++;
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				V_ipstat.ips_fragdropped += fp->ipq_nfrags;
				ip_freef(head, fp);
			}
			goto done;
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			V_ipstat.ips_fragdropped += fp->ipq_nfrags;
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		V_ipstat.ips_toolong++;
		V_ipstat.ips_fragdropped += fp->ipq_nfrags;
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
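	/* (ip_len becomes the full datagram length again; ip_input()
	 * strips the header length from it after we return.) */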
	ip->ip_len = (ip->ip_hl << 2) + next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	V_ipstat.ips_reassembled++;
	IPQ_UNLOCK();
	return (m);

dropfrag:
	V_ipstat.ips_fragdropped++;
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	INIT_VNET_INET(curvnet);
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	IPQ_LOCK();
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_INET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					V_ipstat.ips_fragtimeout +=
					    fpp->ipq_nfrags;
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					V_ipstat.ips_fragdropped +=
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags;
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
	IPQ_UNLOCK();
}

/*
 * Drain off all datagram fragments.
 */
void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int i;

	IPQ_LOCK();
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_INET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			while (!TAILQ_EMPTY(&V_ipq[i])) {
				V_ipstat.ips_fragdropped +=
				    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags;
				ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
			}
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
	IPQ_UNLOCK();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
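	/* (ip_init() points every unassigned ip_protox[] slot at the raw
	 * IP entry, so "unused" means "equals the raw IP index".) */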
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr - inetsw;
				return (0);
			} else
				return (EINVAL);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop),
 * return internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ifa;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, RTF_CLONING, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ifa = ifatoia(sro.ro_rt->rt_ifa);
	RTFREE(sro.ro_rt);
	return (ifa);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
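 * (A nonzero srcrt also suppresses the host-unreachable reply and the
 * redirect logic below.)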
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia = NULL;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		V_ipstat.ips_cantforward++;
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	MGETHDR(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
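	 * (RTF_DYNAMIC and RTF_MODIFIED below mark routes created or
	 * changed by redirects; a zero route key identifies the default
	 * route.)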
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, RTF_CLONING, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) ==
			    RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/*
				 * Router requirements say to only send
				 * host redirects.
				 */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	if (ro.ro_rt)
		RTFREE(ro.ro_rt);

	if (error)
		V_ipstat.ips_cantforward++;
	else {
		V_ipstat.ips_forward++;
		if (type)
			V_ipstat.ips_redirectsent++;
		else {
			if (mcopy)
				m_freem(mcopy);
			return;
		}
	}
	if (mcopy == NULL)
		return;

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any possible MTU value set by ip_output.
		 */
		mtu = ip_ipsec_mtu(m, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ip->ip_len, 0);
		}
		V_ipstat.ips_cantfrag++;
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		return;
	}
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	INIT_VNET_NET(inp->inp_vnet);

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
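 * (rsvp_input() below drops RSVP packets whenever V_rsvp_on is zero.)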
 */
int
ip_rsvp_init(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{
	INIT_VNET_INET(curvnet);

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{
	INIT_VNET_INET(curvnet);

	if (rsvp_input_p) {	/* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/* Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */

	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}