/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/vimage.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

/* XXX: Temporary until ipfw_ether and ipfw_bridge are converted. */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

int rsvp_on = 0;

int ipforwarding = 0;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_FORWARDING,
    forwarding, CTLFLAG_RW, ipforwarding, 0,
    "Enable IP forwarding between interfaces");

static int ipsendredirects = 1;	/* XXX */
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_SENDREDIRECTS,
    redirect, CTLFLAG_RW, ipsendredirects, 0,
    "Enable sending IP redirects");

int ip_defttl = IPDEFTTL;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_DEFTTL,
    ttl, CTLFLAG_RW, ip_defttl, 0, "Maximum TTL on IP packets");

static int ip_keepfaith = 0;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, IPCTL_KEEPFAITH,
    keepfaith, CTLFLAG_RW, ip_keepfaith, 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

static int ip_sendsourcequench = 0;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO,
    sendsourcequench, CTLFLAG_RW, ip_sendsourcequench, 0,
    "Enable the transmission of source quench packets");

int ip_do_randomid = 0;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, random_id,
    CTLFLAG_RW, ip_do_randomid, 0, "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static int ip_checkinterface = 0;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO,
    check_interface, CTLFLAG_RW, ip_checkinterface, 0,
    "Verify packet arrives on correct interface");

struct pfil_head inet_pfil_hook;	/* Packet filter hooks */

static struct ifqueue ipintrq;
static int ipqmaxlen = IFQ_MAXLEN;

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
struct	in_ifaddrhead in_ifaddrhead;		/* first inet address */
struct	in_ifaddrhashhead *in_ifaddrhashtbl;	/* inet addr hash table */
u_long	in_ifaddrhmask;				/* mask for hash table */

SYSCTL_INT(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, CTLFLAG_RW,
    &ipintrq.ifq_maxlen, 0, "Maximum size of the IP input queue");
SYSCTL_INT(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, CTLFLAG_RD,
    &ipintrq.ifq_drops, 0,
    "Number of packets dropped from the IP input queue");

struct ipstat ipstat;
SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
    ipstat, ipstat, "IP statistics (struct ipstat, netinet/ip_var.h)");

/*
 * IP datagram reassembly.
 */
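/*
 * Note on IPREASS_HASH() below: it folds the low nibble of the first
 * byte and the low nibble of the second byte of the (network order)
 * source address into a single byte, XORs in the IP id, and masks the
 * result to one of IPREASS_NHASH (64) buckets.  As a worked example
 * (little-endian host assumed), src 10.0.0.1 gives s_addr 0x0100000a,
 * so with id 0x1234 the bucket is ((0x0a ^ 0x1234) & 0x3f) == 0x3e.
 */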
#define	IPREASS_NHASH_LOG2	6
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)
#define	IPREASS_HASH(x, y) \
	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)

static uma_zone_t ipq_zone;
static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH];
static struct mtx ipqlock;

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);

static int	maxnipq;	/* Administrative limit on # reass queues. */
static int	nipq = 0;	/* Total # of reass queues */
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, fragpackets,
    CTLFLAG_RD, nipq, 0,
    "Current number of IPv4 fragment reassembly queue entries");

static int	maxfragsperpacket;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, maxfragsperpacket,
    CTLFLAG_RW, maxfragsperpacket, 0,
    "Maximum number of IPv4 fragments allowed per packet");

struct callout	ipport_tick_callout;

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
int	ipstealth = 0;
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    ipstealth, 0, "IP stealth mode, no TTL decrement on forwarding");
#endif

/*
 * ipfw_ether and ipfw_bridge hooks.
 * XXX: Temporary until those are converted to pfil_hooks as well.
 */
ip_fw_chk_t *ip_fw_chk_ptr = NULL;
ip_dn_io_t *ip_dn_io_ptr = NULL;
int fw_one_pass = 1;

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	INIT_VNET_INET(curvnet);
	struct protosw *pr;
	int i;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR,
	    &V_in_ifaddrhmask);
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	/* Initialize packet filter hooks. */
	inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

	/* Initialize IP reassembly queue. */
	IPQ_LOCK_INIT();
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Start ipport_tick. */
	callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
	ipport_tick(NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	V_ip_id = time_second & 0xffff;
	ipintrq.ifq_maxlen = ipqmaxlen;
	mtx_init(&ipintrq.ifq_mtx, "ip_inq", NULL, MTX_DEF);
	netisr_register(NETISR_IP, ip_input, &ipintrq, 0);
}

void
ip_fini(void *xtp)
{

	callout_stop(&ipport_tick_callout);
}

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	int checkif, hlen = 0;
	u_short sum;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		/*
		 * Firewall or NAT changed destination to local.
		 * We expect ip_len and ip_off to be in host byte order.
		 */
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		goto ours;
	}

	V_ipstat.ips_total++;

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		V_ipstat.ips_toosmall++;
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		V_ipstat.ips_badvers++;
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		V_ipstat.ips_badhlen++;
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			V_ipstat.ips_badhlen++;
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) {
			V_ipstat.ips_badaddr++;
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		V_ipstat.ips_badsum++;
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	if (ip->ip_len < hlen) {
		V_ipstat.ips_badlen++;
		goto bad;
	}
	ip->ip_off = ntohs(ip->ip_off);

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
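	/*
	 * Note on the trim below: when the chain holds more than one
	 * mbuf, m_adj() is called with a negative count, which by mbuf
	 * convention trims that many bytes from the tail of the chain.
	 */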
	if (m->m_pkthdr.len < ip->ip_len) {
tooshort:
		V_ipstat.ips_tooshort++;
		goto bad;
	}
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets from a tunnel (gif).
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif,
	    PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);

#ifdef IPFIREWALL_FORWARD
	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
		/*
		 * Directly ship the packet on.  This allows forwarding
		 * packets that were destined for us to some other directly
		 * connected host.
		 */
		ip_forward(m, dchg);
		return;
	}
#endif /* IPFIREWALL_FORWARD */

passin:
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet; RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check whether the rsvp daemon
	 * is running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    m->m_pkthdr.rcvif != NULL &&
	    ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) &&
#ifdef DEV_CARP
	    !m->m_pkthdr.rcvif->if_carp &&
#endif
	    (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
			goto ours;
	}
	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (m->m_pkthdr.rcvif != NULL &&
	    m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
		TAILQ_FOREACH(ifa, &m->m_pkthdr.rcvif->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr)
				goto ours;
			if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr)
				goto ours;
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
				goto ours;
#endif
		}
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		V_ipstat.ips_cantforward++;
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		struct in_multi *inm;

		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward &&
			    ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) {
				V_ipstat.ips_cantforward++;
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			V_ipstat.ips_forward++;
		}
		/*
		 * See if we belong to the destination multicast group on the
		 * arrival interface.
		 */
		IN_MULTI_LOCK();
		IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
		IN_MULTI_UNLOCK();
		if (inm == NULL) {
			V_ipstat.ips_notmember++;
			m_freem(m);
			return;
		}
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		V_ipstat.ips_cantforward++;
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof(struct ip) &&
	    ip_dooptions(m, 1))
		return;
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats. */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet. */
		hlen = ip->ip_hl << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#ifdef IPSEC
	/*
	 * Enforce IPsec policy checking if we are seeing last header.
	 * Note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	V_ipstat.ips_delivered++;

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA
 * zone max has slightly different semantics than the sysctl, for
 * historical reasons.
 */
static void
maxnipq_update(void)
{
	INIT_VNET_INET(curvnet);

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{
	INIT_VNET_INET(curvnet);

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	INIT_VNET_INET(curvnet);
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
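
/*
 * Example usage from userland (value semantics per maxnipq_update()
 * above: -1 means unlimited, 0 disables new reassembly queue
 * allocation):
 *
 *	sysctl net.inet.ip.maxfragpackets	# read the current limit
 *	sysctl net.inet.ip.maxfragpackets=800	# set an explicit bound
 */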

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		V_ipstat.ips_fragments++;
		V_ipstat.ips_fragdropped++;
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * Drop something from the tail of the current queue
		 * before proceeding further.
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {	/* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					V_ipstat.ips_fragtimeout +=
					    r->ipq_nfrags;
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			V_ipstat.ips_fragtimeout += q->ipq_nfrags;
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len -= hlen;
	if (ip->ip_off & IP_MF) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
			V_ipstat.ips_toosmall++;	/* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off <<= 3;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	V_ipstat.ips_fragments++;
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
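	/*
	 * Note: allocations below use M_NOWAIT because we are in the
	 * network input path and hold the IPQ lock, so we must not
	 * sleep; on failure the fragment is simply dropped (dropfrag).
	 */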
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define	GETIP(m)	((struct ip *)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (GETIP(q)->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	    q = nq) {
		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		V_ipstat.ips_fragdropped++;
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
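	/*
	 * The walk below visits fragments in offset order; `next' is the
	 * byte offset at which the next fragment must start, so any
	 * mismatch means a gap and the datagram is not yet complete.
	 */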
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				V_ipstat.ips_fragdropped += fp->ipq_nfrags;
				ip_freef(head, fp);
			}
			goto done;
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			V_ipstat.ips_fragdropped += fp->ipq_nfrags;
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		V_ipstat.ips_toolong++;
		V_ipstat.ips_fragdropped += fp->ipq_nfrags;
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = (ip->ip_hl << 2) + next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* Some debugging cruft by sklower, below, will go away soon. */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	V_ipstat.ips_reassembled++;
	IPQ_UNLOCK();
	return (m);

dropfrag:
	V_ipstat.ips_fragdropped++;
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	INIT_VNET_INET(curvnet);
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
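/*
 * Note (assumption): ip_slowtimo() is driven from the protocol slow
 * timeout, which fires PR_SLOWHZ (2) times per second, so an ipq_ttl
 * of IPFRAGTTL ticks bounds the lifetime of an incomplete reassembly
 * queue entry.
 */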
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	IPQ_LOCK();
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_INET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					V_ipstat.ips_fragtimeout +=
					    fpp->ipq_nfrags;
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					V_ipstat.ips_fragdropped +=
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags;
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
	IPQ_UNLOCK();
}

/*
 * Drain off all datagram fragments.
 */
void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int i;

	IPQ_LOCK();
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_INET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			while (!TAILQ_EMPTY(&V_ipq[i])) {
				V_ipstat.ips_fragdropped +=
				    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags;
				ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
			}
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
	IPQ_UNLOCK();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr - inetsw;
				return (0);
			} else
				return (EINVAL);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}
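
/*
 * Typical usage (sketch): a loadable protocol module first registers
 * its protosw entry via pf_proto_register() and then claims its slot,
 * e.g.
 *
 *	error = ipproto_register(IPPROTO_SCTP);
 *
 * and calls ipproto_unregister() again on module unload.  The protocol
 * number above is only illustrative.
 */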

/*
 * Given address of next destination (final or next hop),
 * return internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ifa;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, RTF_CLONING, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ifa = ifatoia(sro.ro_rt->rt_ifa);
	RTFREE(sro.ro_rt);
	return (ifa);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia = NULL;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		V_ipstat.ips_cantforward++;
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
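	/*
	 * Note: MGETHDR() allocates a single header mbuf with MHLEN
	 * bytes of storage, so the copy below is capped at
	 * M_TRAILINGSPACE(mcopy) -- comfortably enough for the largest
	 * IP header (60 bytes) plus the 8 payload bytes icmp_error()
	 * needs.
	 */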
	MGETHDR(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, RTF_CLONING, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) ==
			    RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/*
				 * Router requirements say to only send
				 * host redirects.
				 */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	if (ro.ro_rt)
		RTFREE(ro.ro_rt);

	if (error)
		V_ipstat.ips_cantforward++;
	else {
		V_ipstat.ips_forward++;
		if (type)
			V_ipstat.ips_redirectsent++;
		else {
			if (mcopy)
				m_freem(mcopy);
			return;
		}
	}
	if (mcopy == NULL)
		return;

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any possible MTU value set by ip_output.
		 */
		mtu = ip_ipsec_mtu(m, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before, make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before, use the interface MTU or
		 * fall back to the next smaller MTU step compared to the
		 * current packet size.
		 */
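		/*
		 * Note (assumption): ip_next_mtu() steps down through the
		 * common MTU plateaus suggested by RFC 1191, so the value
		 * reported in the ICMP "needfrag" message is always smaller
		 * than the packet that failed to fit.
		 */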
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ip->ip_len, 0);
		}
		V_ipstat.ips_cantfrag++;
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		return;
	}
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	INIT_VNET_NET(inp->inp_vnet);

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
static int ip_rsvp_on;
struct socket *ip_rsvpd;

int
ip_rsvp_init(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return (EOPNOTSUPP);

	if (V_ip_rsvpd != NULL)
		return (EADDRINUSE);

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return (0);
}

int
ip_rsvp_done(void)
{
	INIT_VNET_INET(curvnet);

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return (0);
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{
	INIT_VNET_INET(curvnet);

	if (rsvp_input_p) {	/* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet. */
	m_freem(m);
}