/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
static VNET_DEFINE(int, ip_checkinterface);
static VNET_DEFINE(int, ip_keepfaith);
static VNET_DEFINE(int, ip_sendsourcequench);

#define	V_ipsendredirects	VNET(ipsendredirects)
#define	V_ip_checkinterface	VNET(ip_checkinterface)
#define	V_ip_keepfaith		VNET(ip_keepfaith)
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
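/*
 * Illustrative sketch (not part of the original file): the block above
 * follows the usual VIMAGE idiom of pairing a per-vnet storage definition
 * with a V_ accessor macro.  A hypothetical new knob would be added the
 * same way:
 */
#if 0
static VNET_DEFINE(int, ip_example_knob) = 0;		/* hypothetical */
#define	V_ip_example_knob	VNET(ip_example_knob)	/* per-vnet access */
#endif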
VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
VNET_DEFINE(int, ip_do_randomid);
VNET_DEFINE(int, ipforwarding);

VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);	/* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */
VNET_DEFINE(struct ipstat, ipstat);

static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);
VNET_DEFINE(int, rsvp_on);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static VNET_DEFINE(int, maxnipq);	/* Administrative limit on # reass queues. */
static VNET_DEFINE(int, maxfragsperpacket);
static VNET_DEFINE(int, nipq);		/* Total # of reass queues */

#define	V_ipq			VNET(ipq)
#define	V_maxnipq		VNET(maxnipq)
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
#define	V_nipq			VNET(nipq)

VNET_DEFINE(int, ipstealth);

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");
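/*
 * Illustrative usage note (an assumption about the deployment, run from
 * userland rather than from this file): the SYSCTL_VNET_INT() declaration
 * above surfaces the knob as net.inet.ip.check_interface, e.g.:
 *
 *	# sysctl net.inet.ip.check_interface=1
 */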
struct pfil_head inet_pfil_hook;	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];

SYSCTL_VNET_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(ipstat), ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

static VNET_DEFINE(uma_zone_t, ipq_zone);
#define	V_ipq_zone		VNET(ipq_zone)

static struct mtx ipqlock;

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

struct callout	ipport_tick_callout;

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

VNET_DEFINE(int, fw_one_pass) = 1;

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array of u_long.  While this encodes the
 * general layout of ipstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_ipstat_inc(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))++;
}

void
kmod_ipstat_dec(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))--;
}
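/*
 * Illustrative sketch (an assumption, not necessarily the exact macro
 * shipped in ip_var.h): a module can derive the index for a named counter
 * from the struct layout, so only the layout, not the location, of ipstat
 * is compiled into the module:
 */
#if 0
#define	KMOD_IPSTAT_INC(name)						\
	kmod_ipstat_inc(offsetof(struct ipstat, name) / sizeof(u_long))

	KMOD_IPSTAT_INC(ips_total);	/* bumps V_ipstat.ips_total */
#endif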
static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

#ifdef FLOWTABLE
	TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size);
	V_ip_ft = flowtable_alloc(V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	/* Initialize packet filter hooks. */
	inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
			"error %d\n", __func__, i);
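#if 0
	/*
	 * Illustrative sketch (an assumption, not part of the original
	 * file): a packet filter would attach to the head registered above
	 * roughly like this, with my_check() a hypothetical hook of the
	 * pfil input/output signature:
	 */
	int my_check(void *arg, struct mbuf **mp, struct ifnet *ifp,
	    int dir, struct inpcb *inp);

	pfil_add_hook(my_check, NULL, PFIL_IN | PFIL_WAITOK,
	    &inet_pfil_hook);
#endif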
	/* Start ipport_tick. */
	callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
	callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

void
ip_fini(void *xtp)
{

	callout_stop(&ipport_tick_callout);
}

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	u_short sum;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		/*
		 * Firewall or NAT changed destination to local.
		 * We expect ip_len and ip_off to be in host byte order.
		 */
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	if (ip->ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}
	ip->ip_off = ntohs(ip->ip_off);

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
	}
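	/*
	 * Illustrative note (not in the original source): when the excess
	 * spans several mbufs, the m_adj(9) call above is given a negative
	 * count, which trims from the tail of the chain; a positive count
	 * would trim from the head.  E.g. m_adj(m, -4) drops 4 trailing
	 * bytes.
	 */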
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets from a tunnel (gif).
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

#ifdef IPFIREWALL_FORWARD
	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
		/*
		 * Directly ship the packet on.  This allows forwarding
		 * packets that were destined for us to some other directly
		 * connected host.
		 */
		ip_forward(m, dchg);
		return;
	}
#endif /* IPFIREWALL_FORWARD */

passin:
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a multicast
	 * packet; RSVP wants it and prevents it from being forwarded
	 * anywhere else.  Also check that the rsvp daemon is running
	 * before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
#ifdef DEV_CARP
	    !ifp->if_carp &&
#endif
	    (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			ifa_ref(&ia->ia_ifa);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_UNLOCK(ifp);
				goto ours;
			}
			if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_UNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				ifa_ref(ifa);
				IF_ADDR_UNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_UNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}
	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
#endif /* IPSTEALTH */

	/* Count the packet in the IP address stats. */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
		ifa_free(&ia->ia_ifa);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#ifdef IPSEC
	/*
	 * Enforce IPsec policy checking if we are seeing the last header.
	 * Note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA
 * zone max has slightly different semantics than the sysctl, for
 * historical reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
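/*
 * Illustrative sketch (an assumption, not necessarily the macro defined in
 * ip_var.h): ip_reass() below buckets datagrams into V_ipq[] by a hash of
 * the source address and IP id, of roughly this shape:
 */
#if 0
/* hypothetical stand-in for the IPREASS_HASH() macro used below */
#define	IPREASS_HASH_SKETCH(src, id)					\
	((((src) & 0xff) ^ (id)) & (IPREASS_NHASH - 1))
#endif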
/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first packet/fragment
 * are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * Drop something from the tail of the current queue
		 * before proceeding further.
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this fragment to bytes.
	 */
	ip->ip_len -= hlen;
	if (ip->ip_off & IP_MF) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off <<= 3;


	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;
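	/*
	 * Illustrative summary (not part of the original source) of the
	 * ECN reconciliation above, with ecn0 the first fragment's
	 * codepoint and ecn the new fragment's (cf. RFC 3168):
	 *
	 *	ecn \ ecn0:	NOT-ECT		ECT		CE
	 *	NOT-ECT		accept		drop		drop
	 *	ECT		accept		accept		accept
	 *	CE		drop		accept*		accept
	 *
	 * (*) and CE is copied onto the first fragment so it is not lost.
	 */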
	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (GETIP(q)->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments, trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	    q = nq) {
		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in the for{} loop), though it implies we are not going
	 * to reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = (ip->ip_hl << 2) + next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}
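/*
 * Illustrative sketch (not part of the original file): the 'end-around
 * carry' applied to m_pkthdr.csum_data in ip_reass() above folds a 32-bit
 * one's-complement partial sum back into 16 bits.  ip_reass() folds only
 * once, since a packet of at most 64k bytes cannot overflow further:
 */
#if 0
static u_short
csum_fold_sketch(u_int32_t sum)		/* hypothetical helper */
{

	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return ((u_short)sum);
}
#endif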
/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			while (!TAILQ_EMPTY(&V_ipq[i])) {
				IPSTAT_ADD(ips_fragdropped,
				    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
				ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr - inetsw;
				return (0);
			} else
				return (EINVAL);
		}
	}
	return (EPROTONOSUPPORT);
}
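/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * dynamically loaded protocol first adds its protosw to inetsw[] and then
 * claims its ip_protox[] slot; my_protosw and IPPROTO_FOO are hypothetical:
 */
#if 0
	error = pf_proto_register(PF_INET, &my_protosw);
	if (error == 0)
		error = ipproto_register(IPPROTO_FOO);
#endif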
int
ipproto_unregister(u_char ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto == 0)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	MGETHDR(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/*
				 * Router requirements say to only send
				 * host redirects.
				 */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	if (ro.ro_rt)
		RTFREE(ro.ro_rt);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path, override any
		 * MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(m, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before, make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before, use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ip->ip_len, 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH, as
		 * required by RFC 1812 (Requirements for IP Version 4
		 * Routers).  Source quench could be a big problem under
		 * DoS attacks, or if the underlying interface is
		 * rate-limited.  Those who need source quench packets
		 * may re-enable them via the net.inet.ip.sendsourcequench
		 * sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return (EOPNOTSUPP);

	if (V_ip_rsvpd != NULL)
		return (EADDRINUSE);

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return (0);
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return (0);
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) {	/* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}
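/*
 * Illustrative userland sketch (an assumption, not part of this file): the
 * control mbufs built by ip_savecontrol() above surface as control messages
 * on a socket that enabled the corresponding options, e.g.:
 */
#if 0
	int s;			/* an AF_INET datagram or raw socket */
	int on = 1;
	struct msghdr msg;
	struct cmsghdr *cm;

	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
	setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
	/* ... fill in msg_iov and msg_control, then: */
	recvmsg(s, &msg, 0);
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_IP &&
		    cm->cmsg_type == IP_RECVDSTADDR)
			;	/* CMSG_DATA(cm) holds the struct in_addr */
#endif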