/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

static VNET_DEFINE(int, ip_keepfaith);
#define	V_ip_keepfaith		VNET(ip_keepfaith)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

static VNET_DEFINE(int, ip_sendsourcequench);
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

VNET_DEFINE(int, ip_do_randomid);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);	/* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

VNET_DEFINE(struct ipstat, ipstat);
SYSCTL_VNET_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(ipstat), ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);	/* Administrative limit on # reass queues. */
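/*
 * A maxnipq of -1 means no limit and 0 disallows new reassembly queues
 * (see maxnipq_update()); it is tunable via net.inet.ip.maxfragpackets.
 */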
static VNET_DEFINE(int, nipq);		/* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array of u_long.  While this encodes the general
 * layout of ipstat into the caller, it doesn't encode its location, so that
 * future changes to add, for example, per-CPU stats support won't cause
 * binary compatibility problems for kernel modules.
 */
void
kmod_ipstat_inc(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))++;
}

void
kmod_ipstat_dec(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))--;
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
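	/* Fragments are hashed over IPREASS_NHASH buckets by (ip_src, ip_id); see ip_reass(). */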
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size)) {
		if (V_ip_output_flowtable_size < 256)
			V_ip_output_flowtable_size = 256;
		if (!powerof2(V_ip_output_flowtable_size)) {
			printf("flowtable must be power of 2 size\n");
			V_ip_output_flowtable_size = 2048;
		}
	} else {
		/*
		 * round up to the next power of 2
		 */
		V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64) - 1);
	}
	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

#ifdef VIMAGE
void
ip_destroy(void)
{

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	u_short sum;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		/*
		 * Firewall or NAT changed destination to local.
		 * We expect ip_len and ip_off to be in host byte order.
		 */
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	/*
	 * Convert fields to host representation.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	if (ip->ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}
	ip->ip_off = ntohs(ip->ip_off);

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

#ifdef IPFIREWALL_FORWARD
	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
		/*
		 * Directly ship the packet on.  This allows forwarding
		 * packets originally destined to us to some other directly
		 * connected host.
		 */
		ip_forward(m, dchg);
		return;
	}
#endif /* IPFIREWALL_FORWARD */

passin:
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined to another node or is a multicast
	 * packet; RSVP wants it and prevents it from being forwarded
	 * anywhere else.  Also check whether the rsvp daemon is running
	 * before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			ifa_ref(&ia->ia_ifa);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
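	/* (Link-local addresses are only meaningful on the arrival link.) */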
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH(Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
		ifa_free(&ia->ia_ifa);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

	/*
	 * Further protocols expect the packet length to be w/o the
	 * IP header.
	 */
	ip->ip_len -= hlen;

#ifdef IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len -= hlen;
	if (ip->ip_off & IP_MF) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off <<= 3;


	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (GETIP(q)->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	    q = nq) {
		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 *
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet;  dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = (ip->ip_hl << 2) + next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for(fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if(--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while(!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.
	 * The vast majority of the packets are forwarded without having
	 * to send an ICMP back (either because unnecessary, or because
	 * rate limited), so we are really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	MGETHDR(mcopy, M_DONTWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/* Router requirements say to only send host redirects */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ip->ip_len, 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char	pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.
 * This code remains in ip_input.c as ip_mroute.c is optionally compiled.
 */
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) { /* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/* Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */

	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}