/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");
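
/*
 * Illustrative usage (not part of the original file): these per-VNET
 * knobs are exposed through the sysctl tree, so at runtime they would
 * typically be toggled with sysctl(8), e.g.:
 *
 *	sysctl net.inet.ip.forwarding=1		# act as a router
 *	sysctl net.inet.ip.redirect=0		# suppress ICMP redirects
 *
 * The MIB names follow from the SYSCTL_VNET_INT() declarations above.
 */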

static VNET_DEFINE(int, ip_keepfaith);
#define	V_ip_keepfaith		VNET(ip_keepfaith)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

static VNET_DEFINE(int, ip_sendsourcequench);
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

VNET_DEFINE(int, ip_do_randomid);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};
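
/*
 * Added note (based on netisr(9)): NETISR_POLICY_FLOW asks the netisr
 * framework to preserve per-flow ordering when dispatching queued IP
 * packets, using the mbuf flow ID where available.  The handler itself
 * is registered in ip_init() below.
 */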

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);	/* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

VNET_DEFINE(struct ipstat, ipstat);
SYSCTL_VNET_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(ipstat), ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)
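
/*
 * Added note: ipqlock is deliberately a single global (non-virtualized)
 * mutex; it serializes all fragment reassembly state, across every hash
 * bucket and every vnet, as can be seen in ip_slowtimo() and ip_drain(),
 * which take it once around their VNET_FOREACH() loops.
 */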

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);	/* Administrative limit on # reass queues. */
static VNET_DEFINE(int, nipq);		/* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array of u_long.  While this encodes the general
 * layout of ipstat into the caller, it doesn't encode its location, so that
 * future changes to add, for example, per-CPU stats support won't cause
 * binary compatibility problems for kernel modules.
 */
void
kmod_ipstat_inc(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))++;
}

void
kmod_ipstat_dec(int statnum)
{

	(*((u_long *)&V_ipstat + statnum))--;
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size)) {
		if (V_ip_output_flowtable_size < 256)
			V_ip_output_flowtable_size = 256;
		if (!powerof2(V_ip_output_flowtable_size)) {
			printf("flowtable must be power of 2 size\n");
			V_ip_output_flowtable_size = 2048;
		}
	} else {
		/*
		 * round up to the next power of 2
		 */
		V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64)-1);
	}
	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

#ifdef VIMAGE
void
ip_destroy(void)
{

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}
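
	/*
	 * Added note: validate the header checksum, preferring the NIC's
	 * verdict when the driver has already checked it (CSUM_IP_CHECKED /
	 * CSUM_IP_VALID) and falling back to a software computation over
	 * the header otherwise.
	 */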
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
		if (dchg != 0) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
			ip_forward(m, 1);
			return;
		}
	}
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet; RSVP wants it, and this prevents it from
	 * being forwarded anywhere else.  Also check that the RSVP
	 * daemon is running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			ifa_ref(&ia->ia_ifa);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
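
	/*
	 * Added note: INADDR_BROADCAST is the limited broadcast address
	 * 255.255.255.255 and INADDR_ANY is 0.0.0.0, accepted here as an
	 * old-style broadcast; both are always treated as ours.
	 */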
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
		ifa_free(&ia->ia_ifa);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

#ifdef IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}
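
/*
 * Added note: this event handler is registered against nmbclusters_change
 * in ip_init(); if the mbuf cluster pool grows, the reassembly queue limit
 * is re-derived with the same nmbclusters / 32 heuristic used at boot.
 */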
static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}
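
	/*
	 * Added note: while a fragment sits on a reassembly queue its data
	 * pointer has been advanced past the IP header (see above), so the
	 * header is reached through the pointer stashed in m_pkthdr.header;
	 * GETIP() recovers it.
	 */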
#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in the for loop above), though it implies we are not going
	 * to reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}
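
/*
 * Illustrative usage (an assumption, not code from this file): a protocol
 * whose protosw entry has been added to inetsw[] via pf_proto_register()
 * would typically claim its protocol number from a module event handler,
 * e.g. on MOD_LOAD:
 *
 *	error = ipproto_register(IPPROTO_SCTP);	 (hypothetical caller)
 *
 * and release it with ipproto_unregister(IPPROTO_SCTP) on MOD_UNLOAD,
 * which points the slot back at the raw IP handler.
 */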

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}
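
/*
 * Added note (assumption): this table maps the PRC_* control-input codes
 * from sys/protosw.h to errno values; transport protocols consult it from
 * their *_ctlinput() routines when translating incoming ICMP errors into
 * socket errors.  A zero entry means the command is not reported.
 */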
u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	MGETHDR(mcopy, M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/*
				 * Router requirements say to only send
				 * host redirects.
				 */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH, per
		 * RFC 1812 (Requirements for IP Version 4 Routers).
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}
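
/*
 * Added note: ip_savecontrol() builds the chain of control (cmsg) mbufs
 * carrying ancillary data that the owning socket asked for -- timestamps
 * (SO_BINTIME/SO_TIMESTAMP), destination address, TTL, TOS and receiving
 * interface -- appending each record at *mp for delivery with the datagram.
 */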
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy it if it
			 * isn't a link-level address or is too big for
			 * the buffer.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)
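
/*
 * Added note: V_ip_rsvp_on records whether this vnet currently has an
 * RSVP daemon attached, while V_rsvp_on is the gate tested in ip_input();
 * the guarded increment/decrement below keeps the two consistent even if
 * ip_rsvp_init()/ip_rsvp_done() are called more than once.
 */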
int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) { /* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}