/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
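/*
 * Note on the VNET_DEFINE()/SYSCTL_VNET_INT() pattern used throughout this
 * file: each such variable gets one instance per virtualized network stack
 * (VIMAGE), accessed through its V_* accessor macro, and the sysctl handler
 * operates on the instance belonging to the current vnet.  The knob above,
 * for instance, is exported as "net.inet.ip.redirect" by the declaration
 * that follows.
 */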
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

static VNET_DEFINE(int, ip_keepfaith);
#define	V_ip_keepfaith		VNET(ip_keepfaith)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

static VNET_DEFINE(int, ip_sendsourcequench);
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

VNET_DEFINE(int, ip_do_randomid);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);  /* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
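/*
 * maxnipq uses -1 to mean "unlimited", 0 to mean "no new reassembly
 * queues at all", and a positive value as a hard cap; see maxnipq_update()
 * below for how each case is pushed into the UMA zone limit.
 */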
static VNET_DEFINE(int, nipq);		/* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrement on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP statistics are stored in the "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
VNET_PCPUSTAT_SYSINIT(ipstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ipstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array.
 */
void
kmod_ipstat_inc(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], 1);
}

void
kmod_ipstat_dec(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], -1);
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR,
	    &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size)) {
		if (V_ip_output_flowtable_size < 256)
			V_ip_output_flowtable_size = 256;
		if (!powerof2(V_ip_output_flowtable_size)) {
			printf("flowtable must be power of 2 size\n");
			V_ip_output_flowtable_size = 2048;
		}
	} else {
		/*
		 * Round up to the next power of 2.
		 */
		V_ip_output_flowtable_size =
		    1 << fls((1024 + maxusers * 64) - 1);
	}
	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

#ifdef VIMAGE
void
ip_destroy(void)
{
	int i;

	if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to unregister pfil hook, "
		    "error %d\n", __func__, i);

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
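		/*
		 * ip_hl counts 32-bit words, so shifting left by two
		 * converts it to a byte count; a header without options
		 * has ip_hl == 5, i.e. hlen == 20 bytes.
		 */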
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL);

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
		if (dchg != 0) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
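			/*
			 * The PACKET_TAG_IPFORWARD tag carrying the next-hop
			 * address is typically attached by a firewall "fwd"
			 * rule (e.g. ipfw fwd); its mere presence is enough
			 * to treat the destination as changed here.
			 */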
			ip_forward(m, 1);
			return;
		}
	}
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter if it is destined to another node or whether it is
	 * a multicast one; RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check that the rsvp daemon is
	 * running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
			counter_u64_add(ia->ia_ifa.ifa_ibytes,
			    m->m_pkthdr.len);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1))
		return;
#endif /* IPSTEALTH */

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

#ifdef IPSEC
	/*
	 * Enforce IPsec policy checking if we are seeing last header.
	 * Note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * Drop something from the tail of the current queue
		 * before proceeding further.
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall);	/* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_IP_FRAG;
	} else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	mcopy = m_gethdr(M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) ==
			    RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/*
				 * Router requirements say to only send
				 * host redirects.
				 */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH, as
		 * required by RFC 1812, Requirements for IP Version 4
		 * Routers.  Source quench could be a big problem under
		 * DoS attacks, or if the underlying interface is
		 * rate-limited.  Those who need source quench packets
		 * may re-enable them via the net.inet.ip.sendsourcequench
		 * sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if ((ifp = m->m_pkthdr.rcvif) &&
		    ifp->if_index && ifp->if_index <= V_if_index) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy.
			 */
			if (sdp->sdl_family != AF_LINK ||
			    sdp->sdl_len > sizeof(sdlbuf)) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.
 * This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) {	/* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}