/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_kdtrace.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

VNET_DEFINE(int, rsvp_on);
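/*
 * Most of the globals below are virtualized: VNET_DEFINE() creates one
 * instance of the variable per virtual network stack, and the matching
 * V_foo accessor macros resolve to the instance of the currently active
 * vnet (with VIMAGE disabled they collapse to plain globals).
 */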
VNET_DEFINE(int, ipforwarding);
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

static VNET_DEFINE(int, ip_keepfaith);
#define	V_ip_keepfaith	VNET(ip_keepfaith)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

static VNET_DEFINE(int, ip_sendsourcequench);
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

VNET_DEFINE(int, ip_do_randomid);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);	/* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)
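/*
 * Note that a single mutex guards the reassembly queues in all hash
 * buckets (and, since it is not virtualized, across all vnets);
 * IPQ_LOCK_ASSERT() marks the functions below that rely on the caller
 * already holding it.
 */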
static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
static VNET_DEFINE(int, nipq);			/* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP statistics are stored in the "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
VNET_PCPUSTAT_SYSINIT(ipstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ipstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array.
 */
void
kmod_ipstat_inc(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], 1);
}

void
kmod_ipstat_dec(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], -1);
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");
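/*
 * Both handlers above are reachable through sysctl(8), e.g.:
 *
 *	sysctl net.inet.ip.intr_queue_maxlen=2048
 *	sysctl net.inet.ip.intr_queue_drops
 *
 * The first forwards the new limit to netisr_setqlimit(); the second
 * reports drops accumulated by netisr, and its handler also contains a
 * clear-on-write-of-zero path via netisr_clearqdrops(), although the
 * OID itself is marked CTLFLAG_RD.
 */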
/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size)) {
		if (V_ip_output_flowtable_size < 256)
			V_ip_output_flowtable_size = 256;
		if (!powerof2(V_ip_output_flowtable_size)) {
			printf("flowtable must be power of 2 size\n");
			V_ip_output_flowtable_size = 2048;
		}
	} else {
		/*
		 * round up to the next power of 2
		 */
		V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64) - 1);
	}
	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

#ifdef VIMAGE
void
ip_destroy(void)
{
	int i;

	if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to unregister pfil hook, "
		    "error %d\n", __func__, i);

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif
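/*
 * ip_input() below is the NETISR_IP handler registered via ip_nh; with
 * NETISR_POLICY_FLOW it may run concurrently in several netisr threads,
 * which is why the shared state above (reassembly queues, counters) is
 * either locked or kept in per-CPU counter(9)s.
 */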
/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL);

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
		if (dchg != 0) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
			ip_forward(m, 1);
			return;
		}
	}
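	/*
	 * At this point dchg records whether a packet filter rewrote
	 * ip_dst (e.g. NAT) or attached a PACKET_TAG_IPFORWARD next-hop
	 * tag ('ipfw fwd').  It is used below to relax the strong-ES
	 * interface check and is later handed to ip_forward(), which
	 * suppresses redirect generation for such packets.
	 */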
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet; RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check that the RSVP daemon is
	 * running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
			counter_u64_add(ia->ia_ifa.ifa_ibytes,
			    m->m_pkthdr.len);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */
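	/*
	 * A miss in the unicast hash above does not yet mean "not ours":
	 * broadcast, multicast, limited broadcast (255.255.255.255) and
	 * INADDR_ANY destinations are matched separately below before the
	 * packet is considered for forwarding.
	 */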
	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1))
		return;
#endif /* IPSTEALTH */

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

#ifdef IPSEC
	/*
	 * Enforce IPsec policy checking if we are seeing the last header.
	 * Note that we do not visit this for protocols with pcb-layer
	 * code, like UDP, TCP and raw IP.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);
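	/*
	 * Final demux: for example, a TCP segment has ip->ip_p ==
	 * IPPROTO_TCP, ip_protox[IPPROTO_TCP] indexes the inetsw[] slot
	 * installed by ip_init()/ipproto_register(), and that slot's
	 * pr_input (tcp_input) receives the mbuf together with the IP
	 * header length.
	 */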
	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first packet/fragment
 * are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;
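	/*
	 * No existing queue matched, so this fragment may need a new one.
	 * The eviction below prefers the last (oldest) queue in this hash
	 * bucket, since new queues are inserted at the head; only if the
	 * bucket is empty is another bucket scanned, and the casualties
	 * are charged to ips_fragtimeout.
	 */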
	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_IP_FRAG;
	} else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
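	/*
	 * Worked example for the trimming below: if the preceding
	 * fragment p covers bytes 0-23 and this one starts at offset 16,
	 * i = (0 + 24) - 16 = 8, so the first 8 bytes of m are dropped
	 * and its offset/length are adjusted before it is linked in
	 * after p.
	 */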
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
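	/*
	 * Example of the folding above: accumulating the 16-bit
	 * csum_data of many fragments can leave a carry in the high
	 * word, e.g. 0x1fffe becomes 0xfffe + 0x1 = 0xffff, which is the
	 * end-around carry that one's-complement checksums require.
	 */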
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}
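/*
 * A typical consumer of the two functions below is a loadable protocol
 * module: it first adds its protosw entry to inetsw[] with
 * pf_proto_register() and then calls, e.g.,
 *
 *	error = ipproto_register(IPPROTO_DIVERT);
 *
 * so that ip_input() starts demultiplexing that protocol number to it
 * (this is roughly what ip_divert(4) does); ipproto_unregister() undoes
 * the mapping at unload time.
 */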
/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};
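/*
 * inetctlerrmap[] is indexed by PRC_* control codes (sys/protosw.h) and
 * translates them into errnos for the upper layers; for instance
 * PRC_MSGSIZE maps to EMSGSIZE, which is how "fragmentation needed"
 * notifications become path-MTU information for TCP and UDP, while a
 * zero entry means the event is not reported.
 */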
/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	mcopy = m_gethdr(M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/* Router requirements say to only send host redirects */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}
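	/*
	 * Concretely, the code above offers a redirect only when the
	 * packet would leave through the interface it arrived on, the
	 * route was not itself learned from a redirect
	 * (RTF_DYNAMIC|RTF_MODIFIED) or the default route, and the
	 * source lies on that interface's subnet, i.e. the sender could
	 * have reached the next hop directly; per the router
	 * requirements (RFC 1812) only ICMP_REDIRECT_HOST is generated.
	 */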
	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

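		/*
		 * Example: a 1500-byte packet with DF set that must leave
		 * through a 1280-MTU tunnel fails in ip_output() with
		 * EMSGSIZE; the mtu selected below is then advertised in
		 * the "fragmentation needed" reply so the sender can run
		 * RFC 1191 path-MTU discovery.
		 */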
#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any possible MTU value set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before, make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before, use the interface MTU or
		 * fall back to the next smaller MTU step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char	pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if ((ifp = m->m_pkthdr.rcvif) &&
		    ifp->if_index && ifp->if_index <= V_if_index) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy.
			 */
			if (sdp->sdl_family != AF_LINK ||
			    sdp->sdl_len > sizeof(sdlbuf)) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
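/*
 * V_ip_rsvp_on tracks whether this vnet's RSVP daemon currently holds
 * the raw socket, while V_rsvp_on (tested in ip_input() and
 * rsvp_input()) counts RSVP consumers more broadly, including the
 * multicast routing code; the init/done functions below keep the two
 * consistent.
 */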
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) { /* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}