/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

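/*
 * Usage note: the SYSCTL_VNET_INT knobs above and below are per-VNET,
 * so e.g. "sysctl net.inet.ip.forwarding=1" enables forwarding only in
 * the jail/vnet instance in which it is issued.
 */
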
static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

static VNET_DEFINE(int, ip_keepfaith);
#define	V_ip_keepfaith	VNET(ip_keepfaith)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
    "Enable packet capture for FAITH IPv4->IPv6 translator daemon");

static VNET_DEFINE(int, ip_sendsourcequench);
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

VNET_DEFINE(int, ip_do_randomid);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

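/*
 * With NETISR_POLICY_FLOW, netisr keeps packets of the same flow (as
 * identified by the mbuf flow ID) on the same queue/CPU, so IP input
 * preserves per-flow ordering while still spreading load across netisr
 * threads.
 */
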
extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);  /* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl);  /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
static VNET_DEFINE(int, nipq);	   /* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP statistics are stored in the "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
VNET_PCPUSTAT_SYSINIT(ipstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ipstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array.
 */
void
kmod_ipstat_inc(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], 1);
}

void
kmod_ipstat_dec(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], -1);
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

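/*
 * Note on the two handlers above: reading either OID is always allowed;
 * writing net.inet.ip.intr_queue_maxlen installs a new netisr queue
 * limit (values < 1 are rejected), while the only accepted write to
 * net.inet.ip.intr_queue_drops is 0, which clears the drop counter,
 * e.g. "sysctl net.inet.ip.intr_queue_drops=0".
 */
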
/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size)) {
		if (V_ip_output_flowtable_size < 256)
			V_ip_output_flowtable_size = 256;
		if (!powerof2(V_ip_output_flowtable_size)) {
			printf("flowtable must be power of 2 size\n");
			V_ip_output_flowtable_size = 2048;
		}
	} else {
		/*
		 * round up to the next power of 2
		 */
		V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64) - 1);
	}
	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

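/*
 * After ip_init(), ip_protox[] maps an IP protocol number to an index
 * into inetsw[].  For example, assuming TCP is compiled in,
 * ip_protox[IPPROTO_TCP] holds the inetsw[] slot whose pr_input is the
 * TCP input routine, while unhandled protocol numbers still map to the
 * raw IP slot.
 */
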
#ifdef VIMAGE
void
ip_destroy(void)
{

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;			/* dest changed after fw */
	struct in_addr odst;		/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
		if (dchg != 0) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
			ip_forward(m, 1);
			return;
		}
	}
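
	/*
	 * Example of the dchg logic above: if a pfil hook (e.g. a
	 * firewall NAT rule) rewrites ip_dst, odst no longer matches
	 * ip_dst and dchg becomes 1; a packet that then turns out not
	 * to be for us is handed to ip_forward() with that flag, so a
	 * locally destined address rewritten to a directly connected
	 * neighbor is still shipped on.
	 */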
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet; RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check that the RSVP daemon is
	 * running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			ifa_ref(&ia->ia_ifa);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
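
	/*
	 * Directed-broadcast example for the loop above: an interface
	 * configured as 192.0.2.5/24 has ia_broadaddr 192.0.2.255, so a
	 * datagram addressed to 192.0.2.255 is accepted here only when
	 * it arrives on that interface; the same destination arriving
	 * on another interface falls through to the forwarding decision
	 * below.  (192.0.2.x is a documentation-range placeholder.)
	 */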
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats. */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
		ifa_free(&ia->ia_ifa);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet. */
		hlen = ip->ip_hl << 2;
	}

#ifdef IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

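/*
 * Note on ip_input()'s fragment test above: IP_MF and IP_OFFMASK
 * together cover the MF bit and the 13-bit fragment offset, so the
 * masked expression is non-zero for every fragment (a first fragment
 * has offset 0 but MF set; the last fragment has MF clear but a
 * non-zero offset).  Masking against htons(IP_MF | IP_OFFMASK) lets
 * the test run while ip_off is still in network byte order.
 */
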
/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

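	/*
	 * Reassembly queues are keyed by the tuple (ip_id, ip_src,
	 * ip_dst, ip_p): all fragments of one original datagram share
	 * those fields, so the hash over (ip_src, ip_id) plus the exact
	 * match in the TAILQ_FOREACH above is sufficient to find the
	 * right queue.  New queues are inserted at the head of a
	 * bucket, which is why the eviction path above reclaims from
	 * the tail, where the oldest queues end up.
	 */
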
found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall);	/* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_FRAG;
	} else
		m->m_flags &= ~M_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.header = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}
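
	/*
	 * Worked example of the preceding-overlap math above: if p
	 * carries bytes [0, 24) (ip_off 0, ip_len 24 after the <<3
	 * conversion) and the new fragment claims to start at byte 16
	 * with length 16, then i = 0 + 24 - 16 = 8, so the first 8
	 * bytes of the new fragment are duplicates; m_adj(m, 8) drops
	 * them and the fragment is requeued as [24, 32).  Had i been
	 * >= 16, the new fragment would carry no new data and would be
	 * dropped instead.
	 */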
	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif
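
	/*
	 * Arithmetic note on the fold above: each fragment contributed
	 * a 16-bit partial checksum in csum_data, so after summing them
	 * the accumulator may have overflowed into the high 16 bits.
	 * For example, a sum of 0x0002fffd folds to 0xfffd + 0x2 =
	 * 0xffff.  Folding once here is enough because far fewer than
	 * 64k values are summed, so the high half stays small and the
	 * later checksum finalization absorbs any remaining carry.
	 */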
	/*
	 * Create header for new ip packet by modifying header of first
	 * packet;  dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

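/*
 * Lifetime example: each reassembly queue starts with ipq_ttl set to
 * IPFRAGTTL and is decremented once per ip_slowtimo() tick above, so
 * with the traditional 2 Hz slow timeout an incomplete datagram is
 * discarded after roughly IPFRAGTTL / 2 seconds.
 */
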
/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

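/*
 * Typical use of the pair above, as a sketch: a protocol module first
 * publishes its protosw entry with pf_proto_register() and then calls
 * ipproto_register() with its protocol number so that ip_input()
 * starts dispatching that number to its pr_input routine;
 * ipproto_unregister() restores the slot to the raw IP handler on
 * unload.
 */
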
/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In case of IPsec, don't discard it just yet, but pass it to
	 * ip_output in case of outgoing IPsec policy.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	mcopy = m_gethdr(M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/* Router requirements say to only send host redirects. */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}
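
	/*
	 * Redirect example for the block above: a host on 192.0.2.0/24
	 * sends us a packet whose best route points back out the same
	 * interface it arrived on, toward gateway 192.0.2.1.  Since the
	 * source shares that subnet and the route was not created or
	 * modified by a redirect, we forward the packet anyway and
	 * queue an ICMP host redirect telling the sender to use
	 * 192.0.2.1 directly next time.  (Documentation-range
	 * addresses, for illustration only.)
	 */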
	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;
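
	/*
	 * Next-hop MTU example for the EMSGSIZE case above: if
	 * ip_output() fails for a 1500-byte datagram and the route's
	 * rmx_mtu is 1400, the ICMP "fragmentation needed" error
	 * advertises 1400 (clamped to the interface MTU).  With no
	 * cached route MTU we fall back to the interface MTU, or to
	 * ip_next_mtu()'s next smaller standard step.
	 */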
	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if ((ifp = m->m_pkthdr.rcvif) &&
		    ifp->if_index && ifp->if_index <= V_if_index) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy.
			 */
			if (sdp->sdl_family != AF_LINK ||
			    sdp->sdl_len > sizeof(sdlbuf)) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

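/*
 * Pattern note for ip_savecontrol() above: mp always points at the
 * m_next slot where the next control mbuf should be linked.  After
 * each successful sbcreatecontrol() the "mp = &(*mp)->m_next" step
 * advances that slot, so a socket with, say, SO_TIMESTAMP and
 * IP_RECVDSTADDR both set receives a chain of two control messages in
 * that order.
 */
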
/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) {	/* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet. */
	m_freem(m);
}