1 /*- 2 * Copyright (c) 1982, 1986, 1988, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_bootp.h" 36 #include "opt_ipfw.h" 37 #include "opt_ipstealth.h" 38 #include "opt_ipsec.h" 39 #include "opt_route.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/mbuf.h> 44 #include <sys/malloc.h> 45 #include <sys/domain.h> 46 #include <sys/protosw.h> 47 #include <sys/socket.h> 48 #include <sys/time.h> 49 #include <sys/kernel.h> 50 #include <sys/lock.h> 51 #include <sys/rwlock.h> 52 #include <sys/syslog.h> 53 #include <sys/sysctl.h> 54 55 #include <net/pfil.h> 56 #include <net/if.h> 57 #include <net/if_types.h> 58 #include <net/if_var.h> 59 #include <net/if_dl.h> 60 #include <net/route.h> 61 #include <net/netisr.h> 62 #include <net/vnet.h> 63 #include <net/flowtable.h> 64 65 #include <netinet/in.h> 66 #include <netinet/in_systm.h> 67 #include <netinet/in_var.h> 68 #include <netinet/ip.h> 69 #include <netinet/in_pcb.h> 70 #include <netinet/ip_var.h> 71 #include <netinet/ip_fw.h> 72 #include <netinet/ip_icmp.h> 73 #include <netinet/ip_options.h> 74 #include <machine/in_cksum.h> 75 #include <netinet/ip_carp.h> 76 #ifdef IPSEC 77 #include <netinet/ip_ipsec.h> 78 #endif /* IPSEC */ 79 80 #include <sys/socketvar.h> 81 82 #include <security/mac/mac_framework.h> 83 84 #ifdef CTASSERT 85 CTASSERT(sizeof(struct ip) == 20); 86 #endif 87 88 struct rwlock in_ifaddr_lock; 89 RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock"); 90 91 VNET_DEFINE(int, rsvp_on); 92 93 VNET_DEFINE(int, ipforwarding); 94 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW, 95 &VNET_NAME(ipforwarding), 0, 96 "Enable IP forwarding between interfaces"); 97 98 static VNET_DEFINE(int, ipsendredirects) = 1; /* XXX */ 99 #define V_ipsendredirects VNET(ipsendredirects) 100 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW, 101 &VNET_NAME(ipsendredirects), 0, 102 "Enable sending IP redirects"); 103 104 static VNET_DEFINE(int, ip_keepfaith); 105 #define V_ip_keepfaith VNET(ip_keepfaith) 106 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW, 107 &VNET_NAME(ip_keepfaith), 0, 108 "Enable packet capture for FAITH IPv4->IPv6 translater daemon"); 109 110 static VNET_DEFINE(int, ip_sendsourcequench); 111 #define V_ip_sendsourcequench VNET(ip_sendsourcequench) 112 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW, 113 &VNET_NAME(ip_sendsourcequench), 0, 114 "Enable the transmission of source quench packets"); 115 116 VNET_DEFINE(int, ip_do_randomid); 117 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW, 118 &VNET_NAME(ip_do_randomid), 0, 119 "Assign random ip_id values"); 120 121 /* 122 * XXX - Setting ip_checkinterface mostly implements the receive side of 123 * the Strong ES model described in RFC 1122, but since the routing table 124 * and transmit implementation do not implement the Strong ES model, 125 * setting this to 1 results in an odd hybrid. 126 * 127 * XXX - ip_checkinterface currently must be disabled if you use ipnat 128 * to translate the destination address to another local interface. 129 * 130 * XXX - ip_checkinterface must be disabled if you add IP aliases 131 * to the loopback interface instead of the interface where the 132 * packets for those addresses are received. 133 */ 134 static VNET_DEFINE(int, ip_checkinterface); 135 #define V_ip_checkinterface VNET(ip_checkinterface) 136 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW, 137 &VNET_NAME(ip_checkinterface), 0, 138 "Verify packet arrives on correct interface"); 139 140 VNET_DEFINE(struct pfil_head, inet_pfil_hook); /* Packet filter hooks */ 141 142 static struct netisr_handler ip_nh = { 143 .nh_name = "ip", 144 .nh_handler = ip_input, 145 .nh_proto = NETISR_IP, 146 .nh_policy = NETISR_POLICY_FLOW, 147 }; 148 149 extern struct domain inetdomain; 150 extern struct protosw inetsw[]; 151 u_char ip_protox[IPPROTO_MAX]; 152 VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead); /* first inet address */ 153 VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */ 154 VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */ 155 156 static VNET_DEFINE(uma_zone_t, ipq_zone); 157 static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]); 158 static struct mtx ipqlock; 159 160 #define V_ipq_zone VNET(ipq_zone) 161 #define V_ipq VNET(ipq) 162 163 #define IPQ_LOCK() mtx_lock(&ipqlock) 164 #define IPQ_UNLOCK() mtx_unlock(&ipqlock) 165 #define IPQ_LOCK_INIT() mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF) 166 #define IPQ_LOCK_ASSERT() mtx_assert(&ipqlock, MA_OWNED) 167 168 static void maxnipq_update(void); 169 static void ipq_zone_change(void *); 170 static void ip_drain_locked(void); 171 172 static VNET_DEFINE(int, maxnipq); /* Administrative limit on # reass queues. */ 173 static VNET_DEFINE(int, nipq); /* Total # of reass queues */ 174 #define V_maxnipq VNET(maxnipq) 175 #define V_nipq VNET(nipq) 176 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD, 177 &VNET_NAME(nipq), 0, 178 "Current number of IPv4 fragment reassembly queue entries"); 179 180 static VNET_DEFINE(int, maxfragsperpacket); 181 #define V_maxfragsperpacket VNET(maxfragsperpacket) 182 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW, 183 &VNET_NAME(maxfragsperpacket), 0, 184 "Maximum number of IPv4 fragments allowed per packet"); 185 186 #ifdef IPCTL_DEFMTU 187 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW, 188 &ip_mtu, 0, "Default MTU"); 189 #endif 190 191 #ifdef IPSTEALTH 192 VNET_DEFINE(int, ipstealth); 193 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW, 194 &VNET_NAME(ipstealth), 0, 195 "IP stealth mode, no TTL decrementation on forwarding"); 196 #endif 197 198 #ifdef FLOWTABLE 199 static VNET_DEFINE(int, ip_output_flowtable_size) = 2048; 200 VNET_DEFINE(struct flowtable *, ip_ft); 201 #define V_ip_output_flowtable_size VNET(ip_output_flowtable_size) 202 203 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN, 204 &VNET_NAME(ip_output_flowtable_size), 2048, 205 "number of entries in the per-cpu output flow caches"); 206 #endif 207 208 static void ip_freef(struct ipqhead *, struct ipq *); 209 210 /* 211 * IP statistics are stored in struct ipstat_p, which is 212 * an "array" of counter(9)s. Although it isn't a real 213 * array, we treat it as array to reduce code bloat. 214 */ 215 VNET_DEFINE(struct ipstat_p, ipstatp); 216 217 static void 218 vnet_ipstatp_init(const void *unused) 219 { 220 counter_u64_t *c; 221 int i; 222 223 for (i = 0, c = (counter_u64_t *)&V_ipstatp; 224 i < sizeof(V_ipstatp) / sizeof(counter_u64_t); 225 i++, c++) { 226 *c = counter_u64_alloc(M_WAITOK); 227 counter_u64_zero(*c); 228 } 229 } 230 VNET_SYSINIT(vnet_ipstatp_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 231 vnet_ipstatp_init, NULL); 232 233 #ifdef VIMAGE 234 static void 235 vnet_ipstatp_uninit(const void *unused) 236 { 237 counter_u64_t *c; 238 int i; 239 240 for (i = 0, c = (counter_u64_t *)&V_ipstatp; 241 i < sizeof(V_ipstatp) / sizeof(counter_u64_t); 242 i++, c++) 243 counter_u64_free(*c); 244 } 245 VNET_SYSUNINIT(vnet_ipstatp_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 246 vnet_ipstatp_uninit, NULL); 247 #endif /* VIMAGE */ 248 249 static int 250 ipstat_sysctl(SYSCTL_HANDLER_ARGS) 251 { 252 struct ipstat ipstat; 253 counter_u64_t *c; 254 uint64_t *v; 255 int i; 256 257 for (i = 0, c = (counter_u64_t *)&V_ipstatp, v = (uint64_t *)&ipstat; 258 i < sizeof(V_ipstatp) / sizeof(counter_u64_t); 259 i++, c++, v++) { 260 *v = counter_u64_fetch(*c); 261 /* 262 * Old interface allowed to rewrite 'struct ipstat', and 263 * netstat(1) used it to zero the structure. To keep 264 * compatibility with old netstat(1) we will zero out 265 * statistics on every write attempt, however we no longer 266 * support writing arbitrary fake values to the statistics. 267 */ 268 if (req->newptr) 269 counter_u64_zero(*c); 270 } 271 272 return (SYSCTL_OUT(req, &ipstat, sizeof(ipstat))); 273 } 274 SYSCTL_VNET_PROC(_net_inet_ip, IPCTL_STATS, stats, CTLTYPE_OPAQUE | CTLFLAG_RW, 275 NULL, 0, ipstat_sysctl, "I", 276 "IP statistics (struct ipstat, netinet/ip_var.h)"); 277 278 /* 279 * Kernel module interface for updating ipstat. The argument is an index 280 * into ipstat treated as an array. 281 */ 282 void 283 kmod_ipstat_inc(int statnum) 284 { 285 286 counter_u64_add((counter_u64_t )&V_ipstatp + statnum, 1); 287 } 288 289 void 290 kmod_ipstat_dec(int statnum) 291 { 292 293 counter_u64_add((counter_u64_t )&V_ipstatp + statnum, -1); 294 } 295 296 static int 297 sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS) 298 { 299 int error, qlimit; 300 301 netisr_getqlimit(&ip_nh, &qlimit); 302 error = sysctl_handle_int(oidp, &qlimit, 0, req); 303 if (error || !req->newptr) 304 return (error); 305 if (qlimit < 1) 306 return (EINVAL); 307 return (netisr_setqlimit(&ip_nh, qlimit)); 308 } 309 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, 310 CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I", 311 "Maximum size of the IP input queue"); 312 313 static int 314 sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS) 315 { 316 u_int64_t qdrops_long; 317 int error, qdrops; 318 319 netisr_getqdrops(&ip_nh, &qdrops_long); 320 qdrops = qdrops_long; 321 error = sysctl_handle_int(oidp, &qdrops, 0, req); 322 if (error || !req->newptr) 323 return (error); 324 if (qdrops != 0) 325 return (EINVAL); 326 netisr_clearqdrops(&ip_nh); 327 return (0); 328 } 329 330 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, 331 CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I", 332 "Number of packets dropped from the IP input queue"); 333 334 /* 335 * IP initialization: fill in IP protocol switch table. 336 * All protocols not implemented in kernel go to raw IP protocol handler. 337 */ 338 void 339 ip_init(void) 340 { 341 struct protosw *pr; 342 int i; 343 344 V_ip_id = time_second & 0xffff; 345 346 TAILQ_INIT(&V_in_ifaddrhead); 347 V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask); 348 349 /* Initialize IP reassembly queue. */ 350 for (i = 0; i < IPREASS_NHASH; i++) 351 TAILQ_INIT(&V_ipq[i]); 352 V_maxnipq = nmbclusters / 32; 353 V_maxfragsperpacket = 16; 354 V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL, 355 NULL, UMA_ALIGN_PTR, 0); 356 maxnipq_update(); 357 358 /* Initialize packet filter hooks. */ 359 V_inet_pfil_hook.ph_type = PFIL_TYPE_AF; 360 V_inet_pfil_hook.ph_af = AF_INET; 361 if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0) 362 printf("%s: WARNING: unable to register pfil hook, " 363 "error %d\n", __func__, i); 364 365 #ifdef FLOWTABLE 366 if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size", 367 &V_ip_output_flowtable_size)) { 368 if (V_ip_output_flowtable_size < 256) 369 V_ip_output_flowtable_size = 256; 370 if (!powerof2(V_ip_output_flowtable_size)) { 371 printf("flowtable must be power of 2 size\n"); 372 V_ip_output_flowtable_size = 2048; 373 } 374 } else { 375 /* 376 * round up to the next power of 2 377 */ 378 V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64)-1); 379 } 380 V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU); 381 #endif 382 383 /* Skip initialization of globals for non-default instances. */ 384 if (!IS_DEFAULT_VNET(curvnet)) 385 return; 386 387 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 388 if (pr == NULL) 389 panic("ip_init: PF_INET not found"); 390 391 /* Initialize the entire ip_protox[] array to IPPROTO_RAW. */ 392 for (i = 0; i < IPPROTO_MAX; i++) 393 ip_protox[i] = pr - inetsw; 394 /* 395 * Cycle through IP protocols and put them into the appropriate place 396 * in ip_protox[]. 397 */ 398 for (pr = inetdomain.dom_protosw; 399 pr < inetdomain.dom_protoswNPROTOSW; pr++) 400 if (pr->pr_domain->dom_family == PF_INET && 401 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) { 402 /* Be careful to only index valid IP protocols. */ 403 if (pr->pr_protocol < IPPROTO_MAX) 404 ip_protox[pr->pr_protocol] = pr - inetsw; 405 } 406 407 EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change, 408 NULL, EVENTHANDLER_PRI_ANY); 409 410 /* Initialize various other remaining things. */ 411 IPQ_LOCK_INIT(); 412 netisr_register(&ip_nh); 413 } 414 415 #ifdef VIMAGE 416 void 417 ip_destroy(void) 418 { 419 420 /* Cleanup in_ifaddr hash table; should be empty. */ 421 hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask); 422 423 IPQ_LOCK(); 424 ip_drain_locked(); 425 IPQ_UNLOCK(); 426 427 uma_zdestroy(V_ipq_zone); 428 } 429 #endif 430 431 /* 432 * Ip input routine. Checksum and byte swap header. If fragmented 433 * try to reassemble. Process options. Pass to next level. 434 */ 435 void 436 ip_input(struct mbuf *m) 437 { 438 struct ip *ip = NULL; 439 struct in_ifaddr *ia = NULL; 440 struct ifaddr *ifa; 441 struct ifnet *ifp; 442 int checkif, hlen = 0; 443 uint16_t sum, ip_len; 444 int dchg = 0; /* dest changed after fw */ 445 struct in_addr odst; /* original dst address */ 446 447 M_ASSERTPKTHDR(m); 448 449 if (m->m_flags & M_FASTFWD_OURS) { 450 m->m_flags &= ~M_FASTFWD_OURS; 451 /* Set up some basics that will be used later. */ 452 ip = mtod(m, struct ip *); 453 hlen = ip->ip_hl << 2; 454 ip_len = ntohs(ip->ip_len); 455 goto ours; 456 } 457 458 IPSTAT_INC(ips_total); 459 460 if (m->m_pkthdr.len < sizeof(struct ip)) 461 goto tooshort; 462 463 if (m->m_len < sizeof (struct ip) && 464 (m = m_pullup(m, sizeof (struct ip))) == NULL) { 465 IPSTAT_INC(ips_toosmall); 466 return; 467 } 468 ip = mtod(m, struct ip *); 469 470 if (ip->ip_v != IPVERSION) { 471 IPSTAT_INC(ips_badvers); 472 goto bad; 473 } 474 475 hlen = ip->ip_hl << 2; 476 if (hlen < sizeof(struct ip)) { /* minimum header length */ 477 IPSTAT_INC(ips_badhlen); 478 goto bad; 479 } 480 if (hlen > m->m_len) { 481 if ((m = m_pullup(m, hlen)) == NULL) { 482 IPSTAT_INC(ips_badhlen); 483 return; 484 } 485 ip = mtod(m, struct ip *); 486 } 487 488 /* 127/8 must not appear on wire - RFC1122 */ 489 ifp = m->m_pkthdr.rcvif; 490 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || 491 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { 492 if ((ifp->if_flags & IFF_LOOPBACK) == 0) { 493 IPSTAT_INC(ips_badaddr); 494 goto bad; 495 } 496 } 497 498 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { 499 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); 500 } else { 501 if (hlen == sizeof(struct ip)) { 502 sum = in_cksum_hdr(ip); 503 } else { 504 sum = in_cksum(m, hlen); 505 } 506 } 507 if (sum) { 508 IPSTAT_INC(ips_badsum); 509 goto bad; 510 } 511 512 #ifdef ALTQ 513 if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) 514 /* packet is dropped by traffic conditioner */ 515 return; 516 #endif 517 518 ip_len = ntohs(ip->ip_len); 519 if (ip_len < hlen) { 520 IPSTAT_INC(ips_badlen); 521 goto bad; 522 } 523 524 /* 525 * Check that the amount of data in the buffers 526 * is as at least much as the IP header would have us expect. 527 * Trim mbufs if longer than we expect. 528 * Drop packet if shorter than we expect. 529 */ 530 if (m->m_pkthdr.len < ip_len) { 531 tooshort: 532 IPSTAT_INC(ips_tooshort); 533 goto bad; 534 } 535 if (m->m_pkthdr.len > ip_len) { 536 if (m->m_len == m->m_pkthdr.len) { 537 m->m_len = ip_len; 538 m->m_pkthdr.len = ip_len; 539 } else 540 m_adj(m, ip_len - m->m_pkthdr.len); 541 } 542 #ifdef IPSEC 543 /* 544 * Bypass packet filtering for packets previously handled by IPsec. 545 */ 546 if (ip_ipsec_filtertunnel(m)) 547 goto passin; 548 #endif /* IPSEC */ 549 550 /* 551 * Run through list of hooks for input packets. 552 * 553 * NB: Beware of the destination address changing (e.g. 554 * by NAT rewriting). When this happens, tell 555 * ip_forward to do the right thing. 556 */ 557 558 /* Jump over all PFIL processing if hooks are not active. */ 559 if (!PFIL_HOOKED(&V_inet_pfil_hook)) 560 goto passin; 561 562 odst = ip->ip_dst; 563 if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0) 564 return; 565 if (m == NULL) /* consumed by filter */ 566 return; 567 568 ip = mtod(m, struct ip *); 569 dchg = (odst.s_addr != ip->ip_dst.s_addr); 570 ifp = m->m_pkthdr.rcvif; 571 572 if (m->m_flags & M_FASTFWD_OURS) { 573 m->m_flags &= ~M_FASTFWD_OURS; 574 goto ours; 575 } 576 if (m->m_flags & M_IP_NEXTHOP) { 577 dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL); 578 if (dchg != 0) { 579 /* 580 * Directly ship the packet on. This allows 581 * forwarding packets originally destined to us 582 * to some other directly connected host. 583 */ 584 ip_forward(m, 1); 585 return; 586 } 587 } 588 passin: 589 590 /* 591 * Process options and, if not destined for us, 592 * ship it on. ip_dooptions returns 1 when an 593 * error was detected (causing an icmp message 594 * to be sent and the original packet to be freed). 595 */ 596 if (hlen > sizeof (struct ip) && ip_dooptions(m, 0)) 597 return; 598 599 /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no 600 * matter if it is destined to another node, or whether it is 601 * a multicast one, RSVP wants it! and prevents it from being forwarded 602 * anywhere else. Also checks if the rsvp daemon is running before 603 * grabbing the packet. 604 */ 605 if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP) 606 goto ours; 607 608 /* 609 * Check our list of addresses, to see if the packet is for us. 610 * If we don't have any addresses, assume any unicast packet 611 * we receive might be for us (and let the upper layers deal 612 * with it). 613 */ 614 if (TAILQ_EMPTY(&V_in_ifaddrhead) && 615 (m->m_flags & (M_MCAST|M_BCAST)) == 0) 616 goto ours; 617 618 /* 619 * Enable a consistency check between the destination address 620 * and the arrival interface for a unicast packet (the RFC 1122 621 * strong ES model) if IP forwarding is disabled and the packet 622 * is not locally generated and the packet is not subject to 623 * 'ipfw fwd'. 624 * 625 * XXX - Checking also should be disabled if the destination 626 * address is ipnat'ed to a different interface. 627 * 628 * XXX - Checking is incompatible with IP aliases added 629 * to the loopback interface instead of the interface where 630 * the packets are received. 631 * 632 * XXX - This is the case for carp vhost IPs as well so we 633 * insert a workaround. If the packet got here, we already 634 * checked with carp_iamatch() and carp_forus(). 635 */ 636 checkif = V_ip_checkinterface && (V_ipforwarding == 0) && 637 ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) && 638 ifp->if_carp == NULL && (dchg == 0); 639 640 /* 641 * Check for exact addresses in the hash bucket. 642 */ 643 /* IN_IFADDR_RLOCK(); */ 644 LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) { 645 /* 646 * If the address matches, verify that the packet 647 * arrived via the correct interface if checking is 648 * enabled. 649 */ 650 if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr && 651 (!checkif || ia->ia_ifp == ifp)) { 652 ifa_ref(&ia->ia_ifa); 653 /* IN_IFADDR_RUNLOCK(); */ 654 goto ours; 655 } 656 } 657 /* IN_IFADDR_RUNLOCK(); */ 658 659 /* 660 * Check for broadcast addresses. 661 * 662 * Only accept broadcast packets that arrive via the matching 663 * interface. Reception of forwarded directed broadcasts would 664 * be handled via ip_forward() and ether_output() with the loopback 665 * into the stack for SIMPLEX interfaces handled by ether_output(). 666 */ 667 if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) { 668 IF_ADDR_RLOCK(ifp); 669 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 670 if (ifa->ifa_addr->sa_family != AF_INET) 671 continue; 672 ia = ifatoia(ifa); 673 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == 674 ip->ip_dst.s_addr) { 675 ifa_ref(ifa); 676 IF_ADDR_RUNLOCK(ifp); 677 goto ours; 678 } 679 #ifdef BOOTP_COMPAT 680 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) { 681 ifa_ref(ifa); 682 IF_ADDR_RUNLOCK(ifp); 683 goto ours; 684 } 685 #endif 686 } 687 IF_ADDR_RUNLOCK(ifp); 688 ia = NULL; 689 } 690 /* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */ 691 if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) { 692 IPSTAT_INC(ips_cantforward); 693 m_freem(m); 694 return; 695 } 696 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 697 if (V_ip_mrouter) { 698 /* 699 * If we are acting as a multicast router, all 700 * incoming multicast packets are passed to the 701 * kernel-level multicast forwarding function. 702 * The packet is returned (relatively) intact; if 703 * ip_mforward() returns a non-zero value, the packet 704 * must be discarded, else it may be accepted below. 705 */ 706 if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) { 707 IPSTAT_INC(ips_cantforward); 708 m_freem(m); 709 return; 710 } 711 712 /* 713 * The process-level routing daemon needs to receive 714 * all multicast IGMP packets, whether or not this 715 * host belongs to their destination groups. 716 */ 717 if (ip->ip_p == IPPROTO_IGMP) 718 goto ours; 719 IPSTAT_INC(ips_forward); 720 } 721 /* 722 * Assume the packet is for us, to avoid prematurely taking 723 * a lock on the in_multi hash. Protocols must perform 724 * their own filtering and update statistics accordingly. 725 */ 726 goto ours; 727 } 728 if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST) 729 goto ours; 730 if (ip->ip_dst.s_addr == INADDR_ANY) 731 goto ours; 732 733 /* 734 * FAITH(Firewall Aided Internet Translator) 735 */ 736 if (ifp && ifp->if_type == IFT_FAITH) { 737 if (V_ip_keepfaith) { 738 if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP) 739 goto ours; 740 } 741 m_freem(m); 742 return; 743 } 744 745 /* 746 * Not for us; forward if possible and desirable. 747 */ 748 if (V_ipforwarding == 0) { 749 IPSTAT_INC(ips_cantforward); 750 m_freem(m); 751 } else { 752 #ifdef IPSEC 753 if (ip_ipsec_fwd(m)) 754 goto bad; 755 #endif /* IPSEC */ 756 ip_forward(m, dchg); 757 } 758 return; 759 760 ours: 761 #ifdef IPSTEALTH 762 /* 763 * IPSTEALTH: Process non-routing options only 764 * if the packet is destined for us. 765 */ 766 if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) { 767 if (ia != NULL) 768 ifa_free(&ia->ia_ifa); 769 return; 770 } 771 #endif /* IPSTEALTH */ 772 773 /* Count the packet in the ip address stats */ 774 if (ia != NULL) { 775 ia->ia_ifa.if_ipackets++; 776 ia->ia_ifa.if_ibytes += m->m_pkthdr.len; 777 ifa_free(&ia->ia_ifa); 778 } 779 780 /* 781 * Attempt reassembly; if it succeeds, proceed. 782 * ip_reass() will return a different mbuf. 783 */ 784 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) { 785 m = ip_reass(m); 786 if (m == NULL) 787 return; 788 ip = mtod(m, struct ip *); 789 /* Get the header length of the reassembled packet */ 790 hlen = ip->ip_hl << 2; 791 } 792 793 #ifdef IPSEC 794 /* 795 * enforce IPsec policy checking if we are seeing last header. 796 * note that we do not visit this with protocols with pcb layer 797 * code - like udp/tcp/raw ip. 798 */ 799 if (ip_ipsec_input(m)) 800 goto bad; 801 #endif /* IPSEC */ 802 803 /* 804 * Switch out to protocol's input routine. 805 */ 806 IPSTAT_INC(ips_delivered); 807 808 (*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen); 809 return; 810 bad: 811 m_freem(m); 812 } 813 814 /* 815 * After maxnipq has been updated, propagate the change to UMA. The UMA zone 816 * max has slightly different semantics than the sysctl, for historical 817 * reasons. 818 */ 819 static void 820 maxnipq_update(void) 821 { 822 823 /* 824 * -1 for unlimited allocation. 825 */ 826 if (V_maxnipq < 0) 827 uma_zone_set_max(V_ipq_zone, 0); 828 /* 829 * Positive number for specific bound. 830 */ 831 if (V_maxnipq > 0) 832 uma_zone_set_max(V_ipq_zone, V_maxnipq); 833 /* 834 * Zero specifies no further fragment queue allocation -- set the 835 * bound very low, but rely on implementation elsewhere to actually 836 * prevent allocation and reclaim current queues. 837 */ 838 if (V_maxnipq == 0) 839 uma_zone_set_max(V_ipq_zone, 1); 840 } 841 842 static void 843 ipq_zone_change(void *tag) 844 { 845 846 if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) { 847 V_maxnipq = nmbclusters / 32; 848 maxnipq_update(); 849 } 850 } 851 852 static int 853 sysctl_maxnipq(SYSCTL_HANDLER_ARGS) 854 { 855 int error, i; 856 857 i = V_maxnipq; 858 error = sysctl_handle_int(oidp, &i, 0, req); 859 if (error || !req->newptr) 860 return (error); 861 862 /* 863 * XXXRW: Might be a good idea to sanity check the argument and place 864 * an extreme upper bound. 865 */ 866 if (i < -1) 867 return (EINVAL); 868 V_maxnipq = i; 869 maxnipq_update(); 870 return (0); 871 } 872 873 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW, 874 NULL, 0, sysctl_maxnipq, "I", 875 "Maximum number of IPv4 fragment reassembly queue entries"); 876 877 /* 878 * Take incoming datagram fragment and try to reassemble it into 879 * whole datagram. If the argument is the first fragment or one 880 * in between the function will return NULL and store the mbuf 881 * in the fragment chain. If the argument is the last fragment 882 * the packet will be reassembled and the pointer to the new 883 * mbuf returned for further processing. Only m_tags attached 884 * to the first packet/fragment are preserved. 885 * The IP header is *NOT* adjusted out of iplen. 886 */ 887 struct mbuf * 888 ip_reass(struct mbuf *m) 889 { 890 struct ip *ip; 891 struct mbuf *p, *q, *nq, *t; 892 struct ipq *fp = NULL; 893 struct ipqhead *head; 894 int i, hlen, next; 895 u_int8_t ecn, ecn0; 896 u_short hash; 897 898 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */ 899 if (V_maxnipq == 0 || V_maxfragsperpacket == 0) { 900 IPSTAT_INC(ips_fragments); 901 IPSTAT_INC(ips_fragdropped); 902 m_freem(m); 903 return (NULL); 904 } 905 906 ip = mtod(m, struct ip *); 907 hlen = ip->ip_hl << 2; 908 909 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); 910 head = &V_ipq[hash]; 911 IPQ_LOCK(); 912 913 /* 914 * Look for queue of fragments 915 * of this datagram. 916 */ 917 TAILQ_FOREACH(fp, head, ipq_list) 918 if (ip->ip_id == fp->ipq_id && 919 ip->ip_src.s_addr == fp->ipq_src.s_addr && 920 ip->ip_dst.s_addr == fp->ipq_dst.s_addr && 921 #ifdef MAC 922 mac_ipq_match(m, fp) && 923 #endif 924 ip->ip_p == fp->ipq_p) 925 goto found; 926 927 fp = NULL; 928 929 /* 930 * Attempt to trim the number of allocated fragment queues if it 931 * exceeds the administrative limit. 932 */ 933 if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) { 934 /* 935 * drop something from the tail of the current queue 936 * before proceeding further 937 */ 938 struct ipq *q = TAILQ_LAST(head, ipqhead); 939 if (q == NULL) { /* gak */ 940 for (i = 0; i < IPREASS_NHASH; i++) { 941 struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead); 942 if (r) { 943 IPSTAT_ADD(ips_fragtimeout, 944 r->ipq_nfrags); 945 ip_freef(&V_ipq[i], r); 946 break; 947 } 948 } 949 } else { 950 IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags); 951 ip_freef(head, q); 952 } 953 } 954 955 found: 956 /* 957 * Adjust ip_len to not reflect header, 958 * convert offset of this to bytes. 959 */ 960 ip->ip_len = htons(ntohs(ip->ip_len) - hlen); 961 if (ip->ip_off & htons(IP_MF)) { 962 /* 963 * Make sure that fragments have a data length 964 * that's a non-zero multiple of 8 bytes. 965 */ 966 if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) { 967 IPSTAT_INC(ips_toosmall); /* XXX */ 968 goto dropfrag; 969 } 970 m->m_flags |= M_FRAG; 971 } else 972 m->m_flags &= ~M_FRAG; 973 ip->ip_off = htons(ntohs(ip->ip_off) << 3); 974 975 /* 976 * Attempt reassembly; if it succeeds, proceed. 977 * ip_reass() will return a different mbuf. 978 */ 979 IPSTAT_INC(ips_fragments); 980 m->m_pkthdr.header = ip; 981 982 /* Previous ip_reass() started here. */ 983 /* 984 * Presence of header sizes in mbufs 985 * would confuse code below. 986 */ 987 m->m_data += hlen; 988 m->m_len -= hlen; 989 990 /* 991 * If first fragment to arrive, create a reassembly queue. 992 */ 993 if (fp == NULL) { 994 fp = uma_zalloc(V_ipq_zone, M_NOWAIT); 995 if (fp == NULL) 996 goto dropfrag; 997 #ifdef MAC 998 if (mac_ipq_init(fp, M_NOWAIT) != 0) { 999 uma_zfree(V_ipq_zone, fp); 1000 fp = NULL; 1001 goto dropfrag; 1002 } 1003 mac_ipq_create(m, fp); 1004 #endif 1005 TAILQ_INSERT_HEAD(head, fp, ipq_list); 1006 V_nipq++; 1007 fp->ipq_nfrags = 1; 1008 fp->ipq_ttl = IPFRAGTTL; 1009 fp->ipq_p = ip->ip_p; 1010 fp->ipq_id = ip->ip_id; 1011 fp->ipq_src = ip->ip_src; 1012 fp->ipq_dst = ip->ip_dst; 1013 fp->ipq_frags = m; 1014 m->m_nextpkt = NULL; 1015 goto done; 1016 } else { 1017 fp->ipq_nfrags++; 1018 #ifdef MAC 1019 mac_ipq_update(m, fp); 1020 #endif 1021 } 1022 1023 #define GETIP(m) ((struct ip*)((m)->m_pkthdr.header)) 1024 1025 /* 1026 * Handle ECN by comparing this segment with the first one; 1027 * if CE is set, do not lose CE. 1028 * drop if CE and not-ECT are mixed for the same packet. 1029 */ 1030 ecn = ip->ip_tos & IPTOS_ECN_MASK; 1031 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK; 1032 if (ecn == IPTOS_ECN_CE) { 1033 if (ecn0 == IPTOS_ECN_NOTECT) 1034 goto dropfrag; 1035 if (ecn0 != IPTOS_ECN_CE) 1036 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE; 1037 } 1038 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) 1039 goto dropfrag; 1040 1041 /* 1042 * Find a segment which begins after this one does. 1043 */ 1044 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) 1045 if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off)) 1046 break; 1047 1048 /* 1049 * If there is a preceding segment, it may provide some of 1050 * our data already. If so, drop the data from the incoming 1051 * segment. If it provides all of our data, drop us, otherwise 1052 * stick new segment in the proper place. 1053 * 1054 * If some of the data is dropped from the preceding 1055 * segment, then it's checksum is invalidated. 1056 */ 1057 if (p) { 1058 i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) - 1059 ntohs(ip->ip_off); 1060 if (i > 0) { 1061 if (i >= ntohs(ip->ip_len)) 1062 goto dropfrag; 1063 m_adj(m, i); 1064 m->m_pkthdr.csum_flags = 0; 1065 ip->ip_off = htons(ntohs(ip->ip_off) + i); 1066 ip->ip_len = htons(ntohs(ip->ip_len) - i); 1067 } 1068 m->m_nextpkt = p->m_nextpkt; 1069 p->m_nextpkt = m; 1070 } else { 1071 m->m_nextpkt = fp->ipq_frags; 1072 fp->ipq_frags = m; 1073 } 1074 1075 /* 1076 * While we overlap succeeding segments trim them or, 1077 * if they are completely covered, dequeue them. 1078 */ 1079 for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) > 1080 ntohs(GETIP(q)->ip_off); q = nq) { 1081 i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) - 1082 ntohs(GETIP(q)->ip_off); 1083 if (i < ntohs(GETIP(q)->ip_len)) { 1084 GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i); 1085 GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i); 1086 m_adj(q, i); 1087 q->m_pkthdr.csum_flags = 0; 1088 break; 1089 } 1090 nq = q->m_nextpkt; 1091 m->m_nextpkt = nq; 1092 IPSTAT_INC(ips_fragdropped); 1093 fp->ipq_nfrags--; 1094 m_freem(q); 1095 } 1096 1097 /* 1098 * Check for complete reassembly and perform frag per packet 1099 * limiting. 1100 * 1101 * Frag limiting is performed here so that the nth frag has 1102 * a chance to complete the packet before we drop the packet. 1103 * As a result, n+1 frags are actually allowed per packet, but 1104 * only n will ever be stored. (n = maxfragsperpacket.) 1105 * 1106 */ 1107 next = 0; 1108 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { 1109 if (ntohs(GETIP(q)->ip_off) != next) { 1110 if (fp->ipq_nfrags > V_maxfragsperpacket) { 1111 IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags); 1112 ip_freef(head, fp); 1113 } 1114 goto done; 1115 } 1116 next += ntohs(GETIP(q)->ip_len); 1117 } 1118 /* Make sure the last packet didn't have the IP_MF flag */ 1119 if (p->m_flags & M_FRAG) { 1120 if (fp->ipq_nfrags > V_maxfragsperpacket) { 1121 IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags); 1122 ip_freef(head, fp); 1123 } 1124 goto done; 1125 } 1126 1127 /* 1128 * Reassembly is complete. Make sure the packet is a sane size. 1129 */ 1130 q = fp->ipq_frags; 1131 ip = GETIP(q); 1132 if (next + (ip->ip_hl << 2) > IP_MAXPACKET) { 1133 IPSTAT_INC(ips_toolong); 1134 IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags); 1135 ip_freef(head, fp); 1136 goto done; 1137 } 1138 1139 /* 1140 * Concatenate fragments. 1141 */ 1142 m = q; 1143 t = m->m_next; 1144 m->m_next = NULL; 1145 m_cat(m, t); 1146 nq = q->m_nextpkt; 1147 q->m_nextpkt = NULL; 1148 for (q = nq; q != NULL; q = nq) { 1149 nq = q->m_nextpkt; 1150 q->m_nextpkt = NULL; 1151 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags; 1152 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data; 1153 m_cat(m, q); 1154 } 1155 /* 1156 * In order to do checksumming faster we do 'end-around carry' here 1157 * (and not in for{} loop), though it implies we are not going to 1158 * reassemble more than 64k fragments. 1159 */ 1160 m->m_pkthdr.csum_data = 1161 (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16); 1162 #ifdef MAC 1163 mac_ipq_reassemble(fp, m); 1164 mac_ipq_destroy(fp); 1165 #endif 1166 1167 /* 1168 * Create header for new ip packet by modifying header of first 1169 * packet; dequeue and discard fragment reassembly header. 1170 * Make header visible. 1171 */ 1172 ip->ip_len = htons((ip->ip_hl << 2) + next); 1173 ip->ip_src = fp->ipq_src; 1174 ip->ip_dst = fp->ipq_dst; 1175 TAILQ_REMOVE(head, fp, ipq_list); 1176 V_nipq--; 1177 uma_zfree(V_ipq_zone, fp); 1178 m->m_len += (ip->ip_hl << 2); 1179 m->m_data -= (ip->ip_hl << 2); 1180 /* some debugging cruft by sklower, below, will go away soon */ 1181 if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */ 1182 m_fixhdr(m); 1183 IPSTAT_INC(ips_reassembled); 1184 IPQ_UNLOCK(); 1185 return (m); 1186 1187 dropfrag: 1188 IPSTAT_INC(ips_fragdropped); 1189 if (fp != NULL) 1190 fp->ipq_nfrags--; 1191 m_freem(m); 1192 done: 1193 IPQ_UNLOCK(); 1194 return (NULL); 1195 1196 #undef GETIP 1197 } 1198 1199 /* 1200 * Free a fragment reassembly header and all 1201 * associated datagrams. 1202 */ 1203 static void 1204 ip_freef(struct ipqhead *fhp, struct ipq *fp) 1205 { 1206 struct mbuf *q; 1207 1208 IPQ_LOCK_ASSERT(); 1209 1210 while (fp->ipq_frags) { 1211 q = fp->ipq_frags; 1212 fp->ipq_frags = q->m_nextpkt; 1213 m_freem(q); 1214 } 1215 TAILQ_REMOVE(fhp, fp, ipq_list); 1216 uma_zfree(V_ipq_zone, fp); 1217 V_nipq--; 1218 } 1219 1220 /* 1221 * IP timer processing; 1222 * if a timer expires on a reassembly 1223 * queue, discard it. 1224 */ 1225 void 1226 ip_slowtimo(void) 1227 { 1228 VNET_ITERATOR_DECL(vnet_iter); 1229 struct ipq *fp; 1230 int i; 1231 1232 VNET_LIST_RLOCK_NOSLEEP(); 1233 IPQ_LOCK(); 1234 VNET_FOREACH(vnet_iter) { 1235 CURVNET_SET(vnet_iter); 1236 for (i = 0; i < IPREASS_NHASH; i++) { 1237 for(fp = TAILQ_FIRST(&V_ipq[i]); fp;) { 1238 struct ipq *fpp; 1239 1240 fpp = fp; 1241 fp = TAILQ_NEXT(fp, ipq_list); 1242 if(--fpp->ipq_ttl == 0) { 1243 IPSTAT_ADD(ips_fragtimeout, 1244 fpp->ipq_nfrags); 1245 ip_freef(&V_ipq[i], fpp); 1246 } 1247 } 1248 } 1249 /* 1250 * If we are over the maximum number of fragments 1251 * (due to the limit being lowered), drain off 1252 * enough to get down to the new limit. 1253 */ 1254 if (V_maxnipq >= 0 && V_nipq > V_maxnipq) { 1255 for (i = 0; i < IPREASS_NHASH; i++) { 1256 while (V_nipq > V_maxnipq && 1257 !TAILQ_EMPTY(&V_ipq[i])) { 1258 IPSTAT_ADD(ips_fragdropped, 1259 TAILQ_FIRST(&V_ipq[i])->ipq_nfrags); 1260 ip_freef(&V_ipq[i], 1261 TAILQ_FIRST(&V_ipq[i])); 1262 } 1263 } 1264 } 1265 CURVNET_RESTORE(); 1266 } 1267 IPQ_UNLOCK(); 1268 VNET_LIST_RUNLOCK_NOSLEEP(); 1269 } 1270 1271 /* 1272 * Drain off all datagram fragments. 1273 */ 1274 static void 1275 ip_drain_locked(void) 1276 { 1277 int i; 1278 1279 IPQ_LOCK_ASSERT(); 1280 1281 for (i = 0; i < IPREASS_NHASH; i++) { 1282 while(!TAILQ_EMPTY(&V_ipq[i])) { 1283 IPSTAT_ADD(ips_fragdropped, 1284 TAILQ_FIRST(&V_ipq[i])->ipq_nfrags); 1285 ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i])); 1286 } 1287 } 1288 } 1289 1290 void 1291 ip_drain(void) 1292 { 1293 VNET_ITERATOR_DECL(vnet_iter); 1294 1295 VNET_LIST_RLOCK_NOSLEEP(); 1296 IPQ_LOCK(); 1297 VNET_FOREACH(vnet_iter) { 1298 CURVNET_SET(vnet_iter); 1299 ip_drain_locked(); 1300 CURVNET_RESTORE(); 1301 } 1302 IPQ_UNLOCK(); 1303 VNET_LIST_RUNLOCK_NOSLEEP(); 1304 in_rtqdrain(); 1305 } 1306 1307 /* 1308 * The protocol to be inserted into ip_protox[] must be already registered 1309 * in inetsw[], either statically or through pf_proto_register(). 1310 */ 1311 int 1312 ipproto_register(short ipproto) 1313 { 1314 struct protosw *pr; 1315 1316 /* Sanity checks. */ 1317 if (ipproto <= 0 || ipproto >= IPPROTO_MAX) 1318 return (EPROTONOSUPPORT); 1319 1320 /* 1321 * The protocol slot must not be occupied by another protocol 1322 * already. An index pointing to IPPROTO_RAW is unused. 1323 */ 1324 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 1325 if (pr == NULL) 1326 return (EPFNOSUPPORT); 1327 if (ip_protox[ipproto] != pr - inetsw) /* IPPROTO_RAW */ 1328 return (EEXIST); 1329 1330 /* Find the protocol position in inetsw[] and set the index. */ 1331 for (pr = inetdomain.dom_protosw; 1332 pr < inetdomain.dom_protoswNPROTOSW; pr++) { 1333 if (pr->pr_domain->dom_family == PF_INET && 1334 pr->pr_protocol && pr->pr_protocol == ipproto) { 1335 ip_protox[pr->pr_protocol] = pr - inetsw; 1336 return (0); 1337 } 1338 } 1339 return (EPROTONOSUPPORT); 1340 } 1341 1342 int 1343 ipproto_unregister(short ipproto) 1344 { 1345 struct protosw *pr; 1346 1347 /* Sanity checks. */ 1348 if (ipproto <= 0 || ipproto >= IPPROTO_MAX) 1349 return (EPROTONOSUPPORT); 1350 1351 /* Check if the protocol was indeed registered. */ 1352 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 1353 if (pr == NULL) 1354 return (EPFNOSUPPORT); 1355 if (ip_protox[ipproto] == pr - inetsw) /* IPPROTO_RAW */ 1356 return (ENOENT); 1357 1358 /* Reset the protocol slot to IPPROTO_RAW. */ 1359 ip_protox[ipproto] = pr - inetsw; 1360 return (0); 1361 } 1362 1363 /* 1364 * Given address of next destination (final or next hop), return (referenced) 1365 * internet address info of interface to be used to get there. 1366 */ 1367 struct in_ifaddr * 1368 ip_rtaddr(struct in_addr dst, u_int fibnum) 1369 { 1370 struct route sro; 1371 struct sockaddr_in *sin; 1372 struct in_ifaddr *ia; 1373 1374 bzero(&sro, sizeof(sro)); 1375 sin = (struct sockaddr_in *)&sro.ro_dst; 1376 sin->sin_family = AF_INET; 1377 sin->sin_len = sizeof(*sin); 1378 sin->sin_addr = dst; 1379 in_rtalloc_ign(&sro, 0, fibnum); 1380 1381 if (sro.ro_rt == NULL) 1382 return (NULL); 1383 1384 ia = ifatoia(sro.ro_rt->rt_ifa); 1385 ifa_ref(&ia->ia_ifa); 1386 RTFREE(sro.ro_rt); 1387 return (ia); 1388 } 1389 1390 u_char inetctlerrmap[PRC_NCMDS] = { 1391 0, 0, 0, 0, 1392 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 1393 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 1394 EMSGSIZE, EHOSTUNREACH, 0, 0, 1395 0, 0, EHOSTUNREACH, 0, 1396 ENOPROTOOPT, ECONNREFUSED 1397 }; 1398 1399 /* 1400 * Forward a packet. If some error occurs return the sender 1401 * an icmp packet. Note we can't always generate a meaningful 1402 * icmp message because icmp doesn't have a large enough repertoire 1403 * of codes and types. 1404 * 1405 * If not forwarding, just drop the packet. This could be confusing 1406 * if ipforwarding was zero but some routing protocol was advancing 1407 * us as a gateway to somewhere. However, we must let the routing 1408 * protocol deal with that. 1409 * 1410 * The srcrt parameter indicates whether the packet is being forwarded 1411 * via a source route. 1412 */ 1413 void 1414 ip_forward(struct mbuf *m, int srcrt) 1415 { 1416 struct ip *ip = mtod(m, struct ip *); 1417 struct in_ifaddr *ia; 1418 struct mbuf *mcopy; 1419 struct in_addr dest; 1420 struct route ro; 1421 int error, type = 0, code = 0, mtu = 0; 1422 1423 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { 1424 IPSTAT_INC(ips_cantforward); 1425 m_freem(m); 1426 return; 1427 } 1428 #ifdef IPSTEALTH 1429 if (!V_ipstealth) { 1430 #endif 1431 if (ip->ip_ttl <= IPTTLDEC) { 1432 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 1433 0, 0); 1434 return; 1435 } 1436 #ifdef IPSTEALTH 1437 } 1438 #endif 1439 1440 ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m)); 1441 #ifndef IPSEC 1442 /* 1443 * 'ia' may be NULL if there is no route for this destination. 1444 * In case of IPsec, Don't discard it just yet, but pass it to 1445 * ip_output in case of outgoing IPsec policy. 1446 */ 1447 if (!srcrt && ia == NULL) { 1448 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0); 1449 return; 1450 } 1451 #endif 1452 1453 /* 1454 * Save the IP header and at most 8 bytes of the payload, 1455 * in case we need to generate an ICMP message to the src. 1456 * 1457 * XXX this can be optimized a lot by saving the data in a local 1458 * buffer on the stack (72 bytes at most), and only allocating the 1459 * mbuf if really necessary. The vast majority of the packets 1460 * are forwarded without having to send an ICMP back (either 1461 * because unnecessary, or because rate limited), so we are 1462 * really we are wasting a lot of work here. 1463 * 1464 * We don't use m_copy() because it might return a reference 1465 * to a shared cluster. Both this function and ip_output() 1466 * assume exclusive access to the IP header in `m', so any 1467 * data in a cluster may change before we reach icmp_error(). 1468 */ 1469 mcopy = m_gethdr(M_NOWAIT, m->m_type); 1470 if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) { 1471 /* 1472 * It's probably ok if the pkthdr dup fails (because 1473 * the deep copy of the tag chain failed), but for now 1474 * be conservative and just discard the copy since 1475 * code below may some day want the tags. 1476 */ 1477 m_free(mcopy); 1478 mcopy = NULL; 1479 } 1480 if (mcopy != NULL) { 1481 mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy)); 1482 mcopy->m_pkthdr.len = mcopy->m_len; 1483 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t)); 1484 } 1485 1486 #ifdef IPSTEALTH 1487 if (!V_ipstealth) { 1488 #endif 1489 ip->ip_ttl -= IPTTLDEC; 1490 #ifdef IPSTEALTH 1491 } 1492 #endif 1493 1494 /* 1495 * If forwarding packet using same interface that it came in on, 1496 * perhaps should send a redirect to sender to shortcut a hop. 1497 * Only send redirect if source is sending directly to us, 1498 * and if packet was not source routed (or has any options). 1499 * Also, don't send redirect if forwarding using a default route 1500 * or a route modified by a redirect. 1501 */ 1502 dest.s_addr = 0; 1503 if (!srcrt && V_ipsendredirects && 1504 ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) { 1505 struct sockaddr_in *sin; 1506 struct rtentry *rt; 1507 1508 bzero(&ro, sizeof(ro)); 1509 sin = (struct sockaddr_in *)&ro.ro_dst; 1510 sin->sin_family = AF_INET; 1511 sin->sin_len = sizeof(*sin); 1512 sin->sin_addr = ip->ip_dst; 1513 in_rtalloc_ign(&ro, 0, M_GETFIB(m)); 1514 1515 rt = ro.ro_rt; 1516 1517 if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && 1518 satosin(rt_key(rt))->sin_addr.s_addr != 0) { 1519 #define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa)) 1520 u_long src = ntohl(ip->ip_src.s_addr); 1521 1522 if (RTA(rt) && 1523 (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) { 1524 if (rt->rt_flags & RTF_GATEWAY) 1525 dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr; 1526 else 1527 dest.s_addr = ip->ip_dst.s_addr; 1528 /* Router requirements says to only send host redirects */ 1529 type = ICMP_REDIRECT; 1530 code = ICMP_REDIRECT_HOST; 1531 } 1532 } 1533 if (rt) 1534 RTFREE(rt); 1535 } 1536 1537 /* 1538 * Try to cache the route MTU from ip_output so we can consider it for 1539 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191. 1540 */ 1541 bzero(&ro, sizeof(ro)); 1542 1543 error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL); 1544 1545 if (error == EMSGSIZE && ro.ro_rt) 1546 mtu = ro.ro_rt->rt_rmx.rmx_mtu; 1547 RO_RTFREE(&ro); 1548 1549 if (error) 1550 IPSTAT_INC(ips_cantforward); 1551 else { 1552 IPSTAT_INC(ips_forward); 1553 if (type) 1554 IPSTAT_INC(ips_redirectsent); 1555 else { 1556 if (mcopy) 1557 m_freem(mcopy); 1558 if (ia != NULL) 1559 ifa_free(&ia->ia_ifa); 1560 return; 1561 } 1562 } 1563 if (mcopy == NULL) { 1564 if (ia != NULL) 1565 ifa_free(&ia->ia_ifa); 1566 return; 1567 } 1568 1569 switch (error) { 1570 1571 case 0: /* forwarded, but need redirect */ 1572 /* type, code set above */ 1573 break; 1574 1575 case ENETUNREACH: 1576 case EHOSTUNREACH: 1577 case ENETDOWN: 1578 case EHOSTDOWN: 1579 default: 1580 type = ICMP_UNREACH; 1581 code = ICMP_UNREACH_HOST; 1582 break; 1583 1584 case EMSGSIZE: 1585 type = ICMP_UNREACH; 1586 code = ICMP_UNREACH_NEEDFRAG; 1587 1588 #ifdef IPSEC 1589 /* 1590 * If IPsec is configured for this path, 1591 * override any possibly mtu value set by ip_output. 1592 */ 1593 mtu = ip_ipsec_mtu(mcopy, mtu); 1594 #endif /* IPSEC */ 1595 /* 1596 * If the MTU was set before make sure we are below the 1597 * interface MTU. 1598 * If the MTU wasn't set before use the interface mtu or 1599 * fall back to the next smaller mtu step compared to the 1600 * current packet size. 1601 */ 1602 if (mtu != 0) { 1603 if (ia != NULL) 1604 mtu = min(mtu, ia->ia_ifp->if_mtu); 1605 } else { 1606 if (ia != NULL) 1607 mtu = ia->ia_ifp->if_mtu; 1608 else 1609 mtu = ip_next_mtu(ntohs(ip->ip_len), 0); 1610 } 1611 IPSTAT_INC(ips_cantfrag); 1612 break; 1613 1614 case ENOBUFS: 1615 /* 1616 * A router should not generate ICMP_SOURCEQUENCH as 1617 * required in RFC1812 Requirements for IP Version 4 Routers. 1618 * Source quench could be a big problem under DoS attacks, 1619 * or if the underlying interface is rate-limited. 1620 * Those who need source quench packets may re-enable them 1621 * via the net.inet.ip.sendsourcequench sysctl. 1622 */ 1623 if (V_ip_sendsourcequench == 0) { 1624 m_freem(mcopy); 1625 if (ia != NULL) 1626 ifa_free(&ia->ia_ifa); 1627 return; 1628 } else { 1629 type = ICMP_SOURCEQUENCH; 1630 code = 0; 1631 } 1632 break; 1633 1634 case EACCES: /* ipfw denied packet */ 1635 m_freem(mcopy); 1636 if (ia != NULL) 1637 ifa_free(&ia->ia_ifa); 1638 return; 1639 } 1640 if (ia != NULL) 1641 ifa_free(&ia->ia_ifa); 1642 icmp_error(mcopy, type, code, dest.s_addr, mtu); 1643 } 1644 1645 void 1646 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, 1647 struct mbuf *m) 1648 { 1649 1650 if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) { 1651 struct bintime bt; 1652 1653 bintime(&bt); 1654 if (inp->inp_socket->so_options & SO_BINTIME) { 1655 *mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt), 1656 SCM_BINTIME, SOL_SOCKET); 1657 if (*mp) 1658 mp = &(*mp)->m_next; 1659 } 1660 if (inp->inp_socket->so_options & SO_TIMESTAMP) { 1661 struct timeval tv; 1662 1663 bintime2timeval(&bt, &tv); 1664 *mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv), 1665 SCM_TIMESTAMP, SOL_SOCKET); 1666 if (*mp) 1667 mp = &(*mp)->m_next; 1668 } 1669 } 1670 if (inp->inp_flags & INP_RECVDSTADDR) { 1671 *mp = sbcreatecontrol((caddr_t)&ip->ip_dst, 1672 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); 1673 if (*mp) 1674 mp = &(*mp)->m_next; 1675 } 1676 if (inp->inp_flags & INP_RECVTTL) { 1677 *mp = sbcreatecontrol((caddr_t)&ip->ip_ttl, 1678 sizeof(u_char), IP_RECVTTL, IPPROTO_IP); 1679 if (*mp) 1680 mp = &(*mp)->m_next; 1681 } 1682 #ifdef notyet 1683 /* XXX 1684 * Moving these out of udp_input() made them even more broken 1685 * than they already were. 1686 */ 1687 /* options were tossed already */ 1688 if (inp->inp_flags & INP_RECVOPTS) { 1689 *mp = sbcreatecontrol((caddr_t)opts_deleted_above, 1690 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); 1691 if (*mp) 1692 mp = &(*mp)->m_next; 1693 } 1694 /* ip_srcroute doesn't do what we want here, need to fix */ 1695 if (inp->inp_flags & INP_RECVRETOPTS) { 1696 *mp = sbcreatecontrol((caddr_t)ip_srcroute(m), 1697 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); 1698 if (*mp) 1699 mp = &(*mp)->m_next; 1700 } 1701 #endif 1702 if (inp->inp_flags & INP_RECVIF) { 1703 struct ifnet *ifp; 1704 struct sdlbuf { 1705 struct sockaddr_dl sdl; 1706 u_char pad[32]; 1707 } sdlbuf; 1708 struct sockaddr_dl *sdp; 1709 struct sockaddr_dl *sdl2 = &sdlbuf.sdl; 1710 1711 if ((ifp = m->m_pkthdr.rcvif) && 1712 ifp->if_index && ifp->if_index <= V_if_index) { 1713 sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr; 1714 /* 1715 * Change our mind and don't try copy. 1716 */ 1717 if (sdp->sdl_family != AF_LINK || 1718 sdp->sdl_len > sizeof(sdlbuf)) { 1719 goto makedummy; 1720 } 1721 bcopy(sdp, sdl2, sdp->sdl_len); 1722 } else { 1723 makedummy: 1724 sdl2->sdl_len = 1725 offsetof(struct sockaddr_dl, sdl_data[0]); 1726 sdl2->sdl_family = AF_LINK; 1727 sdl2->sdl_index = 0; 1728 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0; 1729 } 1730 *mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len, 1731 IP_RECVIF, IPPROTO_IP); 1732 if (*mp) 1733 mp = &(*mp)->m_next; 1734 } 1735 if (inp->inp_flags & INP_RECVTOS) { 1736 *mp = sbcreatecontrol((caddr_t)&ip->ip_tos, 1737 sizeof(u_char), IP_RECVTOS, IPPROTO_IP); 1738 if (*mp) 1739 mp = &(*mp)->m_next; 1740 } 1741 } 1742 1743 /* 1744 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the 1745 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on 1746 * locking. This code remains in ip_input.c as ip_mroute.c is optionally 1747 * compiled. 1748 */ 1749 static VNET_DEFINE(int, ip_rsvp_on); 1750 VNET_DEFINE(struct socket *, ip_rsvpd); 1751 1752 #define V_ip_rsvp_on VNET(ip_rsvp_on) 1753 1754 int 1755 ip_rsvp_init(struct socket *so) 1756 { 1757 1758 if (so->so_type != SOCK_RAW || 1759 so->so_proto->pr_protocol != IPPROTO_RSVP) 1760 return EOPNOTSUPP; 1761 1762 if (V_ip_rsvpd != NULL) 1763 return EADDRINUSE; 1764 1765 V_ip_rsvpd = so; 1766 /* 1767 * This may seem silly, but we need to be sure we don't over-increment 1768 * the RSVP counter, in case something slips up. 1769 */ 1770 if (!V_ip_rsvp_on) { 1771 V_ip_rsvp_on = 1; 1772 V_rsvp_on++; 1773 } 1774 1775 return 0; 1776 } 1777 1778 int 1779 ip_rsvp_done(void) 1780 { 1781 1782 V_ip_rsvpd = NULL; 1783 /* 1784 * This may seem silly, but we need to be sure we don't over-decrement 1785 * the RSVP counter, in case something slips up. 1786 */ 1787 if (V_ip_rsvp_on) { 1788 V_ip_rsvp_on = 0; 1789 V_rsvp_on--; 1790 } 1791 return 0; 1792 } 1793 1794 void 1795 rsvp_input(struct mbuf *m, int off) /* XXX must fixup manually */ 1796 { 1797 1798 if (rsvp_input_p) { /* call the real one if loaded */ 1799 rsvp_input_p(m, off); 1800 return; 1801 } 1802 1803 /* Can still get packets with rsvp_on = 0 if there is a local member 1804 * of the group to which the RSVP packet is addressed. But in this 1805 * case we want to throw the packet away. 1806 */ 1807 1808 if (!V_rsvp_on) { 1809 m_freem(m); 1810 return; 1811 } 1812 1813 if (V_ip_rsvpd != NULL) { 1814 rip_input(m, off); 1815 return; 1816 } 1817 /* Drop the packet */ 1818 m_freem(m); 1819 } 1820