/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bootp.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hhook.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/pfil.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/netisr.h>
#include <net/rss_config.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#include <netinet/in_rss.h>
#include <netinet/ip_mroute.h>

#include <netipsec/ipsec_support.h>

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

/* IP reassembly functions are defined in ip_reass.c. */
extern void ipreass_init(void);
extern void ipreass_drain(void);
extern void ipreass_slowtimo(void);
#ifdef VIMAGE
extern void ipreass_destroy(void);
#endif

struct rmlock in_ifaddr_lock;
RM_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

/*
 * Respond with an ICMP host redirect when we forward a packet out of
 * the same interface on which it was received.  See RFC 792.
 */
VNET_DEFINE(int, ipsendredirects) = 1;
SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
VNET_DEFINE_STATIC(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(pfil_head_t, inet_pfil_head);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
#ifdef	RSS
	.nh_m2cpuid = rss_soft_m2cpuid_v4,
	.nh_policy = NETISR_POLICY_CPU,
	.nh_dispatch = NETISR_DISPATCH_HYBRID,
#else
	.nh_policy = NETISR_POLICY_FLOW,
#endif
};

#ifdef	RSS
/*
 * Directly dispatched frames are currently assumed
 * to have a flowid already calculated.
 *
 * It should likely have something that asserts that the
 * flow details are actually valid.
 */
static struct netisr_handler ip_direct_nh = {
	.nh_name = "ip_direct",
	.nh_handler = ip_direct_input,
	.nh_proto = NETISR_IP_DIRECT,
	.nh_m2cpuid = rss_soft_m2cpuid_v4,
	.nh_policy = NETISR_POLICY_CPU,
	.nh_dispatch = NETISR_DISPATCH_HYBRID,
};
#endif

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);	/* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl);	/* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);			/* mask for hash table */

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

/*
 * IP statistics are stored in the "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
VNET_PCPUSTAT_SYSINIT(ipstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ipstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array.
 */
void
kmod_ipstat_inc(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], 1);
}

void
kmod_ipstat_dec(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], -1);
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

#ifdef RSS
static int
sysctl_netinet_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_direct_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_direct_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRDQMAXLEN, intr_direct_queue_maxlen,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_netinet_intr_direct_queue_maxlen,
    "I", "Maximum size of the IP direct input queue");

static int
sysctl_netinet_intr_direct_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_direct_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_direct_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRDQDROPS, intr_direct_queue_drops,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    sysctl_netinet_intr_direct_queue_drops, "I",
    "Number of packets dropped from the IP direct input queue");
#endif /* RSS */
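
/*
 * The netisr input-queue knobs above are runtime tunables; illustrative
 * sysctl(8) usage (the values shown are examples only):
 *
 *	sysctl net.inet.ip.intr_queue_maxlen=2048
 *	sysctl net.inet.ip.intr_queue_drops
 */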

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct pfil_head_args args;
	struct protosw *pr;
	int i;

	CK_STAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	ipreass_init();

	/* Initialize packet filter hooks. */
	args.pa_version = PFIL_VERSION;
	args.pa_flags = PFIL_IN | PFIL_OUT;
	args.pa_type = PFIL_TYPE_IP4;
	args.pa_headname = PFIL_INET_NAME;
	V_inet_pfil_head = pfil_head_register(&args);

	if (hhook_head_register(HHOOK_TYPE_IPSEC_IN, AF_INET,
	    &V_ipsec_hhh_in[HHOOK_IPSEC_INET],
	    HHOOK_WAITOK | HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register input helper hook\n",
		    __func__);
	if (hhook_head_register(HHOOK_TYPE_IPSEC_OUT, AF_INET,
	    &V_ipsec_hhh_out[HHOOK_IPSEC_INET],
	    HHOOK_WAITOK | HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register output helper hook\n",
		    __func__);

	/* Skip initialization of globals for non-default instances. */
#ifdef VIMAGE
	if (!IS_DEFAULT_VNET(curvnet)) {
		netisr_register_vnet(&ip_nh);
#ifdef RSS
		netisr_register_vnet(&ip_direct_nh);
#endif
		return;
	}
#endif

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	netisr_register(&ip_nh);
#ifdef	RSS
	netisr_register(&ip_direct_nh);
#endif
}

#ifdef VIMAGE
static void
ip_destroy(void *unused __unused)
{
	int error;

#ifdef	RSS
	netisr_unregister_vnet(&ip_direct_nh);
#endif
	netisr_unregister_vnet(&ip_nh);

	pfil_head_unregister(V_inet_pfil_head);
	error = hhook_head_deregister(V_ipsec_hhh_in[HHOOK_IPSEC_INET]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister input helper hook "
		    "type HHOOK_TYPE_IPSEC_IN, id HHOOK_IPSEC_INET: "
		    "error %d returned\n", __func__, error);
	}
	error = hhook_head_deregister(V_ipsec_hhh_out[HHOOK_IPSEC_INET]);
	if (error != 0) {
		printf("%s: WARNING: unable to deregister output helper hook "
		    "type HHOOK_TYPE_IPSEC_OUT, id HHOOK_IPSEC_INET: "
		    "error %d returned\n", __func__, error);
	}

	/* Remove the IPv4 addresses from all interfaces. */
	in_ifscrub_all();

	/* Make sure the IPv4 routes are gone as well. */
	rib_flush_routes_family(AF_INET);

	/* Destroy IP reassembly queue. */
	ipreass_destroy();

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);
}

VNET_SYSUNINIT(ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ip_destroy, NULL);
#endif

#ifdef	RSS
/*
 * IP direct input routine.
 *
 * This is called when reinjecting completed fragments where
 * all of the previous checking and book-keeping has been done.
 */
void
ip_direct_input(struct mbuf *m)
{
	struct ip *ip;
	int hlen;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_INPUT(ipv4, m, hlen, ip->ip_p) != 0)
			return;
	}
#endif /* IPSEC */
	IPSTAT_INC(ips_delivered);
	(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p);
	return;
}
#endif

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	MROUTER_RLOCK_TRACKER;
	struct rm_priotracker in_ifa_tracker;
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);
	NET_EPOCH_ASSERT();

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {		/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL);

	/* IN_LOOPBACK must not appear on the wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if (IN_LOOPBACK(ntohl(ip->ip_dst.s_addr)) ||
	    IN_LOOPBACK(ntohl(ip->ip_src.s_addr))) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}
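
	/*
	 * At this point the header has passed its basic sanity checks:
	 * the version, header length and checksum have been verified and
	 * the mbuf chain has been trimmed to the length claimed by ip_len.
	 */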
	/*
	 * Try to forward the packet, but if we fail continue.
	 * ip_tryforward() does not generate redirects, so fall
	 * through to normal processing if redirects are required.
	 * ip_tryforward() does inbound and outbound packet firewall
	 * processing.  If the firewall has decided that the destination
	 * becomes our local address, it sets the M_FASTFWD_OURS flag.
	 * In this case skip another round of inbound firewall processing
	 * and update the ip pointer.
	 */
	if (V_ipforwarding != 0
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	    && (!IPSEC_ENABLED(ipv4) ||
	    IPSEC_CAPS(ipv4, m, IPSEC_CAP_OPERABLE) == 0)
#endif
	    ) {
		if ((m = ip_tryforward(m)) == NULL)
			return;
		if (m->m_flags & M_FASTFWD_OURS) {
			m->m_flags &= ~M_FASTFWD_OURS;
			ip = mtod(m, struct ip *);
			goto ours;
		}
	}

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CAPS(ipv4, m, IPSEC_CAP_BYPASS_FILTER) != 0)
		goto passin;
#endif

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED_IN(V_inet_pfil_head))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(V_inet_pfil_head, &m, ifp, PFIL_IN, NULL) !=
	    PFIL_PASS)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		if (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
			ip_forward(m, 1);
			return;
		}
	}
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined to another node or is a multicast
	 * packet; RSVP wants it and prevents it from being forwarded
	 * anywhere else.  Also check whether the RSVP daemon is running
	 * before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (CK_STAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	IN_IFADDR_RLOCK(&in_ifa_tracker);
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
			counter_u64_add(ia->ia_ifa.ifa_ibytes,
			    m->m_pkthdr.len);
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			goto ours;
		}
	}
	IN_IFADDR_RUNLOCK(&in_ifa_tracker);

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				goto ours;
			}
#endif
		}
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		MROUTER_RLOCK();
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				MROUTER_RUNLOCK();
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP) {
				MROUTER_RUNLOCK();
				goto ours;
			}
			IPSTAT_INC(ips_forward);
		}
		MROUTER_RUNLOCK();
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
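	/*
	 * The limited broadcast address and the unspecified destination
	 * address are always accepted locally and are never forwarded.
	 */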
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1))
		return;
#endif /* IPSTEALTH */

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		/* XXXGL: shouldn't we save & set m_flags? */
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_INPUT(ipv4, m, hlen, ip->ip_p) != 0)
			return;
	}
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p);
	return;
bad:
	m_freem(m);
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ipreass_slowtimo();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ipreass_drain();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
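/*
 * A minimal sketch of the expected registration sequence from a protocol
 * module (IPPROTO_FOO and foo_protosw are hypothetical, not defined in
 * this file):
 *
 *	error = pf_proto_register(PF_INET, &foo_protosw);
 *	if (error == 0)
 *		error = ipproto_register(IPPROTO_FOO);
 *
 * ipproto_unregister() only resets the ip_protox[] slot back to raw IP;
 * removing the protosw entry itself is a separate step.
 */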
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct sockaddr_in *sin;
	struct in_addr dest;
	struct route ro;
	uint32_t flowid;
	int error, type = 0, code = 0, mtu = 0;

	NET_EPOCH_ASSERT();

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (
#ifdef IPSTEALTH
	    V_ipstealth == 0 &&
#endif
	    ip->ip_ttl <= IPTTLDEC) {
		icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, 0);
		return;
	}

	bzero(&ro, sizeof(ro));
	sin = (struct sockaddr_in *)&ro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = ip->ip_dst;
	flowid = m->m_pkthdr.flowid;
	ro.ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_REF, flowid);
	if (ro.ro_nh != NULL) {
		ia = ifatoia(ro.ro_nh->nh_ifa);
	} else
		ia = NULL;
	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copym() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	mcopy = m_gethdr(M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}
#ifdef IPSTEALTH
	if (V_ipstealth == 0)
#endif
		ip->ip_ttl -= IPTTLDEC;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	if (IPSEC_ENABLED(ipv4)) {
		if ((error = IPSEC_FORWARD(ipv4, m)) != 0) {
			/* mbuf consumed by IPsec */
			RO_NHFREE(&ro);
			m_freem(mcopy);
			if (error != EINPROGRESS)
				IPSTAT_INC(ips_cantforward);
			return;
		}
		/* No IPsec processing required */
	}
#endif /* IPSEC */
	/*
	 * If forwarding a packet using the same interface that it came in
	 * on, perhaps we should send a redirect to the sender to shortcut
	 * a hop.  Only send a redirect if the source is sending directly
	 * to us, and if the packet was not source routed (or has any
	 * options).  Also, don't send a redirect if forwarding using a
	 * default route or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct nhop_object *nh;

		nh = ro.ro_nh;

		if (nh != NULL && ((nh->nh_flags & (NHF_REDIRECT|NHF_DEFAULT)) == 0)) {
			struct in_ifaddr *nh_ia = (struct in_ifaddr *)(nh->nh_ifa);
			u_long src = ntohl(ip->ip_src.s_addr);

			if (nh_ia != NULL &&
			    (src & nh_ia->ia_subnetmask) == nh_ia->ia_subnet) {
				if (nh->nh_flags & NHF_GATEWAY)
					dest.s_addr = nh->gw4_sa.sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/* Router requirements say to only send host redirects */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
	}

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_nh)
		mtu = ro.ro_nh->nh_mtu;
	RO_NHFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			return;
		}
	}
	if (mcopy == NULL)
		return;

	switch (error) {
	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;
		/*
		 * If the MTU was set before, make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before, use the interface MTU or
		 * fall back to the next smaller MTU step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		return;
	}
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

#define	CHECK_SO_CT(sp, ct) \
    (((sp->so_options & SO_TIMESTAMP) && (sp->so_ts_clock == ct)) ? 1 : 0)
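
/*
 * Append control-message mbufs describing a received datagram to the
 * chain at *mp, according to the options set on the receiving socket:
 * timestamps (SO_BINTIME, SO_TIMESTAMP and its clock variants), the
 * destination address, TTL, TOS, receiving interface, and flow/RSS
 * information.  Items whose control mbuf cannot be allocated are
 * silently omitted.
 */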
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{
	bool stamped;

	stamped = false;
	if ((inp->inp_socket->so_options & SO_BINTIME) ||
	    CHECK_SO_CT(inp->inp_socket, SO_TS_BINTIME)) {
		struct bintime boottimebin, bt;
		struct timespec ts1;

		if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
		    M_TSTMP)) {
			mbuf_tstmp2timespec(m, &ts1);
			timespec2bintime(&ts1, &bt);
			getboottimebin(&boottimebin);
			bintime_add(&bt, &boottimebin);
		} else {
			bintime(&bt);
		}
		*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
		    SCM_BINTIME, SOL_SOCKET);
		if (*mp != NULL) {
			mp = &(*mp)->m_next;
			stamped = true;
		}
	}
	if (CHECK_SO_CT(inp->inp_socket, SO_TS_REALTIME_MICRO)) {
		struct bintime boottimebin, bt1;
		struct timespec ts1;
		struct timeval tv;

		if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
		    M_TSTMP)) {
			mbuf_tstmp2timespec(m, &ts1);
			timespec2bintime(&ts1, &bt1);
			getboottimebin(&boottimebin);
			bintime_add(&bt1, &boottimebin);
			bintime2timeval(&bt1, &tv);
		} else {
			microtime(&tv);
		}
		*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp != NULL) {
			mp = &(*mp)->m_next;
			stamped = true;
		}
	} else if (CHECK_SO_CT(inp->inp_socket, SO_TS_REALTIME)) {
		struct bintime boottimebin;
		struct timespec ts, ts1;

		if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
		    M_TSTMP)) {
			mbuf_tstmp2timespec(m, &ts);
			getboottimebin(&boottimebin);
			bintime2timespec(&boottimebin, &ts1);
			timespecadd(&ts, &ts1, &ts);
		} else {
			nanotime(&ts);
		}
		*mp = sbcreatecontrol((caddr_t)&ts, sizeof(ts),
		    SCM_REALTIME, SOL_SOCKET);
		if (*mp != NULL) {
			mp = &(*mp)->m_next;
			stamped = true;
		}
	} else if (CHECK_SO_CT(inp->inp_socket, SO_TS_MONOTONIC)) {
		struct timespec ts;

		if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
		    M_TSTMP))
			mbuf_tstmp2timespec(m, &ts);
		else
			nanouptime(&ts);
		*mp = sbcreatecontrol((caddr_t)&ts, sizeof(ts),
		    SCM_MONOTONIC, SOL_SOCKET);
		if (*mp != NULL) {
			mp = &(*mp)->m_next;
			stamped = true;
		}
	}
	if (stamped && (m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
	    M_TSTMP)) {
		struct sock_timestamp_info sti;

		bzero(&sti, sizeof(sti));
		sti.st_info_flags = ST_INFO_HW;
		if ((m->m_flags & M_TSTMP_HPREC) != 0)
			sti.st_info_flags |= ST_INFO_HW_HPREC;
		*mp = sbcreatecontrol((caddr_t)&sti, sizeof(sti), SCM_TIME_INFO,
		    SOL_SOCKET);
		if (*mp != NULL)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
1254 */ 1255 /* options were tossed already */ 1256 if (inp->inp_flags & INP_RECVOPTS) { 1257 *mp = sbcreatecontrol((caddr_t)opts_deleted_above, 1258 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); 1259 if (*mp) 1260 mp = &(*mp)->m_next; 1261 } 1262 /* ip_srcroute doesn't do what we want here, need to fix */ 1263 if (inp->inp_flags & INP_RECVRETOPTS) { 1264 *mp = sbcreatecontrol((caddr_t)ip_srcroute(m), 1265 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); 1266 if (*mp) 1267 mp = &(*mp)->m_next; 1268 } 1269 #endif 1270 if (inp->inp_flags & INP_RECVIF) { 1271 struct ifnet *ifp; 1272 struct sdlbuf { 1273 struct sockaddr_dl sdl; 1274 u_char pad[32]; 1275 } sdlbuf; 1276 struct sockaddr_dl *sdp; 1277 struct sockaddr_dl *sdl2 = &sdlbuf.sdl; 1278 1279 if ((ifp = m->m_pkthdr.rcvif) && 1280 ifp->if_index && ifp->if_index <= V_if_index) { 1281 sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr; 1282 /* 1283 * Change our mind and don't try copy. 1284 */ 1285 if (sdp->sdl_family != AF_LINK || 1286 sdp->sdl_len > sizeof(sdlbuf)) { 1287 goto makedummy; 1288 } 1289 bcopy(sdp, sdl2, sdp->sdl_len); 1290 } else { 1291 makedummy: 1292 sdl2->sdl_len = 1293 offsetof(struct sockaddr_dl, sdl_data[0]); 1294 sdl2->sdl_family = AF_LINK; 1295 sdl2->sdl_index = 0; 1296 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0; 1297 } 1298 *mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len, 1299 IP_RECVIF, IPPROTO_IP); 1300 if (*mp) 1301 mp = &(*mp)->m_next; 1302 } 1303 if (inp->inp_flags & INP_RECVTOS) { 1304 *mp = sbcreatecontrol((caddr_t)&ip->ip_tos, 1305 sizeof(u_char), IP_RECVTOS, IPPROTO_IP); 1306 if (*mp) 1307 mp = &(*mp)->m_next; 1308 } 1309 1310 if (inp->inp_flags2 & INP_RECVFLOWID) { 1311 uint32_t flowid, flow_type; 1312 1313 flowid = m->m_pkthdr.flowid; 1314 flow_type = M_HASHTYPE_GET(m); 1315 1316 /* 1317 * XXX should handle the failure of one or the 1318 * other - don't populate both? 1319 */ 1320 *mp = sbcreatecontrol((caddr_t) &flowid, 1321 sizeof(uint32_t), IP_FLOWID, IPPROTO_IP); 1322 if (*mp) 1323 mp = &(*mp)->m_next; 1324 *mp = sbcreatecontrol((caddr_t) &flow_type, 1325 sizeof(uint32_t), IP_FLOWTYPE, IPPROTO_IP); 1326 if (*mp) 1327 mp = &(*mp)->m_next; 1328 } 1329 1330 #ifdef RSS 1331 if (inp->inp_flags2 & INP_RECVRSSBUCKETID) { 1332 uint32_t flowid, flow_type; 1333 uint32_t rss_bucketid; 1334 1335 flowid = m->m_pkthdr.flowid; 1336 flow_type = M_HASHTYPE_GET(m); 1337 1338 if (rss_hash2bucket(flowid, flow_type, &rss_bucketid) == 0) { 1339 *mp = sbcreatecontrol((caddr_t) &rss_bucketid, 1340 sizeof(uint32_t), IP_RSSBUCKETID, IPPROTO_IP); 1341 if (*mp) 1342 mp = &(*mp)->m_next; 1343 } 1344 } 1345 #endif 1346 } 1347 1348 /* 1349 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the 1350 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on 1351 * locking. This code remains in ip_input.c as ip_mroute.c is optionally 1352 * compiled. 1353 */ 1354 VNET_DEFINE_STATIC(int, ip_rsvp_on); 1355 VNET_DEFINE(struct socket *, ip_rsvpd); 1356 1357 #define V_ip_rsvp_on VNET(ip_rsvp_on) 1358 1359 int 1360 ip_rsvp_init(struct socket *so) 1361 { 1362 1363 if (so->so_type != SOCK_RAW || 1364 so->so_proto->pr_protocol != IPPROTO_RSVP) 1365 return EOPNOTSUPP; 1366 1367 if (V_ip_rsvpd != NULL) 1368 return EADDRINUSE; 1369 1370 V_ip_rsvpd = so; 1371 /* 1372 * This may seem silly, but we need to be sure we don't over-increment 1373 * the RSVP counter, in case something slips up. 
1374 */ 1375 if (!V_ip_rsvp_on) { 1376 V_ip_rsvp_on = 1; 1377 V_rsvp_on++; 1378 } 1379 1380 return 0; 1381 } 1382 1383 int 1384 ip_rsvp_done(void) 1385 { 1386 1387 V_ip_rsvpd = NULL; 1388 /* 1389 * This may seem silly, but we need to be sure we don't over-decrement 1390 * the RSVP counter, in case something slips up. 1391 */ 1392 if (V_ip_rsvp_on) { 1393 V_ip_rsvp_on = 0; 1394 V_rsvp_on--; 1395 } 1396 return 0; 1397 } 1398 1399 int 1400 rsvp_input(struct mbuf **mp, int *offp, int proto) 1401 { 1402 struct mbuf *m; 1403 1404 m = *mp; 1405 *mp = NULL; 1406 1407 if (rsvp_input_p) { /* call the real one if loaded */ 1408 *mp = m; 1409 rsvp_input_p(mp, offp, proto); 1410 return (IPPROTO_DONE); 1411 } 1412 1413 /* Can still get packets with rsvp_on = 0 if there is a local member 1414 * of the group to which the RSVP packet is addressed. But in this 1415 * case we want to throw the packet away. 1416 */ 1417 1418 if (!V_rsvp_on) { 1419 m_freem(m); 1420 return (IPPROTO_DONE); 1421 } 1422 1423 if (V_ip_rsvpd != NULL) { 1424 *mp = m; 1425 rip_input(mp, offp, proto); 1426 return (IPPROTO_DONE); 1427 } 1428 /* Drop the packet */ 1429 m_freem(m); 1430 return (IPPROTO_DONE); 1431 } 1432