1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1988, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_bootp.h" 38 #include "opt_ipstealth.h" 39 #include "opt_ipsec.h" 40 #include "opt_route.h" 41 #include "opt_rss.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/hhook.h> 46 #include <sys/mbuf.h> 47 #include <sys/malloc.h> 48 #include <sys/domain.h> 49 #include <sys/protosw.h> 50 #include <sys/socket.h> 51 #include <sys/time.h> 52 #include <sys/kernel.h> 53 #include <sys/lock.h> 54 #include <sys/rmlock.h> 55 #include <sys/rwlock.h> 56 #include <sys/sdt.h> 57 #include <sys/syslog.h> 58 #include <sys/sysctl.h> 59 60 #include <net/if.h> 61 #include <net/if_types.h> 62 #include <net/if_var.h> 63 #include <net/if_dl.h> 64 #include <net/pfil.h> 65 #include <net/route.h> 66 #include <net/route/nhop.h> 67 #include <net/netisr.h> 68 #include <net/rss_config.h> 69 #include <net/vnet.h> 70 71 #include <netinet/in.h> 72 #include <netinet/in_kdtrace.h> 73 #include <netinet/in_systm.h> 74 #include <netinet/in_var.h> 75 #include <netinet/ip.h> 76 #include <netinet/in_fib.h> 77 #include <netinet/in_pcb.h> 78 #include <netinet/ip_var.h> 79 #include <netinet/ip_fw.h> 80 #include <netinet/ip_icmp.h> 81 #include <netinet/ip_options.h> 82 #include <machine/in_cksum.h> 83 #include <netinet/ip_carp.h> 84 #include <netinet/in_rss.h> 85 #include <netinet/ip_mroute.h> 86 87 #include <netipsec/ipsec_support.h> 88 89 #include <sys/socketvar.h> 90 91 #include <security/mac/mac_framework.h> 92 93 #ifdef CTASSERT 94 CTASSERT(sizeof(struct ip) == 20); 95 #endif 96 97 /* IP reassembly functions are defined in ip_reass.c. */ 98 extern void ipreass_init(void); 99 extern void ipreass_drain(void); 100 extern void ipreass_slowtimo(void); 101 #ifdef VIMAGE 102 extern void ipreass_destroy(void); 103 #endif 104 105 struct rmlock in_ifaddr_lock; 106 RM_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock"); 107 108 VNET_DEFINE(int, rsvp_on); 109 110 VNET_DEFINE(int, ipforwarding); 111 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_VNET | CTLFLAG_RW, 112 &VNET_NAME(ipforwarding), 0, 113 "Enable IP forwarding between interfaces"); 114 115 /* 116 * Respond with an ICMP host redirect when we forward a packet out of 117 * the same interface on which it was received. See RFC 792. 118 */ 119 VNET_DEFINE(int, ipsendredirects) = 1; 120 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_VNET | CTLFLAG_RW, 121 &VNET_NAME(ipsendredirects), 0, 122 "Enable sending IP redirects"); 123 124 /* 125 * XXX - Setting ip_checkinterface mostly implements the receive side of 126 * the Strong ES model described in RFC 1122, but since the routing table 127 * and transmit implementation do not implement the Strong ES model, 128 * setting this to 1 results in an odd hybrid. 129 * 130 * XXX - ip_checkinterface currently must be disabled if you use ipnat 131 * to translate the destination address to another local interface. 132 * 133 * XXX - ip_checkinterface must be disabled if you add IP aliases 134 * to the loopback interface instead of the interface where the 135 * packets for those addresses are received. 136 */ 137 VNET_DEFINE_STATIC(int, ip_checkinterface); 138 #define V_ip_checkinterface VNET(ip_checkinterface) 139 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_VNET | CTLFLAG_RW, 140 &VNET_NAME(ip_checkinterface), 0, 141 "Verify packet arrives on correct interface"); 142 143 VNET_DEFINE(pfil_head_t, inet_pfil_head); /* Packet filter hooks */ 144 145 static struct netisr_handler ip_nh = { 146 .nh_name = "ip", 147 .nh_handler = ip_input, 148 .nh_proto = NETISR_IP, 149 #ifdef RSS 150 .nh_m2cpuid = rss_soft_m2cpuid_v4, 151 .nh_policy = NETISR_POLICY_CPU, 152 .nh_dispatch = NETISR_DISPATCH_HYBRID, 153 #else 154 .nh_policy = NETISR_POLICY_FLOW, 155 #endif 156 }; 157 158 #ifdef RSS 159 /* 160 * Directly dispatched frames are currently assumed 161 * to have a flowid already calculated. 162 * 163 * It should likely have something that assert it 164 * actually has valid flow details. 165 */ 166 static struct netisr_handler ip_direct_nh = { 167 .nh_name = "ip_direct", 168 .nh_handler = ip_direct_input, 169 .nh_proto = NETISR_IP_DIRECT, 170 .nh_m2cpuid = rss_soft_m2cpuid_v4, 171 .nh_policy = NETISR_POLICY_CPU, 172 .nh_dispatch = NETISR_DISPATCH_HYBRID, 173 }; 174 #endif 175 176 extern struct domain inetdomain; 177 extern struct protosw inetsw[]; 178 u_char ip_protox[IPPROTO_MAX]; 179 VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead); /* first inet address */ 180 VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */ 181 VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */ 182 183 #ifdef IPCTL_DEFMTU 184 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW, 185 &ip_mtu, 0, "Default MTU"); 186 #endif 187 188 #ifdef IPSTEALTH 189 VNET_DEFINE(int, ipstealth); 190 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_VNET | CTLFLAG_RW, 191 &VNET_NAME(ipstealth), 0, 192 "IP stealth mode, no TTL decrementation on forwarding"); 193 #endif 194 195 /* 196 * IP statistics are stored in the "array" of counter(9)s. 197 */ 198 VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat); 199 VNET_PCPUSTAT_SYSINIT(ipstat); 200 SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat, 201 "IP statistics (struct ipstat, netinet/ip_var.h)"); 202 203 #ifdef VIMAGE 204 VNET_PCPUSTAT_SYSUNINIT(ipstat); 205 #endif /* VIMAGE */ 206 207 /* 208 * Kernel module interface for updating ipstat. The argument is an index 209 * into ipstat treated as an array. 210 */ 211 void 212 kmod_ipstat_inc(int statnum) 213 { 214 215 counter_u64_add(VNET(ipstat)[statnum], 1); 216 } 217 218 void 219 kmod_ipstat_dec(int statnum) 220 { 221 222 counter_u64_add(VNET(ipstat)[statnum], -1); 223 } 224 225 static int 226 sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS) 227 { 228 int error, qlimit; 229 230 netisr_getqlimit(&ip_nh, &qlimit); 231 error = sysctl_handle_int(oidp, &qlimit, 0, req); 232 if (error || !req->newptr) 233 return (error); 234 if (qlimit < 1) 235 return (EINVAL); 236 return (netisr_setqlimit(&ip_nh, qlimit)); 237 } 238 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, 239 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, 240 sysctl_netinet_intr_queue_maxlen, "I", 241 "Maximum size of the IP input queue"); 242 243 static int 244 sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS) 245 { 246 u_int64_t qdrops_long; 247 int error, qdrops; 248 249 netisr_getqdrops(&ip_nh, &qdrops_long); 250 qdrops = qdrops_long; 251 error = sysctl_handle_int(oidp, &qdrops, 0, req); 252 if (error || !req->newptr) 253 return (error); 254 if (qdrops != 0) 255 return (EINVAL); 256 netisr_clearqdrops(&ip_nh); 257 return (0); 258 } 259 260 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, 261 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 262 0, 0, sysctl_netinet_intr_queue_drops, "I", 263 "Number of packets dropped from the IP input queue"); 264 265 #ifdef RSS 266 static int 267 sysctl_netinet_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS) 268 { 269 int error, qlimit; 270 271 netisr_getqlimit(&ip_direct_nh, &qlimit); 272 error = sysctl_handle_int(oidp, &qlimit, 0, req); 273 if (error || !req->newptr) 274 return (error); 275 if (qlimit < 1) 276 return (EINVAL); 277 return (netisr_setqlimit(&ip_direct_nh, qlimit)); 278 } 279 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRDQMAXLEN, intr_direct_queue_maxlen, 280 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 281 0, 0, sysctl_netinet_intr_direct_queue_maxlen, 282 "I", "Maximum size of the IP direct input queue"); 283 284 static int 285 sysctl_netinet_intr_direct_queue_drops(SYSCTL_HANDLER_ARGS) 286 { 287 u_int64_t qdrops_long; 288 int error, qdrops; 289 290 netisr_getqdrops(&ip_direct_nh, &qdrops_long); 291 qdrops = qdrops_long; 292 error = sysctl_handle_int(oidp, &qdrops, 0, req); 293 if (error || !req->newptr) 294 return (error); 295 if (qdrops != 0) 296 return (EINVAL); 297 netisr_clearqdrops(&ip_direct_nh); 298 return (0); 299 } 300 301 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRDQDROPS, intr_direct_queue_drops, 302 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, 303 sysctl_netinet_intr_direct_queue_drops, "I", 304 "Number of packets dropped from the IP direct input queue"); 305 #endif /* RSS */ 306 307 /* 308 * IP initialization: fill in IP protocol switch table. 309 * All protocols not implemented in kernel go to raw IP protocol handler. 310 */ 311 void 312 ip_init(void) 313 { 314 struct pfil_head_args args; 315 struct protosw *pr; 316 int i; 317 318 CK_STAILQ_INIT(&V_in_ifaddrhead); 319 V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask); 320 321 /* Initialize IP reassembly queue. */ 322 ipreass_init(); 323 324 /* Initialize packet filter hooks. */ 325 args.pa_version = PFIL_VERSION; 326 args.pa_flags = PFIL_IN | PFIL_OUT; 327 args.pa_type = PFIL_TYPE_IP4; 328 args.pa_headname = PFIL_INET_NAME; 329 V_inet_pfil_head = pfil_head_register(&args); 330 331 if (hhook_head_register(HHOOK_TYPE_IPSEC_IN, AF_INET, 332 &V_ipsec_hhh_in[HHOOK_IPSEC_INET], 333 HHOOK_WAITOK | HHOOK_HEADISINVNET) != 0) 334 printf("%s: WARNING: unable to register input helper hook\n", 335 __func__); 336 if (hhook_head_register(HHOOK_TYPE_IPSEC_OUT, AF_INET, 337 &V_ipsec_hhh_out[HHOOK_IPSEC_INET], 338 HHOOK_WAITOK | HHOOK_HEADISINVNET) != 0) 339 printf("%s: WARNING: unable to register output helper hook\n", 340 __func__); 341 342 /* Skip initialization of globals for non-default instances. */ 343 #ifdef VIMAGE 344 if (!IS_DEFAULT_VNET(curvnet)) { 345 netisr_register_vnet(&ip_nh); 346 #ifdef RSS 347 netisr_register_vnet(&ip_direct_nh); 348 #endif 349 return; 350 } 351 #endif 352 353 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 354 if (pr == NULL) 355 panic("ip_init: PF_INET not found"); 356 357 /* Initialize the entire ip_protox[] array to IPPROTO_RAW. */ 358 for (i = 0; i < IPPROTO_MAX; i++) 359 ip_protox[i] = pr - inetsw; 360 /* 361 * Cycle through IP protocols and put them into the appropriate place 362 * in ip_protox[]. 363 */ 364 for (pr = inetdomain.dom_protosw; 365 pr < inetdomain.dom_protoswNPROTOSW; pr++) 366 if (pr->pr_domain->dom_family == PF_INET && 367 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) { 368 /* Be careful to only index valid IP protocols. */ 369 if (pr->pr_protocol < IPPROTO_MAX) 370 ip_protox[pr->pr_protocol] = pr - inetsw; 371 } 372 373 netisr_register(&ip_nh); 374 #ifdef RSS 375 netisr_register(&ip_direct_nh); 376 #endif 377 } 378 379 #ifdef VIMAGE 380 static void 381 ip_destroy(void *unused __unused) 382 { 383 int error; 384 385 #ifdef RSS 386 netisr_unregister_vnet(&ip_direct_nh); 387 #endif 388 netisr_unregister_vnet(&ip_nh); 389 390 pfil_head_unregister(V_inet_pfil_head); 391 error = hhook_head_deregister(V_ipsec_hhh_in[HHOOK_IPSEC_INET]); 392 if (error != 0) { 393 printf("%s: WARNING: unable to deregister input helper hook " 394 "type HHOOK_TYPE_IPSEC_IN, id HHOOK_IPSEC_INET: " 395 "error %d returned\n", __func__, error); 396 } 397 error = hhook_head_deregister(V_ipsec_hhh_out[HHOOK_IPSEC_INET]); 398 if (error != 0) { 399 printf("%s: WARNING: unable to deregister output helper hook " 400 "type HHOOK_TYPE_IPSEC_OUT, id HHOOK_IPSEC_INET: " 401 "error %d returned\n", __func__, error); 402 } 403 404 /* Remove the IPv4 addresses from all interfaces. */ 405 in_ifscrub_all(); 406 407 /* Make sure the IPv4 routes are gone as well. */ 408 rib_flush_routes_family(AF_INET); 409 410 /* Destroy IP reassembly queue. */ 411 ipreass_destroy(); 412 413 /* Cleanup in_ifaddr hash table; should be empty. */ 414 hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask); 415 } 416 417 VNET_SYSUNINIT(ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ip_destroy, NULL); 418 #endif 419 420 #ifdef RSS 421 /* 422 * IP direct input routine. 423 * 424 * This is called when reinjecting completed fragments where 425 * all of the previous checking and book-keeping has been done. 426 */ 427 void 428 ip_direct_input(struct mbuf *m) 429 { 430 struct ip *ip; 431 int hlen; 432 433 ip = mtod(m, struct ip *); 434 hlen = ip->ip_hl << 2; 435 436 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 437 if (IPSEC_ENABLED(ipv4)) { 438 if (IPSEC_INPUT(ipv4, m, hlen, ip->ip_p) != 0) 439 return; 440 } 441 #endif /* IPSEC */ 442 IPSTAT_INC(ips_delivered); 443 (*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p); 444 return; 445 } 446 #endif 447 448 /* 449 * Ip input routine. Checksum and byte swap header. If fragmented 450 * try to reassemble. Process options. Pass to next level. 451 */ 452 void 453 ip_input(struct mbuf *m) 454 { 455 MROUTER_RLOCK_TRACKER; 456 struct rm_priotracker in_ifa_tracker; 457 struct ip *ip = NULL; 458 struct in_ifaddr *ia = NULL; 459 struct ifaddr *ifa; 460 struct ifnet *ifp; 461 int checkif, hlen = 0; 462 uint16_t sum, ip_len; 463 int dchg = 0; /* dest changed after fw */ 464 struct in_addr odst; /* original dst address */ 465 466 M_ASSERTPKTHDR(m); 467 NET_EPOCH_ASSERT(); 468 469 if (m->m_flags & M_FASTFWD_OURS) { 470 m->m_flags &= ~M_FASTFWD_OURS; 471 /* Set up some basics that will be used later. */ 472 ip = mtod(m, struct ip *); 473 hlen = ip->ip_hl << 2; 474 ip_len = ntohs(ip->ip_len); 475 goto ours; 476 } 477 478 IPSTAT_INC(ips_total); 479 480 if (m->m_pkthdr.len < sizeof(struct ip)) 481 goto tooshort; 482 483 if (m->m_len < sizeof (struct ip) && 484 (m = m_pullup(m, sizeof (struct ip))) == NULL) { 485 IPSTAT_INC(ips_toosmall); 486 return; 487 } 488 ip = mtod(m, struct ip *); 489 490 if (ip->ip_v != IPVERSION) { 491 IPSTAT_INC(ips_badvers); 492 goto bad; 493 } 494 495 hlen = ip->ip_hl << 2; 496 if (hlen < sizeof(struct ip)) { /* minimum header length */ 497 IPSTAT_INC(ips_badhlen); 498 goto bad; 499 } 500 if (hlen > m->m_len) { 501 if ((m = m_pullup(m, hlen)) == NULL) { 502 IPSTAT_INC(ips_badhlen); 503 return; 504 } 505 ip = mtod(m, struct ip *); 506 } 507 508 IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL); 509 510 /* IN_LOOPBACK must not appear on the wire - RFC1122 */ 511 ifp = m->m_pkthdr.rcvif; 512 if (IN_LOOPBACK(ntohl(ip->ip_dst.s_addr)) || 513 IN_LOOPBACK(ntohl(ip->ip_src.s_addr))) { 514 if ((ifp->if_flags & IFF_LOOPBACK) == 0) { 515 IPSTAT_INC(ips_badaddr); 516 goto bad; 517 } 518 } 519 520 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { 521 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); 522 } else { 523 if (hlen == sizeof(struct ip)) { 524 sum = in_cksum_hdr(ip); 525 } else { 526 sum = in_cksum(m, hlen); 527 } 528 } 529 if (sum) { 530 IPSTAT_INC(ips_badsum); 531 goto bad; 532 } 533 534 #ifdef ALTQ 535 if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) 536 /* packet is dropped by traffic conditioner */ 537 return; 538 #endif 539 540 ip_len = ntohs(ip->ip_len); 541 if (ip_len < hlen) { 542 IPSTAT_INC(ips_badlen); 543 goto bad; 544 } 545 546 /* 547 * Check that the amount of data in the buffers 548 * is as at least much as the IP header would have us expect. 549 * Trim mbufs if longer than we expect. 550 * Drop packet if shorter than we expect. 551 */ 552 if (m->m_pkthdr.len < ip_len) { 553 tooshort: 554 IPSTAT_INC(ips_tooshort); 555 goto bad; 556 } 557 if (m->m_pkthdr.len > ip_len) { 558 if (m->m_len == m->m_pkthdr.len) { 559 m->m_len = ip_len; 560 m->m_pkthdr.len = ip_len; 561 } else 562 m_adj(m, ip_len - m->m_pkthdr.len); 563 } 564 565 /* 566 * Try to forward the packet, but if we fail continue. 567 * ip_tryforward() does not generate redirects, so fall 568 * through to normal processing if redirects are required. 569 * ip_tryforward() does inbound and outbound packet firewall 570 * processing. If firewall has decided that destination becomes 571 * our local address, it sets M_FASTFWD_OURS flag. In this 572 * case skip another inbound firewall processing and update 573 * ip pointer. 574 */ 575 if (V_ipforwarding != 0 576 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 577 && (!IPSEC_ENABLED(ipv4) || 578 IPSEC_CAPS(ipv4, m, IPSEC_CAP_OPERABLE) == 0) 579 #endif 580 ) { 581 if ((m = ip_tryforward(m)) == NULL) 582 return; 583 if (m->m_flags & M_FASTFWD_OURS) { 584 m->m_flags &= ~M_FASTFWD_OURS; 585 ip = mtod(m, struct ip *); 586 goto ours; 587 } 588 } 589 590 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 591 /* 592 * Bypass packet filtering for packets previously handled by IPsec. 593 */ 594 if (IPSEC_ENABLED(ipv4) && 595 IPSEC_CAPS(ipv4, m, IPSEC_CAP_BYPASS_FILTER) != 0) 596 goto passin; 597 #endif 598 599 /* 600 * Run through list of hooks for input packets. 601 * 602 * NB: Beware of the destination address changing (e.g. 603 * by NAT rewriting). When this happens, tell 604 * ip_forward to do the right thing. 605 */ 606 607 /* Jump over all PFIL processing if hooks are not active. */ 608 if (!PFIL_HOOKED_IN(V_inet_pfil_head)) 609 goto passin; 610 611 odst = ip->ip_dst; 612 if (pfil_run_hooks(V_inet_pfil_head, &m, ifp, PFIL_IN, NULL) != 613 PFIL_PASS) 614 return; 615 if (m == NULL) /* consumed by filter */ 616 return; 617 618 ip = mtod(m, struct ip *); 619 dchg = (odst.s_addr != ip->ip_dst.s_addr); 620 ifp = m->m_pkthdr.rcvif; 621 622 if (m->m_flags & M_FASTFWD_OURS) { 623 m->m_flags &= ~M_FASTFWD_OURS; 624 goto ours; 625 } 626 if (m->m_flags & M_IP_NEXTHOP) { 627 if (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL) { 628 /* 629 * Directly ship the packet on. This allows 630 * forwarding packets originally destined to us 631 * to some other directly connected host. 632 */ 633 ip_forward(m, 1); 634 return; 635 } 636 } 637 passin: 638 639 /* 640 * Process options and, if not destined for us, 641 * ship it on. ip_dooptions returns 1 when an 642 * error was detected (causing an icmp message 643 * to be sent and the original packet to be freed). 644 */ 645 if (hlen > sizeof (struct ip) && ip_dooptions(m, 0)) 646 return; 647 648 /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no 649 * matter if it is destined to another node, or whether it is 650 * a multicast one, RSVP wants it! and prevents it from being forwarded 651 * anywhere else. Also checks if the rsvp daemon is running before 652 * grabbing the packet. 653 */ 654 if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP) 655 goto ours; 656 657 /* 658 * Check our list of addresses, to see if the packet is for us. 659 * If we don't have any addresses, assume any unicast packet 660 * we receive might be for us (and let the upper layers deal 661 * with it). 662 */ 663 if (CK_STAILQ_EMPTY(&V_in_ifaddrhead) && 664 (m->m_flags & (M_MCAST|M_BCAST)) == 0) 665 goto ours; 666 667 /* 668 * Enable a consistency check between the destination address 669 * and the arrival interface for a unicast packet (the RFC 1122 670 * strong ES model) if IP forwarding is disabled and the packet 671 * is not locally generated and the packet is not subject to 672 * 'ipfw fwd'. 673 * 674 * XXX - Checking also should be disabled if the destination 675 * address is ipnat'ed to a different interface. 676 * 677 * XXX - Checking is incompatible with IP aliases added 678 * to the loopback interface instead of the interface where 679 * the packets are received. 680 * 681 * XXX - This is the case for carp vhost IPs as well so we 682 * insert a workaround. If the packet got here, we already 683 * checked with carp_iamatch() and carp_forus(). 684 */ 685 checkif = V_ip_checkinterface && (V_ipforwarding == 0) && 686 ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) && 687 ifp->if_carp == NULL && (dchg == 0); 688 689 /* 690 * Check for exact addresses in the hash bucket. 691 */ 692 IN_IFADDR_RLOCK(&in_ifa_tracker); 693 LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) { 694 /* 695 * If the address matches, verify that the packet 696 * arrived via the correct interface if checking is 697 * enabled. 698 */ 699 if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr && 700 (!checkif || ia->ia_ifp == ifp)) { 701 counter_u64_add(ia->ia_ifa.ifa_ipackets, 1); 702 counter_u64_add(ia->ia_ifa.ifa_ibytes, 703 m->m_pkthdr.len); 704 IN_IFADDR_RUNLOCK(&in_ifa_tracker); 705 goto ours; 706 } 707 } 708 IN_IFADDR_RUNLOCK(&in_ifa_tracker); 709 710 /* 711 * Check for broadcast addresses. 712 * 713 * Only accept broadcast packets that arrive via the matching 714 * interface. Reception of forwarded directed broadcasts would 715 * be handled via ip_forward() and ether_output() with the loopback 716 * into the stack for SIMPLEX interfaces handled by ether_output(). 717 */ 718 if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) { 719 CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 720 if (ifa->ifa_addr->sa_family != AF_INET) 721 continue; 722 ia = ifatoia(ifa); 723 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == 724 ip->ip_dst.s_addr) { 725 counter_u64_add(ia->ia_ifa.ifa_ipackets, 1); 726 counter_u64_add(ia->ia_ifa.ifa_ibytes, 727 m->m_pkthdr.len); 728 goto ours; 729 } 730 #ifdef BOOTP_COMPAT 731 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) { 732 counter_u64_add(ia->ia_ifa.ifa_ipackets, 1); 733 counter_u64_add(ia->ia_ifa.ifa_ibytes, 734 m->m_pkthdr.len); 735 goto ours; 736 } 737 #endif 738 } 739 ia = NULL; 740 } 741 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 742 MROUTER_RLOCK(); 743 /* 744 * RFC 3927 2.7: Do not forward multicast packets from 745 * IN_LINKLOCAL. 746 */ 747 if (V_ip_mrouter && !IN_LINKLOCAL(ntohl(ip->ip_src.s_addr))) { 748 /* 749 * If we are acting as a multicast router, all 750 * incoming multicast packets are passed to the 751 * kernel-level multicast forwarding function. 752 * The packet is returned (relatively) intact; if 753 * ip_mforward() returns a non-zero value, the packet 754 * must be discarded, else it may be accepted below. 755 */ 756 if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) { 757 MROUTER_RUNLOCK(); 758 IPSTAT_INC(ips_cantforward); 759 m_freem(m); 760 return; 761 } 762 763 /* 764 * The process-level routing daemon needs to receive 765 * all multicast IGMP packets, whether or not this 766 * host belongs to their destination groups. 767 */ 768 if (ip->ip_p == IPPROTO_IGMP) { 769 MROUTER_RUNLOCK(); 770 goto ours; 771 } 772 IPSTAT_INC(ips_forward); 773 } 774 MROUTER_RUNLOCK(); 775 /* 776 * Assume the packet is for us, to avoid prematurely taking 777 * a lock on the in_multi hash. Protocols must perform 778 * their own filtering and update statistics accordingly. 779 */ 780 goto ours; 781 } 782 if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST) 783 goto ours; 784 if (ip->ip_dst.s_addr == INADDR_ANY) 785 goto ours; 786 /* RFC 3927 2.7: Do not forward packets to or from IN_LINKLOCAL. */ 787 if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) || 788 IN_LINKLOCAL(ntohl(ip->ip_src.s_addr))) { 789 IPSTAT_INC(ips_cantforward); 790 m_freem(m); 791 return; 792 } 793 794 /* 795 * Not for us; forward if possible and desirable. 796 */ 797 if (V_ipforwarding == 0) { 798 IPSTAT_INC(ips_cantforward); 799 m_freem(m); 800 } else { 801 ip_forward(m, dchg); 802 } 803 return; 804 805 ours: 806 #ifdef IPSTEALTH 807 /* 808 * IPSTEALTH: Process non-routing options only 809 * if the packet is destined for us. 810 */ 811 if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) 812 return; 813 #endif /* IPSTEALTH */ 814 815 /* 816 * Attempt reassembly; if it succeeds, proceed. 817 * ip_reass() will return a different mbuf. 818 */ 819 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) { 820 /* XXXGL: shouldn't we save & set m_flags? */ 821 m = ip_reass(m); 822 if (m == NULL) 823 return; 824 ip = mtod(m, struct ip *); 825 /* Get the header length of the reassembled packet */ 826 hlen = ip->ip_hl << 2; 827 } 828 829 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 830 if (IPSEC_ENABLED(ipv4)) { 831 if (IPSEC_INPUT(ipv4, m, hlen, ip->ip_p) != 0) 832 return; 833 } 834 #endif /* IPSEC */ 835 836 /* 837 * Switch out to protocol's input routine. 838 */ 839 IPSTAT_INC(ips_delivered); 840 841 (*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p); 842 return; 843 bad: 844 m_freem(m); 845 } 846 847 /* 848 * IP timer processing; 849 * if a timer expires on a reassembly 850 * queue, discard it. 851 */ 852 void 853 ip_slowtimo(void) 854 { 855 VNET_ITERATOR_DECL(vnet_iter); 856 857 VNET_LIST_RLOCK_NOSLEEP(); 858 VNET_FOREACH(vnet_iter) { 859 CURVNET_SET(vnet_iter); 860 ipreass_slowtimo(); 861 CURVNET_RESTORE(); 862 } 863 VNET_LIST_RUNLOCK_NOSLEEP(); 864 } 865 866 void 867 ip_drain(void) 868 { 869 VNET_ITERATOR_DECL(vnet_iter); 870 871 VNET_LIST_RLOCK_NOSLEEP(); 872 VNET_FOREACH(vnet_iter) { 873 CURVNET_SET(vnet_iter); 874 ipreass_drain(); 875 CURVNET_RESTORE(); 876 } 877 VNET_LIST_RUNLOCK_NOSLEEP(); 878 } 879 880 /* 881 * The protocol to be inserted into ip_protox[] must be already registered 882 * in inetsw[], either statically or through pf_proto_register(). 883 */ 884 int 885 ipproto_register(short ipproto) 886 { 887 struct protosw *pr; 888 889 /* Sanity checks. */ 890 if (ipproto <= 0 || ipproto >= IPPROTO_MAX) 891 return (EPROTONOSUPPORT); 892 893 /* 894 * The protocol slot must not be occupied by another protocol 895 * already. An index pointing to IPPROTO_RAW is unused. 896 */ 897 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 898 if (pr == NULL) 899 return (EPFNOSUPPORT); 900 if (ip_protox[ipproto] != pr - inetsw) /* IPPROTO_RAW */ 901 return (EEXIST); 902 903 /* Find the protocol position in inetsw[] and set the index. */ 904 for (pr = inetdomain.dom_protosw; 905 pr < inetdomain.dom_protoswNPROTOSW; pr++) { 906 if (pr->pr_domain->dom_family == PF_INET && 907 pr->pr_protocol && pr->pr_protocol == ipproto) { 908 ip_protox[pr->pr_protocol] = pr - inetsw; 909 return (0); 910 } 911 } 912 return (EPROTONOSUPPORT); 913 } 914 915 int 916 ipproto_unregister(short ipproto) 917 { 918 struct protosw *pr; 919 920 /* Sanity checks. */ 921 if (ipproto <= 0 || ipproto >= IPPROTO_MAX) 922 return (EPROTONOSUPPORT); 923 924 /* Check if the protocol was indeed registered. */ 925 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); 926 if (pr == NULL) 927 return (EPFNOSUPPORT); 928 if (ip_protox[ipproto] == pr - inetsw) /* IPPROTO_RAW */ 929 return (ENOENT); 930 931 /* Reset the protocol slot to IPPROTO_RAW. */ 932 ip_protox[ipproto] = pr - inetsw; 933 return (0); 934 } 935 936 u_char inetctlerrmap[PRC_NCMDS] = { 937 0, 0, 0, 0, 938 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, 939 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, 940 EMSGSIZE, EHOSTUNREACH, 0, 0, 941 0, 0, EHOSTUNREACH, 0, 942 ENOPROTOOPT, ECONNREFUSED 943 }; 944 945 /* 946 * Forward a packet. If some error occurs return the sender 947 * an icmp packet. Note we can't always generate a meaningful 948 * icmp message because icmp doesn't have a large enough repertoire 949 * of codes and types. 950 * 951 * If not forwarding, just drop the packet. This could be confusing 952 * if ipforwarding was zero but some routing protocol was advancing 953 * us as a gateway to somewhere. However, we must let the routing 954 * protocol deal with that. 955 * 956 * The srcrt parameter indicates whether the packet is being forwarded 957 * via a source route. 958 */ 959 void 960 ip_forward(struct mbuf *m, int srcrt) 961 { 962 struct ip *ip = mtod(m, struct ip *); 963 struct in_ifaddr *ia; 964 struct mbuf *mcopy; 965 struct sockaddr_in *sin; 966 struct in_addr dest; 967 struct route ro; 968 uint32_t flowid; 969 int error, type = 0, code = 0, mtu = 0; 970 971 NET_EPOCH_ASSERT(); 972 973 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { 974 IPSTAT_INC(ips_cantforward); 975 m_freem(m); 976 return; 977 } 978 if ( 979 #ifdef IPSTEALTH 980 V_ipstealth == 0 && 981 #endif 982 ip->ip_ttl <= IPTTLDEC) { 983 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, 0); 984 return; 985 } 986 987 bzero(&ro, sizeof(ro)); 988 sin = (struct sockaddr_in *)&ro.ro_dst; 989 sin->sin_family = AF_INET; 990 sin->sin_len = sizeof(*sin); 991 sin->sin_addr = ip->ip_dst; 992 flowid = m->m_pkthdr.flowid; 993 ro.ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_REF, flowid); 994 if (ro.ro_nh != NULL) { 995 ia = ifatoia(ro.ro_nh->nh_ifa); 996 } else 997 ia = NULL; 998 /* 999 * Save the IP header and at most 8 bytes of the payload, 1000 * in case we need to generate an ICMP message to the src. 1001 * 1002 * XXX this can be optimized a lot by saving the data in a local 1003 * buffer on the stack (72 bytes at most), and only allocating the 1004 * mbuf if really necessary. The vast majority of the packets 1005 * are forwarded without having to send an ICMP back (either 1006 * because unnecessary, or because rate limited), so we are 1007 * really we are wasting a lot of work here. 1008 * 1009 * We don't use m_copym() because it might return a reference 1010 * to a shared cluster. Both this function and ip_output() 1011 * assume exclusive access to the IP header in `m', so any 1012 * data in a cluster may change before we reach icmp_error(). 1013 */ 1014 mcopy = m_gethdr(M_NOWAIT, m->m_type); 1015 if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) { 1016 /* 1017 * It's probably ok if the pkthdr dup fails (because 1018 * the deep copy of the tag chain failed), but for now 1019 * be conservative and just discard the copy since 1020 * code below may some day want the tags. 1021 */ 1022 m_free(mcopy); 1023 mcopy = NULL; 1024 } 1025 if (mcopy != NULL) { 1026 mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy)); 1027 mcopy->m_pkthdr.len = mcopy->m_len; 1028 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t)); 1029 } 1030 #ifdef IPSTEALTH 1031 if (V_ipstealth == 0) 1032 #endif 1033 ip->ip_ttl -= IPTTLDEC; 1034 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 1035 if (IPSEC_ENABLED(ipv4)) { 1036 if ((error = IPSEC_FORWARD(ipv4, m)) != 0) { 1037 /* mbuf consumed by IPsec */ 1038 RO_NHFREE(&ro); 1039 m_freem(mcopy); 1040 if (error != EINPROGRESS) 1041 IPSTAT_INC(ips_cantforward); 1042 return; 1043 } 1044 /* No IPsec processing required */ 1045 } 1046 #endif /* IPSEC */ 1047 /* 1048 * If forwarding packet using same interface that it came in on, 1049 * perhaps should send a redirect to sender to shortcut a hop. 1050 * Only send redirect if source is sending directly to us, 1051 * and if packet was not source routed (or has any options). 1052 * Also, don't send redirect if forwarding using a default route 1053 * or a route modified by a redirect. 1054 */ 1055 dest.s_addr = 0; 1056 if (!srcrt && V_ipsendredirects && 1057 ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) { 1058 struct nhop_object *nh; 1059 1060 nh = ro.ro_nh; 1061 1062 if (nh != NULL && ((nh->nh_flags & (NHF_REDIRECT|NHF_DEFAULT)) == 0)) { 1063 struct in_ifaddr *nh_ia = (struct in_ifaddr *)(nh->nh_ifa); 1064 u_long src = ntohl(ip->ip_src.s_addr); 1065 1066 if (nh_ia != NULL && 1067 (src & nh_ia->ia_subnetmask) == nh_ia->ia_subnet) { 1068 if (nh->nh_flags & NHF_GATEWAY) 1069 dest.s_addr = nh->gw4_sa.sin_addr.s_addr; 1070 else 1071 dest.s_addr = ip->ip_dst.s_addr; 1072 /* Router requirements says to only send host redirects */ 1073 type = ICMP_REDIRECT; 1074 code = ICMP_REDIRECT_HOST; 1075 } 1076 } 1077 } 1078 1079 error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL); 1080 1081 if (error == EMSGSIZE && ro.ro_nh) 1082 mtu = ro.ro_nh->nh_mtu; 1083 RO_NHFREE(&ro); 1084 1085 if (error) 1086 IPSTAT_INC(ips_cantforward); 1087 else { 1088 IPSTAT_INC(ips_forward); 1089 if (type) 1090 IPSTAT_INC(ips_redirectsent); 1091 else { 1092 if (mcopy) 1093 m_freem(mcopy); 1094 return; 1095 } 1096 } 1097 if (mcopy == NULL) 1098 return; 1099 1100 switch (error) { 1101 case 0: /* forwarded, but need redirect */ 1102 /* type, code set above */ 1103 break; 1104 1105 case ENETUNREACH: 1106 case EHOSTUNREACH: 1107 case ENETDOWN: 1108 case EHOSTDOWN: 1109 default: 1110 type = ICMP_UNREACH; 1111 code = ICMP_UNREACH_HOST; 1112 break; 1113 1114 case EMSGSIZE: 1115 type = ICMP_UNREACH; 1116 code = ICMP_UNREACH_NEEDFRAG; 1117 /* 1118 * If the MTU was set before make sure we are below the 1119 * interface MTU. 1120 * If the MTU wasn't set before use the interface mtu or 1121 * fall back to the next smaller mtu step compared to the 1122 * current packet size. 1123 */ 1124 if (mtu != 0) { 1125 if (ia != NULL) 1126 mtu = min(mtu, ia->ia_ifp->if_mtu); 1127 } else { 1128 if (ia != NULL) 1129 mtu = ia->ia_ifp->if_mtu; 1130 else 1131 mtu = ip_next_mtu(ntohs(ip->ip_len), 0); 1132 } 1133 IPSTAT_INC(ips_cantfrag); 1134 break; 1135 1136 case ENOBUFS: 1137 case EACCES: /* ipfw denied packet */ 1138 m_freem(mcopy); 1139 return; 1140 } 1141 icmp_error(mcopy, type, code, dest.s_addr, mtu); 1142 } 1143 1144 #define CHECK_SO_CT(sp, ct) \ 1145 (((sp->so_options & SO_TIMESTAMP) && (sp->so_ts_clock == ct)) ? 1 : 0) 1146 1147 void 1148 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, 1149 struct mbuf *m) 1150 { 1151 bool stamped; 1152 1153 stamped = false; 1154 if ((inp->inp_socket->so_options & SO_BINTIME) || 1155 CHECK_SO_CT(inp->inp_socket, SO_TS_BINTIME)) { 1156 struct bintime boottimebin, bt; 1157 struct timespec ts1; 1158 1159 if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | 1160 M_TSTMP)) { 1161 mbuf_tstmp2timespec(m, &ts1); 1162 timespec2bintime(&ts1, &bt); 1163 getboottimebin(&boottimebin); 1164 bintime_add(&bt, &boottimebin); 1165 } else { 1166 bintime(&bt); 1167 } 1168 *mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt), 1169 SCM_BINTIME, SOL_SOCKET); 1170 if (*mp != NULL) { 1171 mp = &(*mp)->m_next; 1172 stamped = true; 1173 } 1174 } 1175 if (CHECK_SO_CT(inp->inp_socket, SO_TS_REALTIME_MICRO)) { 1176 struct bintime boottimebin, bt1; 1177 struct timespec ts1; 1178 struct timeval tv; 1179 1180 if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | 1181 M_TSTMP)) { 1182 mbuf_tstmp2timespec(m, &ts1); 1183 timespec2bintime(&ts1, &bt1); 1184 getboottimebin(&boottimebin); 1185 bintime_add(&bt1, &boottimebin); 1186 bintime2timeval(&bt1, &tv); 1187 } else { 1188 microtime(&tv); 1189 } 1190 *mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv), 1191 SCM_TIMESTAMP, SOL_SOCKET); 1192 if (*mp != NULL) { 1193 mp = &(*mp)->m_next; 1194 stamped = true; 1195 } 1196 } else if (CHECK_SO_CT(inp->inp_socket, SO_TS_REALTIME)) { 1197 struct bintime boottimebin; 1198 struct timespec ts, ts1; 1199 1200 if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | 1201 M_TSTMP)) { 1202 mbuf_tstmp2timespec(m, &ts); 1203 getboottimebin(&boottimebin); 1204 bintime2timespec(&boottimebin, &ts1); 1205 timespecadd(&ts, &ts1, &ts); 1206 } else { 1207 nanotime(&ts); 1208 } 1209 *mp = sbcreatecontrol((caddr_t)&ts, sizeof(ts), 1210 SCM_REALTIME, SOL_SOCKET); 1211 if (*mp != NULL) { 1212 mp = &(*mp)->m_next; 1213 stamped = true; 1214 } 1215 } else if (CHECK_SO_CT(inp->inp_socket, SO_TS_MONOTONIC)) { 1216 struct timespec ts; 1217 1218 if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | 1219 M_TSTMP)) 1220 mbuf_tstmp2timespec(m, &ts); 1221 else 1222 nanouptime(&ts); 1223 *mp = sbcreatecontrol((caddr_t)&ts, sizeof(ts), 1224 SCM_MONOTONIC, SOL_SOCKET); 1225 if (*mp != NULL) { 1226 mp = &(*mp)->m_next; 1227 stamped = true; 1228 } 1229 } 1230 if (stamped && (m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | 1231 M_TSTMP)) { 1232 struct sock_timestamp_info sti; 1233 1234 bzero(&sti, sizeof(sti)); 1235 sti.st_info_flags = ST_INFO_HW; 1236 if ((m->m_flags & M_TSTMP_HPREC) != 0) 1237 sti.st_info_flags |= ST_INFO_HW_HPREC; 1238 *mp = sbcreatecontrol((caddr_t)&sti, sizeof(sti), SCM_TIME_INFO, 1239 SOL_SOCKET); 1240 if (*mp != NULL) 1241 mp = &(*mp)->m_next; 1242 } 1243 if (inp->inp_flags & INP_RECVDSTADDR) { 1244 *mp = sbcreatecontrol((caddr_t)&ip->ip_dst, 1245 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); 1246 if (*mp) 1247 mp = &(*mp)->m_next; 1248 } 1249 if (inp->inp_flags & INP_RECVTTL) { 1250 *mp = sbcreatecontrol((caddr_t)&ip->ip_ttl, 1251 sizeof(u_char), IP_RECVTTL, IPPROTO_IP); 1252 if (*mp) 1253 mp = &(*mp)->m_next; 1254 } 1255 #ifdef notyet 1256 /* XXX 1257 * Moving these out of udp_input() made them even more broken 1258 * than they already were. 1259 */ 1260 /* options were tossed already */ 1261 if (inp->inp_flags & INP_RECVOPTS) { 1262 *mp = sbcreatecontrol((caddr_t)opts_deleted_above, 1263 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); 1264 if (*mp) 1265 mp = &(*mp)->m_next; 1266 } 1267 /* ip_srcroute doesn't do what we want here, need to fix */ 1268 if (inp->inp_flags & INP_RECVRETOPTS) { 1269 *mp = sbcreatecontrol((caddr_t)ip_srcroute(m), 1270 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); 1271 if (*mp) 1272 mp = &(*mp)->m_next; 1273 } 1274 #endif 1275 if (inp->inp_flags & INP_RECVIF) { 1276 struct ifnet *ifp; 1277 struct sdlbuf { 1278 struct sockaddr_dl sdl; 1279 u_char pad[32]; 1280 } sdlbuf; 1281 struct sockaddr_dl *sdp; 1282 struct sockaddr_dl *sdl2 = &sdlbuf.sdl; 1283 1284 if ((ifp = m->m_pkthdr.rcvif) && 1285 ifp->if_index && ifp->if_index <= V_if_index) { 1286 sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr; 1287 /* 1288 * Change our mind and don't try copy. 1289 */ 1290 if (sdp->sdl_family != AF_LINK || 1291 sdp->sdl_len > sizeof(sdlbuf)) { 1292 goto makedummy; 1293 } 1294 bcopy(sdp, sdl2, sdp->sdl_len); 1295 } else { 1296 makedummy: 1297 sdl2->sdl_len = 1298 offsetof(struct sockaddr_dl, sdl_data[0]); 1299 sdl2->sdl_family = AF_LINK; 1300 sdl2->sdl_index = 0; 1301 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0; 1302 } 1303 *mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len, 1304 IP_RECVIF, IPPROTO_IP); 1305 if (*mp) 1306 mp = &(*mp)->m_next; 1307 } 1308 if (inp->inp_flags & INP_RECVTOS) { 1309 *mp = sbcreatecontrol((caddr_t)&ip->ip_tos, 1310 sizeof(u_char), IP_RECVTOS, IPPROTO_IP); 1311 if (*mp) 1312 mp = &(*mp)->m_next; 1313 } 1314 1315 if (inp->inp_flags2 & INP_RECVFLOWID) { 1316 uint32_t flowid, flow_type; 1317 1318 flowid = m->m_pkthdr.flowid; 1319 flow_type = M_HASHTYPE_GET(m); 1320 1321 /* 1322 * XXX should handle the failure of one or the 1323 * other - don't populate both? 1324 */ 1325 *mp = sbcreatecontrol((caddr_t) &flowid, 1326 sizeof(uint32_t), IP_FLOWID, IPPROTO_IP); 1327 if (*mp) 1328 mp = &(*mp)->m_next; 1329 *mp = sbcreatecontrol((caddr_t) &flow_type, 1330 sizeof(uint32_t), IP_FLOWTYPE, IPPROTO_IP); 1331 if (*mp) 1332 mp = &(*mp)->m_next; 1333 } 1334 1335 #ifdef RSS 1336 if (inp->inp_flags2 & INP_RECVRSSBUCKETID) { 1337 uint32_t flowid, flow_type; 1338 uint32_t rss_bucketid; 1339 1340 flowid = m->m_pkthdr.flowid; 1341 flow_type = M_HASHTYPE_GET(m); 1342 1343 if (rss_hash2bucket(flowid, flow_type, &rss_bucketid) == 0) { 1344 *mp = sbcreatecontrol((caddr_t) &rss_bucketid, 1345 sizeof(uint32_t), IP_RSSBUCKETID, IPPROTO_IP); 1346 if (*mp) 1347 mp = &(*mp)->m_next; 1348 } 1349 } 1350 #endif 1351 } 1352 1353 /* 1354 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the 1355 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on 1356 * locking. This code remains in ip_input.c as ip_mroute.c is optionally 1357 * compiled. 1358 */ 1359 VNET_DEFINE_STATIC(int, ip_rsvp_on); 1360 VNET_DEFINE(struct socket *, ip_rsvpd); 1361 1362 #define V_ip_rsvp_on VNET(ip_rsvp_on) 1363 1364 int 1365 ip_rsvp_init(struct socket *so) 1366 { 1367 1368 if (so->so_type != SOCK_RAW || 1369 so->so_proto->pr_protocol != IPPROTO_RSVP) 1370 return EOPNOTSUPP; 1371 1372 if (V_ip_rsvpd != NULL) 1373 return EADDRINUSE; 1374 1375 V_ip_rsvpd = so; 1376 /* 1377 * This may seem silly, but we need to be sure we don't over-increment 1378 * the RSVP counter, in case something slips up. 1379 */ 1380 if (!V_ip_rsvp_on) { 1381 V_ip_rsvp_on = 1; 1382 V_rsvp_on++; 1383 } 1384 1385 return 0; 1386 } 1387 1388 int 1389 ip_rsvp_done(void) 1390 { 1391 1392 V_ip_rsvpd = NULL; 1393 /* 1394 * This may seem silly, but we need to be sure we don't over-decrement 1395 * the RSVP counter, in case something slips up. 1396 */ 1397 if (V_ip_rsvp_on) { 1398 V_ip_rsvp_on = 0; 1399 V_rsvp_on--; 1400 } 1401 return 0; 1402 } 1403 1404 int 1405 rsvp_input(struct mbuf **mp, int *offp, int proto) 1406 { 1407 struct mbuf *m; 1408 1409 m = *mp; 1410 *mp = NULL; 1411 1412 if (rsvp_input_p) { /* call the real one if loaded */ 1413 *mp = m; 1414 rsvp_input_p(mp, offp, proto); 1415 return (IPPROTO_DONE); 1416 } 1417 1418 /* Can still get packets with rsvp_on = 0 if there is a local member 1419 * of the group to which the RSVP packet is addressed. But in this 1420 * case we want to throw the packet away. 1421 */ 1422 1423 if (!V_rsvp_on) { 1424 m_freem(m); 1425 return (IPPROTO_DONE); 1426 } 1427 1428 if (V_ip_rsvpd != NULL) { 1429 *mp = m; 1430 rip_input(mp, offp, proto); 1431 return (IPPROTO_DONE); 1432 } 1433 /* Drop the packet */ 1434 m_freem(m); 1435 return (IPPROTO_DONE); 1436 } 1437