/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_icmp.h>

#include <netipsec/ipsec_support.h>

#include <machine/stdarg.h>
#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define V_ripcb		VNET(ripcb)
#define V_ripcbinfo	VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
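/*
 * These hook pointers start out NULL and are typically only filled in when
 * the corresponding module (ipfw, dummynet, ipdivert, ng_ipfw) registers
 * itself, which is why consumers such as rip_ctloutput() test them for NULL
 * before calling through them.
 */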
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int (*ip_dn_ctl_ptr)(struct sockopt *);
int (*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *);
void (*ip_divert_ptr)(struct mbuf *, bool);
int (*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool);

#ifdef INET
/*
 * Hooks for multicast routing. They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

int (*rsvp_input_p)(struct mbuf **, int *, int);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

extern struct protosw inetsw[];

u_long rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
	(((proto) + (laddr) + (faddr)) % (mask) + 1)

#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	CK_LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
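/*
 * rip_zone_change() tracks changes to the maxsockets limit and resizes the
 * raw inpcb zone accordingly; rip_inpcb_init() is the per-inpcb zone
 * constructor; rip_init() creates the raw PCB database (V_ripcbinfo) and
 * registers the maxsockets_change event handler.
 */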
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
static void
rip_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
#endif

#ifdef INET
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
			policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input routine,
 * then pass them along with the mbuf chain.
 */
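/*
 * Delivery is done in two passes over the raw PCB hash: the first pass walks
 * the bucket selected by INP_PCBHASH_RAW() and matches only fully specified
 * sockets (protocol, local and foreign address all set); the second pass
 * walks bucket 0, which holds the wildcard sockets.  Every matching socket
 * except the last receives a copy of the datagram via rip_append(); the last
 * match is handed the original mbuf.  If nothing matched and no other
 * protocol switch entry claims this protocol, an ICMP protocol-unreachable
 * error is generated.
 */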
int
rip_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	struct epoch_tracker et;
	int hash;

	*mp = NULL;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_1;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_1;
		}
		last = inp;
		continue;
	skip_1:
		INP_RUNLOCK(inp);
	}
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_2;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_2;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface. imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				goto skip_2;
			}
		}
		last = inp;
		continue;
	skip_2:
		INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
			IPSTAT_INC(ips_noproto);
			IPSTAT_DEC(ips_delivered);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
		} else {
			m_freem(m);
		}
	}
	return (IPPROTO_DONE);
}

/*
 * Generate IP header and pass packet to ip_output.  Tack on options the user
 * may have set up with a control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, ...)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	va_list ap;
	u_long dst;
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;
	int cnt;
	u_char opttype, optlen, *cp;

	va_start(ap, so);
	dst = va_arg(ap, u_long);
	va_end(ap);

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY) {
				error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src,
				    inp->inp_cred);
			} else {
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			}
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
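		/*
		 * Concretely: reject packets that carry IP options in the
		 * supplied header while inp_options is also set, packets
		 * whose ip_len disagrees with the mbuf chain length, and
		 * packets whose ip_len is shorter than the header itself.
		 */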
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)
		    || (ntohs(ip->ip_len) < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		/*
		 * Don't allow IP options which do not have the required
		 * structure as specified in section 3.1 of RFC 791 on
		 * pages 15-23.
		 */
		cp = (u_char *)(ip + 1);
		cnt = (ip->ip_hl << 2) - sizeof (struct ip);
		for (; cnt > 0; cnt -= optlen, cp += optlen) {
			opttype = cp[IPOPT_OPTVAL];
			if (opttype == IPOPT_EOL)
				break;
			if (opttype == IPOPT_NOP) {
				optlen = 1;
				continue;
			}
			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
			    optlen > cnt) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
		}
		/*
		 * This doesn't allow the application to specify an ID of
		 * zero, but we have had this limitation since the beginning
		 * of history.
		 */
		if (ip->ip_id == 0)
			ip_fillid(ip);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jails, and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
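/*
 * In both the PRC_IFDOWN and PRC_IFUP cases the interface address list is
 * walked under IN_IFADDR_RLOCK(); once a matching ifaddr is found it is held
 * with ifa_ref() so the lock can be dropped before the route updates, and
 * released with ifa_free() afterwards.
 */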
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				/*
				 * in_scrubprefix() kills the interface route.
				 */
				in_scrubprefix(ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);

		ifa_free(&ia->ia_ifa);
		break;
	}
}

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (CK_STAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (CK_STAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
#endif /* INET */

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	struct epoch_tracker et;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_WLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = CK_LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			in_pcbtoxinpcb(inp, &xi);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		struct epoch_tracker et;
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */