/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_fib.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_icmp.h>

#include <netipsec/ipsec_support.h>

#include <machine/stdarg.h>
#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb		VNET(ripcb)
#define	V_ripcbinfo	VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, bool);
int	(*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool);

#ifdef INET
/*
 * Hooks for multicast routing.  They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

int (*rsvp_input_p)(struct mbuf **, int *, int);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

extern struct protosw inetsw[];

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define	INP_PCBHASH_RAW_SIZE	256
#define	INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
	(((proto) + (laddr) + (faddr)) % (mask) + 1)

#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	CK_LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */
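
/*
 * Summary of the hashing scheme implemented above: a raw PCB with a
 * non-zero protocol and fully specified local and foreign addresses is
 * placed by rip_inshash() on a non-zero chain, 1 .. ipi_hashmask, as
 * computed by INP_PCBHASH_RAW(); every other raw PCB lands on chain 0,
 * which rip_input() below walks as the wildcard list.  With the default
 * table size of INP_PCBHASH_RAW_SIZE (256) buckets, exact matches thus
 * occupy chains 1 through 255.
 */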

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
static void
rip_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
#endif

#ifdef INET
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
			policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			soroverflow_locked(so);
			m_freem(n);
			if (opts)
				m_freem(opts);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}
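
/*
 * Delivery model of rip_input() below, summarized for the reader: the
 * first loop walks the hash chain of PCBs that match the (protocol,
 * local address, foreign address) triple exactly; the second walks
 * chain 0, which holds the wildcard PCBs.  Every matching socket but
 * the last receives an m_copym() copy of the datagram, and the original
 * mbuf chain goes to the last match.  If no raw socket matches and no
 * other protocol claims the IP protocol number, an ICMP protocol
 * unreachable is returned to the sender.
 */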

/*
 * Set up generic address and protocol structures for raw_input routine, then
 * pass them along with mbuf chain.
 */
int
rip_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	NET_EPOCH_ASSERT();

	*mp = NULL;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_1;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_1;
		}
		last = inp;
		continue;
	skip_1:
		INP_RUNLOCK(inp);
	}
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_2;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_2;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface.  imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				goto skip_2;
			}
		}
		last = inp;
		continue;
	skip_2:
		INP_RUNLOCK(inp);
	}
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
			IPSTAT_INC(ips_noproto);
			IPSTAT_DEC(ips_delivered);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
		} else {
			m_freem(m);
		}
	}
	return (IPPROTO_DONE);
}

/*
 * Generate IP header and pass packet to ip_output.  Tack on options user may
 * have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, ...)
{
	struct epoch_tracker et;
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	va_list ap;
	u_long dst;
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;
	int cnt, hlen;
	u_char opttype, optlen, *cp;

	va_start(ap, so);
	dst = va_arg(ap, u_long);
	va_end(ap);

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
#ifdef ROUTE_MPATH
		if (CALC_FLOWID_OUTBOUND) {
			uint32_t hash_type, hash_val;

			hash_val = fib4_calc_software_hash(ip->ip_src,
			    ip->ip_dst, 0, 0, ip->ip_p, &hash_type);
			m->m_pkthdr.flowid = hash_val;
			M_HASHTYPE_SET(m, hash_type);
			flags |= IP_NODEFAULTFLOWID;
		}
#endif
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY) {
				NET_EPOCH_ENTER(et);
				error = in_pcbladdr(inp, &ip->ip_dst,
				    &ip->ip_src, inp->inp_cred);
				NET_EPOCH_EXIT(et);
			} else {
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			}
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		if (m->m_pkthdr.len < sizeof(*ip)) {
			m_freem(m);
			return (EINVAL);
		}
		m = m_pullup(m, sizeof(*ip));
		if (m == NULL)
			return (ENOMEM);
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		if (m->m_len < hlen) {
			m = m_pullup(m, hlen);
			if (m == NULL)
				return (EINVAL);
			ip = mtod(m, struct ip *);
		}
#ifdef ROUTE_MPATH
		if (CALC_FLOWID_OUTBOUND) {
			uint32_t hash_type, hash_val;

			hash_val = fib4_calc_software_hash(ip->ip_dst,
			    ip->ip_src, 0, 0, ip->ip_p, &hash_type);
			m->m_pkthdr.flowid = hash_val;
			M_HASHTYPE_SET(m, hash_type);
			flags |= IP_NODEFAULTFLOWID;
		}
#endif
		INP_RLOCK(inp);
		/*
		 * Don't allow both user-specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if ((hlen < sizeof (*ip))
		    || ((hlen > sizeof (*ip)) && inp->inp_options)
		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}
		/*
		 * Don't allow IP options which do not have the required
		 * structure as specified in section 3.1 of RFC 791 on
		 * pages 15-23.
		 */
		cp = (u_char *)(ip + 1);
		cnt = hlen - sizeof (struct ip);
		for (; cnt > 0; cnt -= optlen, cp += optlen) {
			opttype = cp[IPOPT_OPTVAL];
			if (opttype == IPOPT_EOL)
				break;
			if (opttype == IPOPT_NOP) {
				optlen = 1;
				continue;
			}
			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
			    optlen > cnt) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
		}
		/*
		 * This doesn't allow the application to specify an ID of
		 * zero, but we got this limitation from the beginning of
		 * history.
		 */
		if (ip->ip_id == 0)
			ip_fillid(ip);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	NET_EPOCH_ENTER(et);
	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	NET_EPOCH_EXIT(et);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jails, and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
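
/*
 * Illustrative userland sketch (not part of the kernel build) of the path
 * exercised by rip_attach(), rip_ctloutput() and rip_output() above; it
 * assumes a process holding PRIV_NETINET_RAW (typically root) and elides
 * buffer construction and error handling.  With IP_HDRINCL set, the caller
 * supplies the complete IP header, which the INP_HDRINCL branch of
 * rip_output() validates before handing the packet to ip_output():
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	int one = 1;
 *
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &one, sizeof(one));
 *	// "buf" begins with a struct ip filled in by the caller
 *	sendto(s, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */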

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				/*
				 * in_scrubprefix() kills the interface route.
				 */
				in_scrubprefix(ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);

		rt_addrmsg(RTM_ADD, &ia->ia_ifa, ia->ia_ifp->if_fib);
		err = in_handle_ifaddr_route(RTM_ADD, ia);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);

		ifa_free(&ia->ia_ifa);
		break;
	}
}

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_family != AF_INET)
		return (EAFNOSUPPORT);
	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (CK_STAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (CK_STAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	if (control != NULL) {
		m_freem(control);
		control = NULL;
	}

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			error = EISCONN;
			goto release;
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		error = 0;
		if (nam == NULL)
			error = ENOTCONN;
		else if (nam->sa_family != AF_INET)
			error = EAFNOSUPPORT;
		else if (nam->sa_len != sizeof(struct sockaddr_in))
			error = EINVAL;
		if (error != 0)
			goto release;
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));

release:
	m_freem(m);
	return (error);
}
#endif /* INET */
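
/*
 * Export the list of raw PCBs to userland through the net.inet.raw.pcblist
 * sysctl node defined below; userland tools such as netstat(1) typically
 * read this node to display active raw IP sockets.
 */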
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	struct xinpgen xig;
	struct epoch_tracker et;
	struct inpcb *inp;
	int error;

	if (req->newptr != 0)
		return (EPERM);

	if (req->oldptr == 0) {
		int n;

		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
		return (error);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = V_ripcbinfo.ipi_count;
	xig.xig_gen = V_ripcbinfo.ipi_gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	NET_EPOCH_ENTER(et);
	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead);
	    inp != NULL;
	    inp = CK_LIST_NEXT(inp, inp_list)) {
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= xig.xig_gen &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			struct xinpcb xi;

			in_pcbtoxinpcb(inp, &xi);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
			if (error)
				break;
		} else
			INP_RUNLOCK(inp);
	}
	NET_EPOCH_EXIT(et);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}

	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    rip_pcblist, "S,xinpcb",
    "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */