/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
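/*
 * These hooks start out NULL (explicitly or via zeroed BSS) and are
 * expected to be filled in when the corresponding module (ipfw, dummynet,
 * divert, ng_ipfw, pf) is loaded; consumers in this file check them for
 * NULL before calling through them.
 */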
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);
int	(*ng_ipfw_input_p)(struct mbuf **, int,
	    struct ip_fw_args *, int);

/* Hook for telling pf that the destination address changed */
void	(*m_addr_chg_pf_p)(struct mbuf *m);

#ifdef INET
/*
 * Hooks for multicast routing. They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
	(((proto) + (laddr) + (faddr)) % (mask) + 1)

#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
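/*
 * The pcbinfo is created with INP_PCBHASH_RAW_SIZE hash buckets; fully
 * specified pcbs (protocol, local and foreign address) go into the bucket
 * computed by INP_PCBHASH_RAW(), everything else into bucket 0, which
 * rip_input() scans as the wildcard list.  rip_zone_change() is registered
 * in rip_init() as a maxsockets_change event handler so the pcb zone limit
 * tracks maxsockets.
 */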
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
	    IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
void
rip_destroy(void)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
#endif

#ifdef INET
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Setup generic address and protocol structures for raw_input routine, then
 * pass them along with mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ifnet *ifp;
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;
	/*
	 * Add back the IP header length which was
	 * removed by ip_input(). Raw sockets do
	 * not modify the packet except for some
	 * byte order swaps.
	 */
	ip->ip_len += off;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface. imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				continue;
			}
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		IPSTAT_INC(ips_noproto);
		IPSTAT_DEC(ips_delivered);
	}
}

/*
 * Generate IP header and pass packet to ip_output. Tack on options user may
 * have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it. Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
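	/*
	 * INP_HDRINCL is toggled by the IP_HDRINCL socket option (see
	 * rip_ctloutput() below); when it is set the caller supplies the
	 * full IP header and the header-included branch only sanity-checks
	 * it before handing it to ip_output() with IP_RAWOUTPUT.
	 */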
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return(ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails. We do not go through the
			 * pain of in_pcbladdr() for raw sockets.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY)
				error = prison_get_ip4(inp->inp_cred,
				    &ip->ip_src);
			else
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks. However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required. Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required. For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address. It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK();
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes. This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK();
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK();
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK();
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags &= ~IFA_RTSELF;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags |= IFA_RTSELF;

		ifa_free(&ia->ia_ifa);
		break;
	}
}

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return EPROTONOSUPPORT;
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

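/*
 * Raw sockets have no notion of ports: bind() and connect() merely record
 * the local and foreign addresses in the inpcb and move it to the right
 * bucket via rip_delhash()/rip_inshash().
 */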
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
#endif /* INET */

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
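	/*
	 * Snapshot the pcb list under the info read lock, taking a
	 * reference on each visible inpcb with in_pcbref() so it stays
	 * valid while the data is copied out, then drop the references
	 * with in_pcbrele_rlocked() once the copy is done.
	 */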
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state. If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */