/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);
int	(*ng_ipfw_input_p)(struct mbuf **, int,
	    struct ip_fw_args *, int);

#ifdef INET
/*
 * Hooks for multicast routing.  They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define	INP_PCBHASH_RAW_SIZE	256
#define	INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
    (((proto) + (laddr) + (faddr)) % (mask) + 1)

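/*
 * Worked example (illustration only, not additional code): with the default
 * INP_PCBHASH_RAW_SIZE of 256 the hash mask is 255, so a fully specified
 * socket, e.g. protocol 1 (ICMP) with laddr 10.0.0.1 and faddr 10.0.0.2 on a
 * little-endian host (s_addr values are used as raw 32-bit words), lands in
 * bucket ((1 + 0x0100000a + 0x0200000a) % 255) + 1 == 25.  The "+ 1" keeps
 * fully specified sockets out of bucket 0, which rip_inshash() below reserves
 * for sockets with a wildcard protocol or address.
 */
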
#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
	    IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
void
rip_destroy(void)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
#endif

#ifdef INET
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Setup generic address and protocol structures for raw_input routine, then
 * pass them along with mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ifnet *ifp;
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;
	/*
	 * Add back the IP header length which was
	 * removed by ip_input().  Raw sockets do
	 * not modify the packet except for some
	 * byte order swaps.
	 */
	ip->ip_len += off;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface.  imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				continue;
			}
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		IPSTAT_INC(ips_noproto);
		IPSTAT_DEC(ips_delivered);
	}
}

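/*
 * Illustrative userland sketch (not compiled as part of this file; helper
 * name and error handling are purely illustrative): rip_input() above hands
 * every matching raw socket its own copy of the datagram, IP header
 * included, so a suitably privileged process could watch ICMP roughly like
 * this:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/ip.h>
 *	#include <arpa/inet.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char buf[2048];
 *		struct sockaddr_in from;
 *		socklen_t fromlen = sizeof(from);
 *		// Creating the socket runs rip_attach(), which requires
 *		// PRIV_NETINET_RAW; run as root or in a suitable jail.
 *		int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *
 *		if (s == -1)
 *			return (1);
 *		for (;;) {
 *			ssize_t n = recvfrom(s, buf, sizeof(buf), 0,
 *			    (struct sockaddr *)&from, &fromlen);
 *			if (n < 0)
 *				break;
 *			// buf starts with the struct ip header; on this
 *			// vintage of the stack ip_len and ip_off arrive in
 *			// host byte order.
 *			printf("%zd bytes of ICMP from %s\n", n,
 *			    inet_ntoa(from.sin_addr));
 *		}
 *		return (0);
 *	}
 */
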
/*
 * Generate IP header and pass packet to ip_output.  Tack on options user may
 * have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return(ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.  We do not go through the
			 * pain of in_pcbladdr() for raw sockets.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY)
				error = prison_get_ip4(inp->inp_cred,
				    &ip->ip_src);
			else
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

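/*
 * Illustrative userland sketch (not compiled as part of this file; function
 * name, payload size, and the choice of protocol 253 are assumptions made
 * for the example): the INP_HDRINCL branch of rip_output() above is reached
 * after setsockopt(IP_HDRINCL), in which case the caller supplies the whole
 * IP header and the kernel only sanity-checks it, picking an ip_id when the
 * caller leaves it zero.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/ip.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	static int
 *	send_hdrincl(const char *dstaddr)
 *	{
 *		struct {
 *			struct ip ip;
 *			char payload[8];
 *		} pkt;
 *		struct sockaddr_in dst;
 *		int on = 1;
 *		int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *		if (s == -1 ||
 *		    setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on,
 *		    sizeof(on)) != 0)
 *			return (-1);
 *		memset(&pkt, 0, sizeof(pkt));
 *		pkt.ip.ip_v = IPVERSION;
 *		pkt.ip.ip_hl = sizeof(struct ip) >> 2;
 *		pkt.ip.ip_ttl = 64;
 *		pkt.ip.ip_p = 253;		// experimental protocol number
 *		pkt.ip.ip_len = sizeof(pkt);	// host order on this stack
 *		pkt.ip.ip_id = 0;		// 0 lets rip_output() pick one
 *		inet_pton(AF_INET, dstaddr, &pkt.ip.ip_dst);
 *
 *		memset(&dst, 0, sizeof(dst));
 *		dst.sin_len = sizeof(dst);
 *		dst.sin_family = AF_INET;
 *		dst.sin_addr = pkt.ip.ip_dst;
 *		return (sendto(s, &pkt, sizeof(pkt), 0,
 *		    (struct sockaddr *)&dst, sizeof(dst)) < 0 ? -1 : 0);
 *	}
 */
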
/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

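/*
 * Illustrative userland sketch (not compiled as part of this file; helper
 * name is an assumption): the SOL_SOCKET/SO_SETFIB special case at the top
 * of rip_ctloutput() copies the socket's FIB number into the inpcb, so on a
 * kernel configured with multiple FIBs a process could steer a raw socket's
 * route lookups like this:
 *
 *	#include <sys/socket.h>
 *
 *	static int
 *	set_raw_fib(int s, int fib)
 *	{
 *		return (setsockopt(s, SOL_SOCKET, SO_SETFIB,
 *		    &fib, sizeof(fib)));
 *	}
 */
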
/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK();
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK();
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK();
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK();
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags &= ~IFA_RTSELF;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags |= IFA_RTSELF;

		ifa_free(&ia->ia_ifa);
		break;
	}
}

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return EPROTONOSUPPORT;
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

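/*
 * Illustrative userland sketch (not compiled as part of this file; helper
 * name and the RFC 5737 documentation addresses are placeholders): rip_bind()
 * and rip_connect() above only narrow which datagrams rip_input() will
 * deliver, by local and foreign address respectively, and re-hash the PCB;
 * once protocol, local and foreign address are all set, the socket moves
 * from the wildcard bucket to an exact-match bucket.  Note that the bound
 * address must normally be configured on the host (see the
 * ifa_ifwithaddr_check() test above).
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	static int
 *	narrow_raw_socket(int s)
 *	{
 *		struct sockaddr_in sin;
 *
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_len = sizeof(sin);
 *		sin.sin_family = AF_INET;
 *		inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *		if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1)
 *			return (-1);
 *		inet_pton(AF_INET, "198.51.100.1", &sin.sin_addr);
 *		return (connect(s, (struct sockaddr *)&sin, sizeof(sin)));
 *	}
 */
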
static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
#endif /* INET */

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */
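
/*
 * Illustrative userland sketch (not compiled as part of this file; helper
 * name is an assumption): the SYSCTL_ULONG() knobs above surface as
 * net.inet.raw.maxdgram and net.inet.raw.recvspace, the limits used by
 * rip_attach()/soreserve(), and net.inet.raw.pcblist is the opaque OID
 * served by rip_pcblist() that socket-listing tools can consume.  A program
 * could raise the datagram size limit like this:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	raise_raw_maxdgram(u_long bytes)
 *	{
 *		return (sysctlbyname("net.inet.raw.maxdgram", NULL, NULL,
 *		    &bytes, sizeof(bytes)));
 *	}
 */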