/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 * $FreeBSD$
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#endif /*FAST_IPSEC*/

#ifdef IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr = NULL;
ip_dn_ctl_t *ip_dn_ctl_ptr = NULL;

/*
 * Hooks for multicast routing.  They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/* The socket used to communicate with the multicast routing daemon. */

struct socket *ip_mrouter;

/* The various mrouter and rsvp functions */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);

/*
 * Raw interface to IP protocol.
 */

static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

/*
 * Initialize raw connection block queue.
 */
void
rip_init()
{
	INP_INFO_LOCK_INIT(&ripcbinfo, "rip");
	LIST_INIT(&ripcb);
	ripcbinfo.listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier to
	 * allocate a one entry hash list than it is to check all over the
	 * place for hashbase == NULL.
	 */
	ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
	ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);
	ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
	    NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };

static int
raw_append(struct inpcb *last, struct ip *ip, struct mbuf *n)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(FAST_IPSEC)
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
#ifdef IPSEC
		ipsecstat.in_polvio++;
#endif /*IPSEC*/
		/* do not inject data to pcb */
	}
#endif /*IPSEC || FAST_IPSEC*/
#ifdef MAC
	if (!policyfail && mac_check_inpcb_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)&ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input routine,
 * then pass them along with the mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;

	INP_INFO_RLOCK(&ripcbinfo);
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;
	LIST_FOREACH(inp, &ripcb, inp_list) {
		INP_LOCK(inp);
		if (inp->inp_ip_p && inp->inp_ip_p != proto) {
	docontinue:
			INP_UNLOCK(inp);
			continue;
		}
#ifdef INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			goto docontinue;
#endif
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			goto docontinue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			goto docontinue;
		if (jailed(inp->inp_socket->so_cred))
			if (htonl(prison_getip(inp->inp_socket->so_cred)) !=
			    ip->ip_dst.s_addr)
				goto docontinue;
		if (last) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) raw_append(last, ip, n);
			/* XXX count dropped packet */
			INP_UNLOCK(last);
		}
		last = inp;
	}
	if (last != NULL) {
		if (raw_append(last, ip, m) != 0)
			ipstat.ips_delivered--;
		INP_UNLOCK(last);
	} else {
		m_freem(m);
		ipstat.ips_noproto++;
		ipstat.ips_delivered--;
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
}
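
/*
 * Illustrative sketch only (kept under #if 0, never compiled): a minimal
 * userland reader for the delivery path served by rip_input().  A process
 * opens a raw socket bound to one IP protocol; each matching datagram that
 * reaches rip_input() is then copied to that socket's receive buffer with
 * the IP header still attached.  Names and values below are example choices,
 * not part of this file.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <unistd.h>

static void
raw_icmp_reader(void)
{
	char buf[2048];
	struct sockaddr_in from;
	socklen_t fromlen = sizeof(from);
	ssize_t n;
	int s;

	/* Creating the socket requires privilege; see rip_attach(). */
	s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	if (s == -1)
		return;
	/* Each datagram arrives with its IP header prepended. */
	n = recvfrom(s, buf, sizeof(buf), 0, (struct sockaddr *)&from,
	    &fromlen);
	if (n > 0)
		printf("%zd bytes from %s\n", n, inet_ntoa(from.sin_addr));
	close(s);
}
#endif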

/*
 * Generate an IP header and pass the packet to ip_output.  Tack on options
 * the user may have set up with a control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_LOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		if (jailed(inp->inp_socket->so_cred))
			ip->ip_src.s_addr =
			    htonl(prison_getip(inp->inp_socket->so_cred));
		else
			ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_LOCK(inp);
		ip = mtod(m, struct ip *);
		if (jailed(inp->inp_socket->so_cred)) {
			if (ip->ip_src.s_addr !=
			    htonl(prison_getip(inp->inp_socket->so_cred))) {
				INP_UNLOCK(inp);
				m_freem(m);
				return (EPERM);
			}
		}
		/*
		 * Don't allow both user-specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof(*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_UNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		ipstat.ips_rawout++;
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_UNLOCK(inp);
	return (error);
}
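
/*
 * Illustrative sketch only (kept under #if 0, never compiled) of the
 * INP_HDRINCL branch above: the process enables IP_HDRINCL, builds its own
 * IP header, and the kernel only validates it (length/option checks above)
 * before handing it to ip_output() with IP_RAWOUTPUT set.  The protocol
 * number and buffer sizes are example choices; note that on this platform
 * ip_len and ip_off are supplied in host byte order, and a zero ip_id or
 * checksum is filled in by the kernel.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static int
send_raw_with_header(const char *dst, const char *payload, size_t len)
{
	char pkt[sizeof(struct ip) + 1400];
	struct sockaddr_in sin;
	struct ip *ip = (struct ip *)pkt;
	int s, on = 1;

	if (len > sizeof(pkt) - sizeof(struct ip))
		return (-1);
	s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (s == -1)
		return (-1);
	/* Tell the kernel we supply the IP header ourselves. */
	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));

	memset(pkt, 0, sizeof(pkt));
	ip->ip_v = IPVERSION;
	ip->ip_hl = sizeof(struct ip) >> 2;	/* 20-byte header, no options */
	ip->ip_len = sizeof(struct ip) + len;	/* host byte order here */
	ip->ip_ttl = 64;
	ip->ip_p = 253;				/* experimental protocol */
	ip->ip_dst.s_addr = inet_addr(dst);
	memcpy(pkt + sizeof(struct ip), payload, len);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_addr = ip->ip_dst;

	(void)sendto(s, pkt, sizeof(struct ip) + len, 0,
	    (struct sockaddr *)&sin, sizeof(sin));
	close(s);
	return (0);
}
#endif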

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking suser() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP)
		return (EINVAL);

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			/*
			 * XXXRW: Isn't this checked one layer down?  Yes, it
			 * is.
			 */
			error = priv_check(curthread, PRIV_NETINET_IPFW);
			if (error != 0)
				return (error);
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_GET:
			error = priv_check(curthread, PRIV_NETINET_DUMMYNET);
			if (error != 0)
				return (error);
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			/*
			 * XXXRW: Isn't this checked one layer down?
			 */
			error = priv_check(curthread, PRIV_NETINET_IPFW);
			if (error != 0)
				return (error);
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			error = priv_check(curthread, PRIV_NETINET_DUMMYNET);
			if (error != 0)
				return (error);
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa &&
			    (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				break;
			}
		}
		break;

	case PRC_IFUP:
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE))
			return;
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

u_long rip_sendspace = 9216;
u_long rip_recvspace = 9216;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
	/*
	 * XXXRW: Centralize privilege decision in kern_jail.c.
	 */
	if (jailed(td->td_ucred) && !jail_allow_raw_sockets)
		return (EPERM);
	error = priv_check_cred(td->td_ucred, PRIV_NETINET_RAW,
	    SUSER_ALLOWJAIL);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&ripcbinfo);
	error = in_pcballoc(so, &ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	INP_INFO_WUNLOCK(&ripcbinfo);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	INP_UNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	if (so == ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{

	INP_LOCK_ASSERT(inp);

	inp->inp_faddr.s_addr = INADDR_ANY;
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	rip_dodisconnect(so, inp);
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	rip_dodisconnect(so, inp);
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	rip_dodisconnect(so, inp);
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	if (jailed(td->td_ucred)) {
		if (addr->sin_addr.s_addr == INADDR_ANY)
			addr->sin_addr.s_addr =
			    htonl(prison_getip(td->td_ucred));
		if (htonl(prison_getip(td->td_ucred)) != addr->sin_addr.s_addr)
			return (EADDRNOTAVAIL);
	}

	if (TAILQ_EMPTY(&ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     ifa_ifwithaddr((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	inp->inp_laddr = addr->sin_addr;
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
	INP_LOCK(inp);
	socantsendmore(so);
	INP_UNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
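
/*
 * Illustrative sketch only (kept under #if 0, never compiled) of the
 * connected-socket case handled by rip_connect() and rip_send() above:
 * after connect(2), plain send(2) works and the destination is taken from
 * inp_faddr, while passing an address again would fail with EISCONN.  The
 * protocol number is an example choice, not part of this file.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static void
raw_connected_send(const char *dst, const void *msg, size_t len)
{
	struct sockaddr_in sin;
	int s;

	s = socket(AF_INET, SOCK_RAW, 253);	/* experimental protocol */
	if (s == -1)
		return;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_addr.s_addr = inet_addr(dst);

	/* rip_connect(): records inp_faddr and marks the socket connected. */
	if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) == 0)
		(void)send(s, msg, len, 0);	/* rip_send() uses inp_faddr */
	close(s);
}
#endif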

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&ripcbinfo);
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&ripcbinfo);
	for (inp = LIST_FIRST(ripcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0) {
			/* XXX held references? */
			inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_UNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_UNLOCK(inp);
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&ripcbinfo);
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}
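
/*
 * Illustrative sketch only (kept under #if 0, never compiled) of consuming
 * the pcblist sysctl exported below, which is how netstat-style tools walk
 * the raw PCBs: size the buffer with a NULL old pointer, fetch the
 * xinpgen/xinpcb records, then parse them.  The function name here is an
 * example, not part of this file.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

static void
dump_raw_pcblist(void)
{
	size_t len = 0;
	char *buf;

	/* First call sizes the buffer, second call fetches the records. */
	if (sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0) == -1)
		return;
	buf = malloc(len);
	if (buf == NULL)
		return;
	if (sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0) == 0)
		printf("pcblist: %zu bytes of xinpgen/xinpcb data\n", len);
	free(buf);
}
#endif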

/*
 * This is the wrapper function for in_setsockaddr.  We just pass down the
 * pcbinfo for in_setsockaddr to lock.
 */
static int
rip_sockaddr(struct socket *so, struct sockaddr **nam)
{
	return (in_setsockaddr(so, nam, &ripcbinfo));
}

/*
 * This is the wrapper function for in_setpeeraddr.  We just pass down the
 * pcbinfo for in_setpeeraddr to lock.
 */
static int
rip_peeraddr(struct socket *so, struct sockaddr **nam)
{
	return (in_setpeeraddr(so, nam, &ripcbinfo));
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
	.pru_abort = rip_abort,
	.pru_attach = rip_attach,
	.pru_bind = rip_bind,
	.pru_connect = rip_connect,
	.pru_control = in_control,
	.pru_detach = rip_detach,
	.pru_disconnect = rip_disconnect,
	.pru_peeraddr = rip_peeraddr,
	.pru_send = rip_send,
	.pru_shutdown = rip_shutdown,
	.pru_sockaddr = rip_sockaddr,
	.pru_sosetlabel = in_pcbsosetlabel,
	.pru_close = rip_close,
};