/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr = NULL;
ip_dn_ctl_t *ip_dn_ctl_ptr = NULL;
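
/*
 * The two hook pointers above are expected to be filled in when the ipfw
 * and dummynet code attaches; rip_ctloutput() below forwards the
 * corresponding socket options through them only while they are non-NULL,
 * so those options simply fail with ENOPROTOOPT when the hooks are unset.
 */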

/*
 * hooks for multicast routing. They all default to NULL,
 * so leave them not initialized and rely on BSS being set to 0.
 */

/* The socket used to communicate with the multicast routing daemon. */
struct socket *ip_mrouter;

/* The various mrouter and rsvp functions */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	INP_INFO_LOCK_INIT(&ripcbinfo, "rip");
	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
	    &ripcbinfo.ipi_porthashmask);
	ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
	    NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };

static int
raw_append(struct inpcb *last, struct ip *ip, struct mbuf *n)
{
	int policyfail = 0;

	INP_RLOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)&ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;

	INP_INFO_RLOCK(&ripcbinfo);
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;
	LIST_FOREACH(inp, &ripcb, inp_list) {
		INP_RLOCK(inp);
		if (inp->inp_ip_p && inp->inp_ip_p != proto) {
	docontinue:
			INP_RUNLOCK(inp);
			continue;
		}
#ifdef INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			goto docontinue;
#endif
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			goto docontinue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			goto docontinue;
		if (jailed(inp->inp_socket->so_cred))
			if (htonl(prison_getip(inp->inp_socket->so_cred)) !=
			    ip->ip_dst.s_addr)
				goto docontinue;
		if (last) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) raw_append(last, ip, n);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		last = inp;
	}
	if (last != NULL) {
		if (raw_append(last, ip, m) != 0)
			ipstat.ips_delivered--;
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		ipstat.ips_noproto++;
		ipstat.ips_delivered--;
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
}

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		if (jailed(inp->inp_socket->so_cred))
			ip->ip_src.s_addr =
			    htonl(prison_getip(inp->inp_socket->so_cred));
		else
			ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		if (jailed(inp->inp_socket->so_cred)) {
			if (ip->ip_src.s_addr !=
			    htonl(prison_getip(inp->inp_socket->so_cred))) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EPERM);
			}
		}
		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2))
		    && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		ipstat.ips_rawout++;
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}
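
/*
 * Illustrative userland sketch (not part of this file): a suitably
 * privileged process exercises the header-included path above roughly as
 * follows, where 'buf' holds a complete IP header plus payload and 'sin'
 * is the destination sockaddr_in; the names are only for the example.
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	int on = 1;
 *
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
 *	sendto(s, buf, len, 0, (struct sockaddr *)&sin, sizeof(sin));
 */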

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP)
		return (EINVAL);

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				break;
			}
		}
		break;

	case PRC_IFUP:
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE))
			return;
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

u_long	rip_sendspace = 9216;
u_long	rip_recvspace = 9216;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&ripcbinfo);
	error = in_pcballoc(so, &ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	INP_INFO_WUNLOCK(&ripcbinfo);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&ripcbinfo);
	INP_WLOCK(inp);
	if (so == ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{

	INP_WLOCK_ASSERT(inp);

	inp->inp_faddr.s_addr = INADDR_ANY;
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	INP_INFO_WLOCK(&ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}
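
/*
 * rip_abort() above, and rip_close() and rip_disconnect() below, all funnel
 * through rip_dodisconnect(); only rip_disconnect() requires that the socket
 * actually be connected first.
 */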

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	INP_INFO_WLOCK(&ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	if (jailed(td->td_ucred)) {
		if (addr->sin_addr.s_addr == INADDR_ANY)
			addr->sin_addr.s_addr =
			    htonl(prison_getip(td->td_ucred));
		if (htonl(prison_getip(td->td_ucred)) != addr->sin_addr.s_addr)
			return (EADDRNOTAVAIL);
	}

	if (TAILQ_EMPTY(&ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     ifa_ifwithaddr((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_WLOCK(inp);
	inp->inp_laddr = addr->sin_addr;
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_WLOCK(inp);
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
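
/*
 * rip_pcblist() below backs the net.inet.raw.pcblist sysctl defined after
 * it, which userland tools such as netstat(1) read to enumerate the active
 * raw IP sockets.
 */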

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&ripcbinfo);
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&ripcbinfo);
	for (inp = LIST_FIRST(ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0) {
			/* XXX held references? */
			inp_list[i++] = inp;
		}
		INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;
			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_INFO_RLOCK(&ripcbinfo);
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};