/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 * $FreeBSD$
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#endif /*FAST_IPSEC*/

#ifdef IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr = NULL;
ip_dn_ctl_t *ip_dn_ctl_ptr = NULL;

/*
 * hooks for multicast routing. They all default to NULL,
 * so leave them not initialized and rely on BSS being set to 0.
 */

/* The socket used to communicate with the multicast routing daemon. */
struct socket *ip_mrouter;

/* The various mrouter and rsvp functions */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);

/*
 * Nominal space allocated to a raw ip socket.
 */
#define	RIPSNDQ		8192
#define	RIPRCVQ		8192

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init()
{
	INP_INFO_LOCK_INIT(&ripcbinfo, "rip");
	LIST_INIT(&ripcb);
	ripcbinfo.listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for hashbase == NULL.
	 */
	ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
	ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);
	ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };

static int
raw_append(struct inpcb *last, struct ip *ip, struct mbuf *n)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(FAST_IPSEC)
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
#ifdef IPSEC
		ipsecstat.in_polvio++;
#endif /*IPSEC*/
		/* do not inject data to pcb */
	}
#endif /*IPSEC || FAST_IPSEC*/
#ifdef MAC
	if (!policyfail && mac_check_inpcb_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)&ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return policyfail;
}

/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;

	INP_INFO_RLOCK(&ripcbinfo);
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;
	LIST_FOREACH(inp, &ripcb, inp_list) {
		INP_LOCK(inp);
		if (inp->inp_ip_p && inp->inp_ip_p != proto) {
	docontinue:
			INP_UNLOCK(inp);
			continue;
		}
#ifdef INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			goto docontinue;
#endif
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			goto docontinue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			goto docontinue;
		if (jailed(inp->inp_socket->so_cred))
			if (htonl(prison_getip(inp->inp_socket->so_cred)) !=
			    ip->ip_dst.s_addr)
				goto docontinue;
		if (last) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) raw_append(last, ip, n);
			/* XXX count dropped packet */
			INP_UNLOCK(last);
		}
		last = inp;
	}
	if (last != NULL) {
		if (raw_append(last, ip, m) != 0)
			ipstat.ips_delivered--;
		INP_UNLOCK(last);
	} else {
		m_freem(m);
		ipstat.ips_noproto++;
		ipstat.ips_delivered--;
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
}
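
/*
 * A minimal userland sketch (privileged; error handling omitted) of a
 * consumer of the matching loop above: an unbound, unconnected raw ICMP
 * socket gets a copy of each ICMP datagram that passes the protocol,
 * address, and jail checks, with the IP header included and the sender's
 * address reported via the ripsrc sockaddr filled in by rip_input().
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	char buf[2048];
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(s, buf, sizeof(buf), 0,
 *	    (struct sockaddr *)&from, &fromlen);
 */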

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return(ENOBUFS);

		INP_LOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		if (jailed(inp->inp_socket->so_cred))
			ip->ip_src.s_addr =
			    htonl(prison_getip(inp->inp_socket->so_cred));
		else
			ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		INP_LOCK(inp);
		ip = mtod(m, struct ip *);
		if (jailed(inp->inp_socket->so_cred)) {
			if (ip->ip_src.s_addr !=
			    htonl(prison_getip(inp->inp_socket->so_cred))) {
				INP_UNLOCK(inp);
				m_freem(m);
				return (EPERM);
			}
		}
		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2))
		    && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_UNLOCK(inp);
			m_freem(m);
			return EINVAL;
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		ipstat.ips_rawout++;
	}

	if (inp->inp_vflag & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_UNLOCK(inp);
	return error;
}
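
/*
 * A userland sketch of the two branches above (error handling omitted).
 * Without IP_HDRINCL the kernel prepends and fills in the IP header from
 * pcb state; with IP_HDRINCL the caller supplies a complete header, and
 * only the sanity checks in the second branch are applied before the
 * packet is handed to ip_output() with IP_RAWOUTPUT set.
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
 *
 * From this point on, every buffer passed to sendto() must begin with a
 * struct ip whose ip_hl and ip_len are consistent with the buffer, or
 * rip_output() rejects it with EINVAL.
 */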

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking suser() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP)
		return (EINVAL);

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
			error = suser(curthread);
			if (error != 0)
				return (error);
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_GET:
			error = suser(curthread);
			if (error != 0)
				return (error);
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = suser(curthread);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
			error = suser(curthread);
			if (error != 0)
				return (error);
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			error = suser(curthread);
			if (error != 0)
				return (error);
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = suser(curthread);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = suser(curthread);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = suser(curthread);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = suser(curthread);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				break;
			}
		}
		break;

	case PRC_IFUP:
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE))
			return;
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

u_long	rip_sendspace = RIPSNDQ;
u_long	rip_recvspace = RIPRCVQ;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	/* XXX why not lower? */
	INP_INFO_WLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (inp) {
		/* XXX counter, printf */
		INP_INFO_WUNLOCK(&ripcbinfo);
		return EINVAL;
	}
	if (jailed(td->td_ucred) && !jail_allow_raw_sockets) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return (EPERM);
	}
	if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL)) != 0) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return error;
	}
	if (proto >= IPPROTO_MAX || proto < 0) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return EPROTONOSUPPORT;
	}

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return error;
	}
	error = in_pcballoc(so, &ripcbinfo, "rawinp");
	if (error) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return error;
	}
	inp = (struct inpcb *)so->so_pcb;
	INP_LOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	INP_UNLOCK(inp);
	return 0;
}

static void
rip_pcbdetach(struct socket *so, struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(&ripcbinfo);
	INP_LOCK_ASSERT(inp);

	if (so == ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
}

static int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	INP_INFO_WLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (inp == 0) {
		/* XXX counter, printf */
		INP_INFO_WUNLOCK(&ripcbinfo);
		return EINVAL;
	}
	INP_LOCK(inp);
	rip_pcbdetach(so, inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return 0;
}

static int
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	INP_INFO_WLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (inp == 0) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return EINVAL;	/* ??? possible? panic instead? */
	}
	INP_LOCK(inp);
	soisdisconnected(so);
	if (so->so_state & SS_NOFDREF)
		rip_pcbdetach(so, inp);
	else
		INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return 0;
}

static int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	return rip_abort(so);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;

	if (jailed(td->td_ucred)) {
		if (addr->sin_addr.s_addr == INADDR_ANY)
			addr->sin_addr.s_addr =
			    htonl(prison_getip(td->td_ucred));
		if (htonl(prison_getip(td->td_ucred)) != addr->sin_addr.s_addr)
			return (EADDRNOTAVAIL);
	}

	if (TAILQ_EMPTY(&ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     ifa_ifwithaddr((struct sockaddr *)addr) == 0))
		return EADDRNOTAVAIL;

	INP_INFO_WLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (inp == 0) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return EINVAL;
	}
	INP_LOCK(inp);
	inp->inp_laddr = addr->sin_addr;
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return 0;
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet))
		return EADDRNOTAVAIL;
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return EAFNOSUPPORT;

	INP_INFO_WLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (inp == 0) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return EINVAL;
	}
	INP_LOCK(inp);
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return 0;
}
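
/*
 * A userland sketch of reaching the two entry points above (error handling
 * omitted; the bound address stands for one configured on a local interface,
 * otherwise rip_bind() returns EADDRNOTAVAIL).  bind(2) sets the local
 * address that rip_input() matches against ip_dst; connect(2) fixes the
 * peer, so only datagrams from that source are delivered and send(2) needs
 * no destination address.
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	struct sockaddr_in sin;
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *	inet_pton(AF_INET, "192.0.2.2", &sin.sin_addr);
 *	connect(s, (struct sockaddr *)&sin, sizeof(sin));
 */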

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	INP_INFO_RLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (inp == 0) {
		INP_INFO_RUNLOCK(&ripcbinfo);
		return EINVAL;
	}
	INP_LOCK(inp);
	INP_INFO_RUNLOCK(&ripcbinfo);
	socantsendmore(so);
	INP_UNLOCK(inp);
	return 0;
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;
	int ret;

	INP_INFO_WLOCK(&ripcbinfo);
	inp = sotoinpcb(so);
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			INP_INFO_WUNLOCK(&ripcbinfo);
			m_freem(m);
			return EISCONN;
		}
		dst = inp->inp_faddr.s_addr;
	} else {
		if (nam == NULL) {
			INP_INFO_WUNLOCK(&ripcbinfo);
			m_freem(m);
			return ENOTCONN;
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	ret = rip_output(m, so, dst);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return ret;
}

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the pcb list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&ripcbinfo);
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	INP_INFO_RLOCK(&ripcbinfo);
	for (inp = LIST_FIRST(ripcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0) {
			/* XXX held references? */
			inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;
			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_INFO_RLOCK(&ripcbinfo);
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}
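
/*
 * A userland sketch of consuming the handler above via sysctlbyname(3)
 * (error handling omitted): the first call sizes the buffer, the second
 * fills it with a struct xinpgen header, one struct xinpcb per raw socket,
 * and a trailing xinpgen whose generation count tells the caller whether
 * the list changed while it was being copied out.
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0);
 */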

/*
 * This is the wrapper function for in_setsockaddr.  We just pass down
 * the pcbinfo for in_setsockaddr to lock.
 */
static int
rip_sockaddr(struct socket *so, struct sockaddr **nam)
{
	return (in_setsockaddr(so, nam, &ripcbinfo));
}

/*
 * This is the wrapper function for in_setpeeraddr.  We just pass down
 * the pcbinfo for in_setpeeraddr to lock.
 */
static int
rip_peeraddr(struct socket *so, struct sockaddr **nam)
{
	return (in_setpeeraddr(so, nam, &ripcbinfo));
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		rip_peeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		rip_sockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel
};