/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/domain.h>
#include <sys/filedesc.h>
#include <sys/lock.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/file.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/sx.h>

#include <vm/vm_zone.h>

static struct vm_zone	*unp_zone;
static unp_gen_t	unp_gencnt;
static u_int		unp_count;

static struct unp_head	unp_shead, unp_dhead;

/*
 * Unix communications domain.
 *
 * TODO:
 *	SEQPACKET, RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *	lock pushdown
 */
static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t	unp_ino;		/* prototype for fake inode numbers */

static int	unp_attach __P((struct socket *));
static void	unp_detach __P((struct unpcb *));
static int	unp_bind __P((struct unpcb *, struct sockaddr *, struct thread *));
static int	unp_connect __P((struct socket *, struct sockaddr *,
				struct thread *));
static void	unp_disconnect __P((struct unpcb *));
static void	unp_shutdown __P((struct unpcb *));
static void	unp_drop __P((struct unpcb *, int));
static void	unp_gc __P((void));
static void	unp_scan __P((struct mbuf *, void (*)(struct file *)));
static void	unp_mark __P((struct file *));
static void	unp_discard __P((struct file *));
static void	unp_freerights __P((struct file **, int));
static int	unp_internalize __P((struct mbuf **, struct thread *));
static int	unp_listen __P((struct unpcb *, struct proc *));

static int
uipc_abort(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	unp_drop(unp, ECONNABORTED);
	return 0;
}

static int
uipc_accept(struct socket *so, struct sockaddr **nam)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;

	/*
	 * Pass back name of connected socket,
	 * if it was bound and we are still connected
	 * (our peer may have closed already!).
	 */
	if (unp->unp_conn && unp->unp_conn->unp_addr) {
		*nam = dup_sockaddr((struct sockaddr *)unp->unp_conn->unp_addr,
		    1);
	} else {
		*nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1);
	}
	return 0;
}

static int
uipc_attach(struct socket *so, int proto, struct thread *td)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp != 0)
		return EISCONN;
	return unp_attach(so);
}

static int
uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;

	return unp_bind(unp, nam, td);
}

static int
uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	return unp_connect(so, nam, curthread);
}

static int
uipc_connect2(struct socket *so1, struct socket *so2)
{
	struct unpcb *unp = sotounpcb(so1);

	if (unp == 0)
		return EINVAL;

	return unp_connect2(so1, so2);
}

/* control is EOPNOTSUPP */

static int
uipc_detach(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;

	unp_detach(unp);
	return 0;
}

static int
uipc_disconnect(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	unp_disconnect(unp);
	return 0;
}

static int
uipc_listen(struct socket *so, struct thread *td)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0 || unp->unp_vnode == 0)
		return EINVAL;
	return unp_listen(unp, td->td_proc);
}

static int
uipc_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	if (unp->unp_conn && unp->unp_conn->unp_addr)
		*nam = dup_sockaddr((struct sockaddr *)unp->unp_conn->unp_addr,
		    1);
	return 0;
}

static int
uipc_rcvd(struct socket *so, int flags)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	u_long newhiwat;

	if (unp == 0)
		return EINVAL;
	switch (so->so_type) {
	case SOCK_DGRAM:
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/

	case SOCK_STREAM:
		if (unp->unp_conn == 0)
			break;
		so2 = unp->unp_conn->unp_socket;
		/*
		 * Adjust backpressure on sender
		 * and wakeup any waiting to write.
		 */
		so2->so_snd.sb_mbmax += unp->unp_mbcnt - so->so_rcv.sb_mbcnt;
		unp->unp_mbcnt = so->so_rcv.sb_mbcnt;
		newhiwat = so2->so_snd.sb_hiwat + unp->unp_cc -
		    so->so_rcv.sb_cc;
		(void)chgsbsize(so2->so_cred->cr_uidinfo, &so2->so_snd.sb_hiwat,
		    newhiwat, RLIM_INFINITY);
		unp->unp_cc = so->so_rcv.sb_cc;
		sowwakeup(so2);
		break;

	default:
		panic("uipc_rcvd unknown socktype");
	}
	return 0;
}

/* pru_rcvoob is EOPNOTSUPP */

static int
uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
	  struct mbuf *control, struct thread *td)
{
	int error = 0;
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	u_long newhiwat;

	if (unp == 0) {
		error = EINVAL;
		goto release;
	}
	if (flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}

	if (control && (error = unp_internalize(&control, td)))
		goto release;

	switch (so->so_type) {
	case SOCK_DGRAM:
	{
		struct sockaddr *from;

		if (nam) {
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			error = unp_connect(so, nam, td);
			if (error)
				break;
		} else {
			if (unp->unp_conn == 0) {
				error = ENOTCONN;
				break;
			}
		}
		so2 = unp->unp_conn->unp_socket;
		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;
		if (sbappendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			m = 0;
			control = 0;
		} else
			error = ENOBUFS;
		if (nam)
			unp_disconnect(unp);
		break;
	}

	case SOCK_STREAM:
		/* Connect if not connected yet. */
		/*
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (nam) {
				error = unp_connect(so, nam, td);
				if (error)
					break;	/* XXX */
			} else {
				error = ENOTCONN;
				break;
			}
		}

		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}
		if (unp->unp_conn == 0)
			panic("uipc_send connected but no connection?");
		so2 = unp->unp_conn->unp_socket;
		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		if (control) {
			if (sbappendcontrol(&so2->so_rcv, m, control))
				control = 0;
		} else
			sbappend(&so2->so_rcv, m);
		so->so_snd.sb_mbmax -=
		    so2->so_rcv.sb_mbcnt - unp->unp_conn->unp_mbcnt;
		unp->unp_conn->unp_mbcnt = so2->so_rcv.sb_mbcnt;
		newhiwat = so->so_snd.sb_hiwat -
		    (so2->so_rcv.sb_cc - unp->unp_conn->unp_cc);
		(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
		    newhiwat, RLIM_INFINITY);
		unp->unp_conn->unp_cc = so2->so_rcv.sb_cc;
		sorwakeup(so2);
		m = 0;
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by
	 * a SHUTDOWN.
	 */
	if (flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	if (control && error != 0)
		unp_dispose(control);

release:
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return error;
}

static int
uipc_sense(struct socket *so, struct stat *sb)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;

	if (unp == 0)
		return EINVAL;
	sb->st_blksize = so->so_snd.sb_hiwat;
	if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) {
		so2 = unp->unp_conn->unp_socket;
		sb->st_blksize += so2->so_rcv.sb_cc;
	}
	sb->st_dev = NOUDEV;
	if (unp->unp_ino == 0)
		unp->unp_ino = unp_ino++;
	sb->st_ino = unp->unp_ino;
	return (0);
}

static int
uipc_shutdown(struct socket *so)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	socantsendmore(so);
	unp_shutdown(unp);
	return 0;
}

static int
uipc_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct unpcb *unp = sotounpcb(so);

	if (unp == 0)
		return EINVAL;
	if (unp->unp_addr)
		*nam = dup_sockaddr((struct sockaddr *)unp->unp_addr, 1);
	else
		*nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1);
	return 0;
}

struct pr_usrreqs uipc_usrreqs = {
	uipc_abort, uipc_accept, uipc_attach, uipc_bind, uipc_connect,
	uipc_connect2, pru_control_notsupp, uipc_detach, uipc_disconnect,
	uipc_listen, uipc_peeraddr, uipc_rcvd, pru_rcvoob_notsupp,
	uipc_send, uipc_sense, uipc_shutdown, uipc_sockaddr,
	sosend, soreceive, sopoll
};

int
uipc_ctloutput(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct unpcb *unp = sotounpcb(so);
	int error;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case LOCAL_PEERCRED:
			if (unp->unp_flags & UNP_HAVEPC)
				error = sooptcopyout(sopt, &unp->unp_peercred,
				    sizeof(unp->unp_peercred));
			else {
				if (so->so_type == SOCK_STREAM)
					error = ENOTCONN;
				else
					error = EINVAL;
			}
			break;
		default:
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_SET:
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
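
/*
 * Example (userland, illustrative only): a connected SOCK_STREAM peer can
 * retrieve the credentials cached for LOCAL_PEERCRED above via getsockopt().
 * This is a hedged sketch, not part of the kernel; it assumes the usual
 * <sys/ucred.h> struct xucred layout and a level argument of 0:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/ucred.h>
 *	#include <sys/un.h>
 *	#include <stdio.h>
 *
 *	struct xucred xuc;
 *	socklen_t len = sizeof(xuc);
 *
 *	if (getsockopt(s, 0, LOCAL_PEERCRED, &xuc, &len) == 0 &&
 *	    xuc.cr_version == XUCRED_VERSION)
 *		printf("peer euid %u, %d groups\n",
 *		    (unsigned)xuc.cr_uid, xuc.cr_ngroups);
 */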

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#ifndef PIPSIZ
#define	PIPSIZ	8192
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long	unpdg_recvspace = 4*1024;

static int	unp_rights;			/* file descriptors in flight */

SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
	   &unpst_sendspace, 0, "");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
	   &unpst_recvspace, 0, "");
SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
	   &unpdg_sendspace, 0, "");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
	   &unpdg_recvspace, 0, "");
SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, "");

static int
unp_attach(so)
	struct socket *so;
{
	register struct unpcb *unp;
	int error;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	unp = zalloc(unp_zone);
	if (unp == NULL)
		return (ENOBUFS);
	bzero(unp, sizeof *unp);
	unp->unp_gencnt = ++unp_gencnt;
	unp_count++;
	LIST_INIT(&unp->unp_refs);
	unp->unp_socket = so;
	FILEDESC_LOCK(curproc->p_fd);
	unp->unp_rvnode = curthread->td_proc->p_fd->fd_rdir;
	FILEDESC_UNLOCK(curproc->p_fd);
	LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead
			 : &unp_shead, unp, unp_link);
	so->so_pcb = (caddr_t)unp;
	return (0);
}

static void
unp_detach(unp)
	register struct unpcb *unp;
{
	LIST_REMOVE(unp, unp_link);
	unp->unp_gencnt = ++unp_gencnt;
	--unp_count;
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = 0;
		vrele(unp->unp_vnode);
		unp->unp_vnode = 0;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (!LIST_EMPTY(&unp->unp_refs))
		unp_drop(LIST_FIRST(&unp->unp_refs), ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = 0;
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(unp->unp_socket);
		unp_gc();
	}
	if (unp->unp_addr)
		FREE(unp->unp_addr, M_SONAME);
	zfree(unp_zone, unp);
}

static int
unp_bind(unp, nam, td)
	struct unpcb *unp;
	struct sockaddr *nam;
	struct thread *td;
{
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp;
	struct mount *mp;
	struct vattr vattr;
	int error, namelen;
	struct nameidata nd;
	char *buf;

	if (unp->unp_vnode != NULL)
		return (EINVAL);
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return EINVAL;
	buf = malloc(SOCK_MAXADDRLEN, M_TEMP, M_WAITOK);
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
restart:
	NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE,
	    buf, td);
/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	error = namei(&nd);
	if (error) {
		free(buf, M_TEMP);
		return (error);
	}
	vp = nd.ni_vp;
	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (vp != NULL) {
			vrele(vp);
			free(buf, M_TEMP);
			return (EADDRINUSE);
		}
		error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH);
		if (error) {
			free(buf, M_TEMP);
			return (error);
		}
		goto restart;
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	FILEDESC_LOCK(td->td_proc->p_fd);
	vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask);
	FILEDESC_UNLOCK(td->td_proc->p_fd);
	VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vput(nd.ni_dvp);
	if (error) {
		free(buf, M_TEMP);
		return (error);
	}
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam, 1);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	free(buf, M_TEMP);
	return (0);
}
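
/*
 * Example (userland, illustrative only): unp_bind() above is what services a
 * plain bind(2) on a PF_LOCAL socket.  A hedged sketch of the calling side
 * follows; the socket path is the caller's choice, and the unlink() matters
 * because unp_bind() refuses to replace an existing node with EADDRINUSE:
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct sockaddr_un sun;
 *	int s = socket(PF_LOCAL, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_LOCAL;
 *	strncpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path) - 1);
 *	sun.sun_len = SUN_LEN(&sun);
 *	unlink(sun.sun_path);
 *	if (bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0)
 *		listen(s, 5);
 */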

static int
unp_connect(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	register struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	register struct vnode *vp;
	register struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	int error, len;
	struct nameidata nd;
	char buf[SOCK_MAXADDRLEN];

	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
	if (len <= 0)
		return EINVAL;
	strncpy(buf, soun->sun_path, len);
	buf[len] = 0;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, buf, td);
	error = namei(&nd);
	if (error)
		return (error);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	error = VOP_ACCESS(vp, VWRITE, td->td_proc->p_ucred, td);
	if (error)
		goto bad;
	so2 = vp->v_socket;
	if (so2 == 0) {
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp = sotounpcb(so);
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr = (struct sockaddr_un *)
			    dup_sockaddr((struct sockaddr *)
			    unp2->unp_addr, 1);

		/*
		 * unp_peercred management:
		 *
		 * The connecter's (client's) credentials are copied
		 * from its process structure at the time of connect()
		 * (which is now).
		 */
		memset(&unp3->unp_peercred, '\0', sizeof(unp3->unp_peercred));
		unp3->unp_peercred.cr_uid = td->td_proc->p_ucred->cr_uid;
		unp3->unp_peercred.cr_ngroups = td->td_proc->p_ucred->cr_ngroups;
		memcpy(unp3->unp_peercred.cr_groups, td->td_proc->p_ucred->cr_groups,
		    sizeof(unp3->unp_peercred.cr_groups));
		unp3->unp_flags |= UNP_HAVEPC;
		/*
		 * The receiver's (server's) credentials are copied
		 * from the unp_peercred member of the socket on which
		 * the former called listen(); unp_listen() cached that
		 * process's credentials at that time so we can use
		 * them now.
		 */
		KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
		    ("unp_connect: listener without cached peercred"));
		memcpy(&unp->unp_peercred, &unp2->unp_peercred,
		    sizeof(unp->unp_peercred));
		unp->unp_flags |= UNP_HAVEPC;

		so2 = so3;
	}
	error = unp_connect2(so, so2);
bad:
	vput(vp);
	return (error);
}

int
unp_connect2(so, so2)
	register struct socket *so;
	register struct socket *so2;
{
	register struct unpcb *unp = sotounpcb(so);
	register struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
		soisconnected(so);
		break;

	case SOCK_STREAM:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}

static void
unp_disconnect(unp)
	struct unpcb *unp;
{
	register struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == 0)
		return;
	unp->unp_conn = 0;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		LIST_REMOVE(unp, unp_reflink);
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

#ifdef notdef
void
unp_abort(unp)
	struct unpcb *unp;
{

	unp_detach(unp);
}
#endif

static int
unp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct unpcb *unp, **unp_list;
	unp_gen_t gencnt;
	struct xunpgen *xug;
	struct unp_head *head;
	struct xunpcb *xu;

	head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = unp_count;
		req->oldidx = 2 * (sizeof *xug)
			+ (n + n/8) * sizeof(struct xunpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK);
	gencnt = unp_gencnt;
	n = unp_count;

	xug->xug_len = sizeof *xug;
	xug->xug_count = n;
	xug->xug_gen = gencnt;
	xug->xug_sogen = so_gencnt;
	error = SYSCTL_OUT(req, xug, sizeof *xug);
	if (error) {
		free(xug, M_TEMP);
		return error;
	}

	unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);

	for (unp = LIST_FIRST(head), i = 0; unp && i < n;
	     unp = LIST_NEXT(unp, unp_link)) {
		if (unp->unp_gencnt <= gencnt) {
			if (cr_cansee(req->td->td_proc->p_ucred,
			    unp->unp_socket->so_cred))
				continue;
			unp_list[i++] = unp;
		}
	}
	n = i;			/* in case we lost some during malloc */

	error = 0;
	xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK);
	for (i = 0; i < n; i++) {
		unp = unp_list[i];
		if (unp->unp_gencnt <= gencnt) {
			xu->xu_len = sizeof *xu;
			xu->xu_unpp = unp;
			/*
			 * XXX - need more locking here to protect against
			 * connect/disconnect races for SMP.
			 */
			if (unp->unp_addr)
				bcopy(unp->unp_addr, &xu->xu_addr,
				    unp->unp_addr->sun_len);
			if (unp->unp_conn && unp->unp_conn->unp_addr)
				bcopy(unp->unp_conn->unp_addr,
				    &xu->xu_caddr,
				    unp->unp_conn->unp_addr->sun_len);
			bcopy(unp, &xu->xu_unp, sizeof *unp);
			sotoxsocket(unp->unp_socket, &xu->xu_socket);
			error = SYSCTL_OUT(req, xu, sizeof *xu);
		}
	}
	free(xu, M_TEMP);
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		xug->xug_gen = unp_gencnt;
		xug->xug_sogen = so_gencnt;
		xug->xug_count = unp_count;
		error = SYSCTL_OUT(req, xug, sizeof *xug);
	}
	free(unp_list, M_TEMP);
	free(xug, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
	    "List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
	    "List of active local stream sockets");
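
/*
 * Example (userland, illustrative only): unp_pcblist() above is what
 * netstat-style tools read.  A hedged sketch of a consumer, assuming the
 * "net.local.stream.pcblist" name declared above and the xunpgen/xunpcb
 * layout from <sys/unpcb.h>; parse() is a hypothetical helper that walks
 * the leading xunpgen, the xunpcb entries, and the trailing xunpgen with
 * updated generation and count:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/un.h>
 *	#include <sys/unpcb.h>
 *	#include <stdlib.h>
 *
 *	size_t len = 0;
 *	char *buf;
 *
 *	if (sysctlbyname("net.local.stream.pcblist", NULL, &len, NULL, 0) == 0 &&
 *	    (buf = malloc(len)) != NULL &&
 *	    sysctlbyname("net.local.stream.pcblist", buf, &len, NULL, 0) == 0)
 *		parse((struct xunpgen *)buf, len);
 */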

static void
unp_shutdown(unp)
	struct unpcb *unp;
{
	struct socket *so;

	if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn &&
	    (so = unp->unp_conn->unp_socket))
		socantrcvmore(so);
}

static void
unp_drop(unp, errno)
	struct unpcb *unp;
	int errno;
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		LIST_REMOVE(unp, unp_link);
		unp->unp_gencnt = ++unp_gencnt;
		unp_count--;
		so->so_pcb = (caddr_t) 0;
		if (unp->unp_addr)
			FREE(unp->unp_addr, M_SONAME);
		zfree(unp_zone, unp);
		sotryfree(so);
	}
}

#ifdef notdef
void
unp_drain()
{

}
#endif

static void
unp_freerights(rp, fdcount)
	struct file **rp;
	int fdcount;
{
	int i;
	struct file *fp;

	for (i = 0; i < fdcount; i++) {
		fp = *rp;
		/*
		 * Zero the pointer before calling unp_discard,
		 * since it may end up in unp_gc()..
		 */
		*rp++ = 0;
		unp_discard(fp);
	}
}

int
unp_externalize(control, controlp)
	struct mbuf *control, **controlp;
{
	struct thread *td = curthread;		/* XXX */
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	int i;
	int *fdp;
	struct file **rp;
	struct file *fp;
	void *data;
	socklen_t clen = control->m_len, datalen;
	int error, newfds;
	int f;
	u_int newlen;

	error = 0;
	if (controlp != NULL)	/* controlp == NULL => free control messages */
		*controlp = NULL;

	while (cm != NULL) {
		if (sizeof(*cm) > clen || cm->cmsg_len > clen) {
			error = EINVAL;
			break;
		}

		data = CMSG_DATA(cm);
		datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;

		if (cm->cmsg_level == SOL_SOCKET
		    && cm->cmsg_type == SCM_RIGHTS) {
			newfds = datalen / sizeof(struct file *);
			rp = data;

			/* If we're not outputting the descriptors, free them. */
			if (error || controlp == NULL) {
				unp_freerights(rp, newfds);
				goto next;
			}
			FILEDESC_LOCK(td->td_proc->p_fd);
			/* If the new FDs will not fit, free them. */
			if (!fdavail(td, newfds)) {
				FILEDESC_UNLOCK(td->td_proc->p_fd);
				error = EMSGSIZE;
				unp_freerights(rp, newfds);
				goto next;
			}
			/*
			 * Now change each pointer to an fd in the global
			 * table to an integer that is the index to the
			 * local fd table entry that we set up to point
			 * to the global one we are transferring.
			 */
			newlen = newfds * sizeof(int);
			*controlp = sbcreatecontrol(NULL, newlen,
			    SCM_RIGHTS, SOL_SOCKET);
			if (*controlp == NULL) {
				FILEDESC_UNLOCK(td->td_proc->p_fd);
				error = E2BIG;
				unp_freerights(rp, newfds);
				goto next;
			}

			fdp = (int *)
			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
			for (i = 0; i < newfds; i++) {
				if (fdalloc(td, 0, &f))
					panic("unp_externalize fdalloc failed");
				fp = *rp++;
				td->td_proc->p_fd->fd_ofiles[f] = fp;
				FILE_LOCK(fp);
				fp->f_msgcount--;
				FILE_UNLOCK(fp);
				unp_rights--;
				*fdp++ = f;
			}
			FILEDESC_UNLOCK(td->td_proc->p_fd);
		} else {	/* We can just copy anything else across */
			if (error || controlp == NULL)
				goto next;
			*controlp = sbcreatecontrol(NULL, datalen,
			    cm->cmsg_type, cm->cmsg_level);
			if (*controlp == NULL) {
				error = ENOBUFS;
				goto next;
			}
			bcopy(data,
			    CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
			    datalen);
		}

		controlp = &(*controlp)->m_next;

next:
		if (CMSG_SPACE(datalen) < clen) {
			clen -= CMSG_SPACE(datalen);
			cm = (struct cmsghdr *)
			    ((caddr_t)cm + CMSG_SPACE(datalen));
		} else {
			clen = 0;
			cm = NULL;
		}
	}

	m_freem(control);

	return (error);
}
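
/*
 * Example (userland, illustrative only): unp_externalize() above and
 * unp_internalize() below implement descriptor passing for sendmsg(2) and
 * recvmsg(2) with SCM_RIGHTS.  A hedged sketch of the sending side, assuming
 * a connected PF_LOCAL socket s and a descriptor fd_to_pass:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	union {
 *		struct cmsghdr hdr;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} cmsg;
 *	struct msghdr msg;
 *	struct iovec iov;
 *	char byte = 0;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	iov.iov_base = &byte;
 *	iov.iov_len = 1;
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cmsg.buf;
 *	msg.msg_controllen = CMSG_SPACE(sizeof(int));
 *	cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(int));
 *	cmsg.hdr.cmsg_level = SOL_SOCKET;
 *	cmsg.hdr.cmsg_type = SCM_RIGHTS;
 *	memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_pass, sizeof(int));
 *	sendmsg(s, &msg, 0);
 */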

void
unp_init(void)
{
	unp_zone = zinit("unpcb", sizeof(struct unpcb), nmbclusters, 0, 0);
	if (unp_zone == 0)
		panic("unp_init");
	LIST_INIT(&unp_dhead);
	LIST_INIT(&unp_shead);
}

#ifndef MIN
#define	MIN(a,b) (((a)<(b))?(a):(b))
#endif

static int
unp_internalize(controlp, td)
	struct mbuf **controlp;
	struct thread *td;
{
	struct mbuf *control = *controlp;
	struct proc *p = td->td_proc;
	struct filedesc *fdescp = p->p_fd;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct cmsgcred *cmcred;
	struct file **rp;
	struct file *fp;
	struct timeval *tv;
	int i, fd, *fdp;
	void *data;
	socklen_t clen = control->m_len, datalen;
	int error, oldfds;
	u_int newlen;

	error = 0;
	*controlp = NULL;

	while (cm != NULL) {
		if (sizeof(*cm) > clen || cm->cmsg_level != SOL_SOCKET
		    || cm->cmsg_len > clen) {
			error = EINVAL;
			goto out;
		}

		data = CMSG_DATA(cm);
		datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;

		switch (cm->cmsg_type) {
		/*
		 * Fill in credential information.
		 */
		case SCM_CREDS:
			*controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
			    SCM_CREDS, SOL_SOCKET);
			if (*controlp == NULL) {
				error = ENOBUFS;
				goto out;
			}

			cmcred = (struct cmsgcred *)
			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
			cmcred->cmcred_pid = p->p_pid;
			cmcred->cmcred_uid = p->p_ucred->cr_ruid;
			cmcred->cmcred_gid = p->p_ucred->cr_rgid;
			cmcred->cmcred_euid = p->p_ucred->cr_uid;
			cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
			    CMGROUP_MAX);
			for (i = 0; i < cmcred->cmcred_ngroups; i++)
				cmcred->cmcred_groups[i] =
				    p->p_ucred->cr_groups[i];
			break;

		case SCM_RIGHTS:
			oldfds = datalen / sizeof (int);
			/*
			 * Check that all the FDs passed in refer to legal
			 * files.  If not, reject the entire operation.
			 */
			fdp = data;
			FILEDESC_LOCK(fdescp);
			for (i = 0; i < oldfds; i++) {
				fd = *fdp++;
				if ((unsigned)fd >= fdescp->fd_nfiles ||
				    fdescp->fd_ofiles[fd] == NULL) {
					FILEDESC_UNLOCK(fdescp);
					error = EBADF;
					goto out;
				}
			}
			/*
			 * Now replace the integer FDs with pointers to
			 * the associated global file table entry..
			 */
			newlen = oldfds * sizeof(struct file *);
			*controlp = sbcreatecontrol(NULL, newlen,
			    SCM_RIGHTS, SOL_SOCKET);
			if (*controlp == NULL) {
				FILEDESC_UNLOCK(fdescp);
				error = E2BIG;
				goto out;
			}

			fdp = data;
			rp = (struct file **)
			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
			for (i = 0; i < oldfds; i++) {
				fp = fdescp->fd_ofiles[*fdp++];
				*rp++ = fp;
				FILE_LOCK(fp);
				fp->f_count++;
				fp->f_msgcount++;
				FILE_UNLOCK(fp);
				unp_rights++;
			}
			FILEDESC_UNLOCK(fdescp);
			break;

		case SCM_TIMESTAMP:
			*controlp = sbcreatecontrol(NULL, sizeof(*tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*controlp == NULL) {
				error = ENOBUFS;
				goto out;
			}
			tv = (struct timeval *)
			    CMSG_DATA(mtod(*controlp, struct cmsghdr *));
			microtime(tv);
			break;

		default:
			error = EINVAL;
			goto out;
		}

		controlp = &(*controlp)->m_next;

		if (CMSG_SPACE(datalen) < clen) {
			clen -= CMSG_SPACE(datalen);
			cm = (struct cmsghdr *)
			    ((caddr_t)cm + CMSG_SPACE(datalen));
		} else {
			clen = 0;
			cm = NULL;
		}
	}

out:
	m_freem(control);

	return (error);
}

static int unp_defer, unp_gcing;

static void
unp_gc()
{
	register struct file *fp, *nextfp;
	register struct socket *so;
	struct file **extra_ref, **fpp;
	int nunref, i;

	if (unp_gcing)
		return;
	unp_gcing = 1;
	unp_defer = 0;
	/*
	 * Before going through all this, set all FDs to
	 * be NOT deferred and NOT externally accessible.
	 */
	sx_slock(&filelist_lock);
	LIST_FOREACH(fp, &filehead, f_list)
		fp->f_gcflag &= ~(FMARK|FDEFER);
	do {
		LIST_FOREACH(fp, &filehead, f_list) {
			FILE_LOCK(fp);
			/*
			 * If the file is not open, skip it.
			 */
			if (fp->f_count == 0) {
				FILE_UNLOCK(fp);
				continue;
			}
			/*
			 * If we already marked it as 'defer' in a
			 * previous pass, then try to process it this time
			 * and un-mark it.
			 */
			if (fp->f_gcflag & FDEFER) {
				fp->f_gcflag &= ~FDEFER;
				unp_defer--;
			} else {
				/*
				 * If it's not deferred, then check if it's
				 * already marked.. if so skip it.
				 */
				if (fp->f_gcflag & FMARK) {
					FILE_UNLOCK(fp);
					continue;
				}
				/*
				 * If all references are from messages
				 * in transit, then skip it. it's not
				 * externally accessible.
				 */
				if (fp->f_count == fp->f_msgcount) {
					FILE_UNLOCK(fp);
					continue;
				}
				/*
				 * If it got this far then it must be
				 * externally accessible.
				 */
				fp->f_gcflag |= FMARK;
			}
			/*
			 * Either it was deferred, or it is externally
			 * accessible and not already marked so.
			 * Now check if it is possibly one of OUR sockets.
			 */
			if (fp->f_type != DTYPE_SOCKET ||
			    (so = (struct socket *)fp->f_data) == 0) {
				FILE_UNLOCK(fp);
				continue;
			}
			FILE_UNLOCK(fp);
			if (so->so_proto->pr_domain != &localdomain ||
			    (so->so_proto->pr_flags&PR_RIGHTS) == 0)
				continue;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			/*
			 * So, Ok, it's one of our sockets and it IS externally
			 * accessible (or was deferred).  Now we look
			 * to see if we hold any file descriptors in its
			 * message buffers.  Follow those links and mark them
			 * as accessible too.
			 */
			unp_scan(so->so_rcv.sb_mb, unp_mark);
		}
	} while (unp_defer);
	sx_sunlock(&filelist_lock);
	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code unp_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.  Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	extra_ref = malloc(nfiles * sizeof(struct file *), M_FILE, M_WAITOK);
	sx_slock(&filelist_lock);
	for (nunref = 0, fp = LIST_FIRST(&filehead), fpp = extra_ref; fp != 0;
	    fp = nextfp) {
		nextfp = LIST_NEXT(fp, f_list);
		FILE_LOCK(fp);
		/*
		 * If it's not open, skip it.
		 */
		if (fp->f_count == 0) {
			FILE_UNLOCK(fp);
			continue;
		}
		/*
		 * If all refs are from msgs, and it's not marked accessible
		 * then it must be referenced from some unreachable cycle
		 * of (shut-down) FDs, so include it in our
		 * list of FDs to remove.
		 */
		if (fp->f_count == fp->f_msgcount && !(fp->f_gcflag & FMARK)) {
			*fpp++ = fp;
			nunref++;
			fp->f_count++;
		}
		FILE_UNLOCK(fp);
	}
	sx_sunlock(&filelist_lock);
	/*
	 * For each FD on our hit list, do the following two things.
	 */
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
		struct file *tfp = *fpp;
		FILE_LOCK(tfp);
		if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL) {
			FILE_UNLOCK(tfp);
			sorflush((struct socket *)(tfp->f_data));
		} else
			FILE_UNLOCK(tfp);
	}
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		closef(*fpp, (struct thread *) NULL);
	free((caddr_t)extra_ref, M_FILE);
	unp_gcing = 0;
}
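
/*
 * Example (userland, illustrative only): the unreachable cycles that unp_gc()
 * reclaims are easy to create by accident.  A hedged sketch, assuming a
 * hypothetical send_fd() helper that wraps the SCM_RIGHTS sendmsg() shown
 * earlier in this file:
 *
 *	int pair[2];
 *
 *	socketpair(PF_LOCAL, SOCK_STREAM, 0, pair);
 *	send_fd(pair[0], pair[1]);
 *	send_fd(pair[1], pair[0]);
 *	close(pair[0]);
 *	close(pair[1]);
 *
 * After the two close() calls no process holds either descriptor, yet each
 * struct file still has f_count == f_msgcount because of the SCM_RIGHTS
 * references queued in the other end's receive buffer; only the
 * mark-and-sweep pass above can free them.
 */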

void
unp_dispose(m)
	struct mbuf *m;
{

	if (m)
		unp_scan(m, unp_discard);
}

static int
unp_listen(unp, p)
	struct unpcb *unp;
	struct proc *p;
{

	bzero(&unp->unp_peercred, sizeof(unp->unp_peercred));
	unp->unp_peercred.cr_uid = p->p_ucred->cr_uid;
	unp->unp_peercred.cr_ngroups = p->p_ucred->cr_ngroups;
	bcopy(p->p_ucred->cr_groups, unp->unp_peercred.cr_groups,
	    sizeof(unp->unp_peercred.cr_groups));
	unp->unp_flags |= UNP_HAVEPCCACHED;
	return (0);
}

static void
unp_scan(m0, op)
	register struct mbuf *m0;
	void (*op) __P((struct file *));
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	void *data;
	int i;
	socklen_t clen, datalen;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type != MT_CONTROL)
				continue;

			cm = mtod(m, struct cmsghdr *);
			clen = m->m_len;

			while (cm != NULL) {
				if (sizeof(*cm) > clen || cm->cmsg_len > clen)
					break;

				data = CMSG_DATA(cm);
				datalen = (caddr_t)cm + cm->cmsg_len
				    - (caddr_t)data;

				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					qfds = datalen / sizeof (struct file *);
					rp = data;
					for (i = 0; i < qfds; i++)
						(*op)(*rp++);
				}

				if (CMSG_SPACE(datalen) < clen) {
					clen -= CMSG_SPACE(datalen);
					cm = (struct cmsghdr *)
					    ((caddr_t)cm + CMSG_SPACE(datalen));
				} else {
					clen = 0;
					cm = NULL;
				}
			}
		}
		m0 = m0->m_act;
	}
}

static void
unp_mark(fp)
	struct file *fp;
{
	if (fp->f_gcflag & FMARK)
		return;
	unp_defer++;
	fp->f_gcflag |= (FMARK|FDEFER);
}

static void
unp_discard(fp)
	struct file *fp;
{
	FILE_LOCK(fp);
	fp->f_msgcount--;
	unp_rights--;
	FILE_UNLOCK(fp);
	(void) closef(fp, (struct thread *)NULL);
}