/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California. All Rights Reserved.
 * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
 * Copyright (c) 2018 Matthew Macy
 * Copyright (c) 2022-2025 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UNIX Domain (Local) Sockets
 *
 * This is an implementation of UNIX (local) domain sockets. Each socket has
 * an associated struct unpcb (UNIX protocol control block). Stream sockets
 * may be connected to 0 or 1 other socket. Datagram sockets may be
 * connected to 0, 1, or many other sockets. Sockets may be created and
 * connected in pairs (socketpair(2)), or bound/connected to using the file
 * system name space. For most purposes, only the receive socket buffer is
 * used, as sending on one socket delivers directly to the receive socket
 * buffer of a second socket.
 *
 * The implementation is substantially complicated by the fact that
 * "ancillary data", such as file descriptors or credentials, may be passed
 * across UNIX domain sockets. The potential for passing UNIX domain sockets
 * over other UNIX domain sockets requires the implementation of a simple
 * garbage collector to find and tear down cycles of disconnected sockets.
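 *
 * For illustration only, a minimal userland sketch (not part of this file)
 * of passing a descriptor as ancillary data over a socketpair(2); fd_to_pass
 * is assumed to hold an open descriptor and error checking is omitted:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	int sv[2], fd_to_pass;
 *	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
 *	struct iovec iov = { .iov_base = ".", .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	(void)socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_RIGHTS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	(void)sendmsg(sv[0], &msg, 0);
 *
 * The descriptor received with recvmsg(2) on sv[1] is a new reference to
 * the same open file; such in-flight references are what the garbage
 * collector below has to account for.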
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>

#include <net/vnet.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

MALLOC_DECLARE(M_FILECAPS);

static struct domain localdomain;

static uma_zone_t	unp_zone;
static unp_gen_t	unp_gencnt;	/* (l) */
static u_int		unp_count;	/* (l) Count of local sockets. */
static ino_t		unp_ino;	/* Prototype for fake inode numbers. */
static int		unp_rights;	/* (g) File descriptors in flight. */
static struct unp_head	unp_shead;	/* (l) List of stream sockets. */
static struct unp_head	unp_dhead;	/* (l) List of datagram sockets. */
static struct unp_head	unp_sphead;	/* (l) List of seqpacket sockets. */
static struct mtx_pool	*unp_vp_mtxpool;

struct unp_defer {
	SLIST_ENTRY(unp_defer) ud_link;
	struct file *ud_fp;
};
static SLIST_HEAD(, unp_defer) unp_defers;
static int unp_defers_count;

static const struct sockaddr	sun_noname = {
	.sa_len = sizeof(sun_noname),
	.sa_family = AF_LOCAL,
};

/*
 * Garbage collection of cyclic file descriptor/socket references occurs
 * asynchronously in a taskqueue context in order to avoid recursion and
 * reentrance in the UNIX domain socket, file descriptor, and socket layer
 * code. See unp_gc() for a full description.
 */
static struct timeout_task unp_gc_task;

/*
 * The close of unix domain sockets attached as SCM_RIGHTS is postponed to
 * the taskqueue, to avoid arbitrary recursion depth. The attached sockets
 * might themselves have other sockets attached.
 */
static struct task	unp_defer_task;

/*
 * SOCK_STREAM and SOCK_SEQPACKET unix(4) sockets fully bypass the send
 * buffer, however the notion of a send buffer still makes sense with them.
 * Its size is the amount of space that a send(2) syscall may copyin(9)
 * before checking with the receive buffer of a peer. Although such data is
 * not linked anywhere yet and is only pointed to by a stack variable, it is
 * effectively a buffer that needs to be sized.
 *
 * SOCK_DGRAM sockets use the sendspace only as the maximum datagram size,
 * and don't actually reserve send space. Their recvspace should be large
 * enough for at least one max-size datagram plus address.
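 *
 * The space limits below are runtime-tunable. For example, from userland,
 * using sysctlbyname(3) (an illustrative sketch; error checking omitted):
 *
 *	u_long v = 16384;
 *	(void)sysctlbyname("net.local.dgram.maxdgram", NULL, NULL,
 *	    &v, sizeof(v));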
 */
static u_long	unpst_sendspace = 64*1024;
static u_long	unpst_recvspace = 64*1024;
static u_long	unpdg_maxdgram = 8*1024;	/* support 8KB syslog msgs */
static u_long	unpdg_recvspace = 16*1024;
static u_long	unpsp_sendspace = 64*1024;
static u_long	unpsp_recvspace = 64*1024;

static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Local domain");
static SYSCTL_NODE(_net_local, SOCK_STREAM, stream,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "SOCK_STREAM");
static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "SOCK_DGRAM");
static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "SOCK_SEQPACKET");

SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
    &unpst_sendspace, 0, "Default stream send space.");
SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpst_recvspace, 0, "Default stream receive space.");
SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
    &unpdg_maxdgram, 0, "Maximum datagram size.");
SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpdg_recvspace, 0, "Default datagram receive space.");
SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
    &unpsp_sendspace, 0, "Default seqpacket send space.");
SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpsp_recvspace, 0, "Default seqpacket receive space.");
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
    "File descriptors in flight.");
SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
    &unp_defers_count, 0,
    "File descriptors deferred to taskqueue for close.");

/*
 * Locking and synchronization:
 *
 * Several types of locks exist in the local domain socket implementation:
 * - a global linkage lock
 * - a global connection list lock
 * - the mtxpool lock
 * - per-unpcb mutexes
 *
 * The linkage lock protects the global socket lists, the generation number
 * counter and garbage collector state.
 *
 * The connection list lock protects the list of referring sockets in a
 * datagram socket PCB. This lock is also overloaded to protect a global list
 * of sockets whose buffers contain socket references in the form of
 * SCM_RIGHTS messages. To avoid recursion, such references are released by
 * a dedicated thread.
 *
 * The mtxpool lock protects the vnode from being modified while referenced.
 * Lock ordering rules require that it be acquired before any PCB locks.
 *
 * The unpcb lock (unp_mtx) protects the most commonly referenced fields in
 * the unpcb. This includes the unp_conn field, which either links two
 * connected PCBs together (for connected socket types) or points at the
 * destination socket (for connectionless socket types). The operations of
 * creating or destroying a connection therefore involve locking multiple
 * PCBs. To avoid lock order reversals, in some cases this involves dropping
 * a PCB lock and using a reference counter to maintain liveness.
 *
 * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
 * allocated in pr_attach() and freed in pr_detach(). The validity of that
 * pointer is an invariant, so no lock is required to dereference the so_pcb
 * pointer if a valid socket reference is held by the caller.
 * In practice, this is always true during operations performed on a socket.
 * Each unpcb has a back-pointer to its socket, unp_socket, which will be
 * stable under the same circumstances.
 *
 * This pointer may only be safely dereferenced as long as a valid reference
 * to the unpcb is held. Typically, this reference will be from the socket,
 * or from another unpcb when the referring unpcb's lock is held (in order
 * that the reference not be invalidated during use). For example, to follow
 * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to
 * guarantee that detach is not run clearing unp_socket.
 *
 * Blocking with UNIX domain sockets is a tricky issue: unlike most network
 * protocols, bind() is a non-atomic operation, and connect() requires
 * potential sleeping in the protocol, due to potentially waiting on local or
 * distributed file systems. We try to separate "lookup" operations, which
 * may sleep, and the IPC operations themselves, which typically can occur
 * with relative atomicity as locks can be held over the entire operation.
 *
 * Another tricky issue is simultaneous multi-threaded or multi-process
 * access to a single UNIX domain socket. These are handled by the flags
 * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
 * binding, both of which involve dropping UNIX domain socket locks in order
 * to perform namei() and other file system operations.
 */
static struct rwlock	unp_link_rwlock;
static struct mtx	unp_defers_lock;

#define	UNP_LINK_LOCK_INIT()		rw_init(&unp_link_rwlock,	\
					    "unp_link_rwlock")

#define	UNP_LINK_LOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
					    RA_LOCKED)
#define	UNP_LINK_UNLOCK_ASSERT()	rw_assert(&unp_link_rwlock,	\
					    RA_UNLOCKED)

#define	UNP_LINK_RLOCK()		rw_rlock(&unp_link_rwlock)
#define	UNP_LINK_RUNLOCK()		rw_runlock(&unp_link_rwlock)
#define	UNP_LINK_WLOCK()		rw_wlock(&unp_link_rwlock)
#define	UNP_LINK_WUNLOCK()		rw_wunlock(&unp_link_rwlock)
#define	UNP_LINK_WLOCK_ASSERT()		rw_assert(&unp_link_rwlock,	\
					    RA_WLOCKED)
#define	UNP_LINK_WOWNED()		rw_wowned(&unp_link_rwlock)

#define	UNP_DEFERRED_LOCK_INIT()	mtx_init(&unp_defers_lock,	\
					    "unp_defer", NULL, MTX_DEF)
#define	UNP_DEFERRED_LOCK()		mtx_lock(&unp_defers_lock)
#define	UNP_DEFERRED_UNLOCK()		mtx_unlock(&unp_defers_lock)

#define	UNP_REF_LIST_LOCK()		UNP_DEFERRED_LOCK();
#define	UNP_REF_LIST_UNLOCK()		UNP_DEFERRED_UNLOCK();

#define	UNP_PCB_LOCK_INIT(unp)		mtx_init(&(unp)->unp_mtx,	\
					    "unp", "unp",		\
					    MTX_DUPOK|MTX_DEF)
#define	UNP_PCB_LOCK_DESTROY(unp)	mtx_destroy(&(unp)->unp_mtx)
#define	UNP_PCB_LOCKPTR(unp)		(&(unp)->unp_mtx)
#define	UNP_PCB_LOCK(unp)		mtx_lock(&(unp)->unp_mtx)
#define	UNP_PCB_TRYLOCK(unp)		mtx_trylock(&(unp)->unp_mtx)
#define	UNP_PCB_UNLOCK(unp)		mtx_unlock(&(unp)->unp_mtx)
#define	UNP_PCB_OWNED(unp)		mtx_owned(&(unp)->unp_mtx)
#define	UNP_PCB_LOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_OWNED)
#define	UNP_PCB_UNLOCK_ASSERT(unp)	mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)

static int	uipc_connect2(struct socket *, struct socket *);
static int	uipc_ctloutput(struct socket *, struct sockopt *);
static int	unp_connect(struct socket *, struct sockaddr *,
		    struct thread *);
static int	unp_connectat(int, struct socket *, struct sockaddr *,
		    struct thread *, bool);
static void	unp_connect2(struct socket *, struct socket *, bool);
static void	unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
static void	unp_dispose(struct socket *so);
static void	unp_drop(struct unpcb *);
static void	unp_gc(__unused void *, int);
static void	unp_scan(struct mbuf *, void (*)(struct filedescent **, int));
static void	unp_discard(struct file *);
static void	unp_freerights(struct filedescent **, int);
static int	unp_internalize(struct mbuf *, struct mchain *,
		    struct thread *);
static void	unp_internalize_fp(struct file *);
static int	unp_externalize(struct mbuf *, struct mbuf **, int);
static int	unp_externalize_fp(struct file *);
static void	unp_addsockcred(struct thread *, struct mchain *, int);
static void	unp_process_defers(void * __unused, int);

static void	uipc_wrknl_lock(void *);
static void	uipc_wrknl_unlock(void *);
static void	uipc_wrknl_assert_lock(void *, int);
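
/*
 * unpcb references: each pcb starts with one reference owned by its socket
 * (see uipc_attach()); short-lived extra references are taken whenever a
 * pcb lock has to be dropped while the pcb must stay alive, e.g. in
 * unp_pcb_lock_peer() below.
 */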
static void
unp_pcb_hold(struct unpcb *unp)
{
	u_int old __unused;

	old = refcount_acquire(&unp->unp_refcount);
	KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp));
}

static __result_use_check bool
unp_pcb_rele(struct unpcb *unp)
{
	bool ret;

	UNP_PCB_LOCK_ASSERT(unp);

	if ((ret = refcount_release(&unp->unp_refcount))) {
		UNP_PCB_UNLOCK(unp);
		UNP_PCB_LOCK_DESTROY(unp);
		uma_zfree(unp_zone, unp);
	}
	return (ret);
}

static void
unp_pcb_rele_notlast(struct unpcb *unp)
{
	bool ret __unused;

	ret = refcount_release(&unp->unp_refcount);
	KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp));
}

static void
unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2)
{
	UNP_PCB_UNLOCK_ASSERT(unp);
	UNP_PCB_UNLOCK_ASSERT(unp2);

	if (unp == unp2) {
		UNP_PCB_LOCK(unp);
	} else if ((uintptr_t)unp2 > (uintptr_t)unp) {
		UNP_PCB_LOCK(unp);
		UNP_PCB_LOCK(unp2);
	} else {
		UNP_PCB_LOCK(unp2);
		UNP_PCB_LOCK(unp);
	}
}

static void
unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2)
{
	UNP_PCB_UNLOCK(unp);
	if (unp != unp2)
		UNP_PCB_UNLOCK(unp2);
}

/*
 * Try to lock the connected peer of an already locked socket. In some cases
 * this requires that we unlock the current socket. The pairbusy counter is
 * used to block concurrent connection attempts while the lock is dropped.
 * The caller must be careful to revalidate PCB state.
 */
static struct unpcb *
unp_pcb_lock_peer(struct unpcb *unp)
{
	struct unpcb *unp2;

	UNP_PCB_LOCK_ASSERT(unp);
	unp2 = unp->unp_conn;
	if (unp2 == NULL)
		return (NULL);
	if (__predict_false(unp == unp2))
		return (unp);

	UNP_PCB_UNLOCK_ASSERT(unp2);

	if (__predict_true(UNP_PCB_TRYLOCK(unp2)))
		return (unp2);
	if ((uintptr_t)unp2 > (uintptr_t)unp) {
		UNP_PCB_LOCK(unp2);
		return (unp2);
	}
	unp->unp_pairbusy++;
	unp_pcb_hold(unp2);
	UNP_PCB_UNLOCK(unp);

	UNP_PCB_LOCK(unp2);
	UNP_PCB_LOCK(unp);
	KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL,
	    ("%s: socket %p was reconnected", __func__, unp));
	if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) {
		unp->unp_flags &= ~UNP_WAITING;
		wakeup(unp);
	}
	if (unp_pcb_rele(unp2)) {
		/* unp2 is unlocked. */
		return (NULL);
	}
	if (unp->unp_conn == NULL) {
		UNP_PCB_UNLOCK(unp2);
		return (NULL);
	}
	return (unp2);
}

/*
 * Try to lock the peer of our socket for purposes of sending data to it.
 */
static int
uipc_lock_peer(struct socket *so, struct unpcb **unp2)
{
	struct unpcb *unp;
	int error;

	unp = sotounpcb(so);
	UNP_PCB_LOCK(unp);
	*unp2 = unp_pcb_lock_peer(unp);
	if (__predict_false(so->so_error != 0)) {
		error = so->so_error;
		so->so_error = 0;
		UNP_PCB_UNLOCK(unp);
		if (*unp2 != NULL)
			UNP_PCB_UNLOCK(*unp2);
		return (error);
	}
	if (__predict_false(*unp2 == NULL)) {
		/*
		 * Return a different error code for a previously connected
		 * socket and a never connected one. SS_ISDISCONNECTED is
		 * set in unp_soisdisconnected() and is synchronized by the
		 * pcb lock.
		 */
		error = so->so_state & SS_ISDISCONNECTED ? EPIPE : ENOTCONN;
		UNP_PCB_UNLOCK(unp);
		return (error);
	}
	UNP_PCB_UNLOCK(unp);

	return (0);
}

static void
uipc_abort(struct socket *so)
{
	struct unpcb *unp, *unp2;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
	UNP_PCB_UNLOCK_ASSERT(unp);

	UNP_PCB_LOCK(unp);
	unp2 = unp->unp_conn;
	if (unp2 != NULL) {
		unp_pcb_hold(unp2);
		UNP_PCB_UNLOCK(unp);
		unp_drop(unp2);
	} else
		UNP_PCB_UNLOCK(unp);
}

static int
uipc_attach(struct socket *so, int proto, struct thread *td)
{
	u_long sendspace, recvspace;
	struct unpcb *unp;
	int error;
	bool locked;

	KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
	switch (so->so_type) {
	case SOCK_DGRAM:
		STAILQ_INIT(&so->so_rcv.uxdg_mb);
		STAILQ_INIT(&so->so_snd.uxdg_mb);
		TAILQ_INIT(&so->so_rcv.uxdg_conns);
		/*
		 * Since the send buffer is either bypassed or is a part
		 * of the one-to-many receive buffer, we assign both space
		 * limits to unpdg_recvspace.
		 */
		sendspace = recvspace = unpdg_recvspace;
		break;

	case SOCK_STREAM:
		sendspace = unpst_sendspace;
		recvspace = unpst_recvspace;
		goto common;

	case SOCK_SEQPACKET:
		sendspace = unpsp_sendspace;
		recvspace = unpsp_recvspace;
common:
		/*
		 * XXXGL: we need to initialize the mutex with MTX_DUPOK.
		 * Ideally, protocols that have PR_SOCKBUF should be
		 * responsible for mutex initialization officially, and then
		 * this ugliness with mtx_destroy(); mtx_init(); would go
		 * away.
		 */
		mtx_destroy(&so->so_rcv_mtx);
		mtx_init(&so->so_rcv_mtx, "so_rcv", NULL, MTX_DEF | MTX_DUPOK);
		knlist_init(&so->so_wrsel.si_note, so, uipc_wrknl_lock,
		    uipc_wrknl_unlock, uipc_wrknl_assert_lock);
		STAILQ_INIT(&so->so_rcv.uxst_mbq);
		break;
	default:
		panic("uipc_attach");
	}
	error = soreserve(so, sendspace, recvspace);
	if (error)
		return (error);
	unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
	if (unp == NULL)
		return (ENOBUFS);
	LIST_INIT(&unp->unp_refs);
	UNP_PCB_LOCK_INIT(unp);
	unp->unp_socket = so;
	so->so_pcb = unp;
	refcount_init(&unp->unp_refcount, 1);
	unp->unp_mode = ACCESSPERMS;

	if ((locked = UNP_LINK_WOWNED()) == false)
		UNP_LINK_WLOCK();

	unp->unp_gencnt = ++unp_gencnt;
	unp->unp_ino = ++unp_ino;
	unp_count++;
	switch (so->so_type) {
	case SOCK_STREAM:
		LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
		break;

	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
		break;

	case SOCK_SEQPACKET:
		LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
		break;

	default:
		panic("uipc_attach");
	}

	if (locked == false)
		UNP_LINK_WUNLOCK();

	return (0);
}

static int
uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vattr vattr;
	int error, namelen;
	struct nameidata nd;
	struct unpcb *unp;
	struct vnode *vp;
	struct mount *mp;
	cap_rights_t rights;
	char *buf;
	mode_t mode;

	if (nam->sa_family != AF_UNIX)
		return (EAFNOSUPPORT);

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));

	if (soun->sun_len > sizeof(struct sockaddr_un))
		return (EINVAL);
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return (EINVAL);

	/*
	 * We don't allow simultaneous bind() calls on a single UNIX domain
	 * socket, so flag in-progress operations, and return an error if an
	 * operation is already in progress.
	 *
	 * Historically, we have not allowed a socket to be rebound, so this
	 * also returns an error. Not allowing re-binding simplifies the
	 * implementation and avoids a great many possible failure modes.
	 */
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode != NULL) {
		UNP_PCB_UNLOCK(unp);
		return (EINVAL);
	}
	if (unp->unp_flags & UNP_BINDING) {
		UNP_PCB_UNLOCK(unp);
		return (EALREADY);
	}
	unp->unp_flags |= UNP_BINDING;
	mode = unp->unp_mode & ~td->td_proc->p_pd->pd_cmask;
	UNP_PCB_UNLOCK(unp);

	buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
	bcopy(soun->sun_path, buf, namelen);
	buf[namelen] = 0;

restart:
	NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | NOCACHE,
	    UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT));
	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	error = namei(&nd);
	if (error)
		goto error;
	vp = nd.ni_vp;
	if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
		NDFREE_PNBUF(&nd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (vp != NULL) {
			vrele(vp);
			error = EADDRINUSE;
			goto error;
		}
		error = vn_start_write(NULL, &mp, V_XSLEEP | V_PCATCH);
		if (error)
			goto error;
		goto restart;
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = mode;
#ifdef MAC
	error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
	    &vattr);
#endif
	if (error == 0) {
		/*
		 * The prior lookup may have left LK_SHARED in cn_lkflags,
		 * and VOP_CREATE technically only requires the new vnode to
		 * be locked shared. Most filesystems will return the new
		 * vnode locked exclusive regardless, but we should
		 * explicitly specify that here since we require it and
		 * assert to that effect below.
		 */
		nd.ni_cnd.cn_lkflags = (nd.ni_cnd.cn_lkflags & ~LK_SHARED) |
		    LK_EXCLUSIVE;
		error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	}
	NDFREE_PNBUF(&nd);
	if (error) {
		VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
		vn_finished_write(mp);
		if (error == ERELOOKUP)
			goto restart;
		goto error;
	}
	vp = nd.ni_vp;
	ASSERT_VOP_ELOCKED(vp, "uipc_bind");
	soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);

	UNP_PCB_LOCK(unp);
	VOP_UNP_BIND(vp, unp);
	unp->unp_vnode = vp;
	unp->unp_addr = soun;
	unp->unp_flags &= ~UNP_BINDING;
	UNP_PCB_UNLOCK(unp);
	vref(vp);
	VOP_VPUT_PAIR(nd.ni_dvp, &vp, true);
	vn_finished_write(mp);
	free(buf, M_TEMP);
	return (0);

error:
	UNP_PCB_LOCK(unp);
	unp->unp_flags &= ~UNP_BINDING;
	UNP_PCB_UNLOCK(unp);
	free(buf, M_TEMP);
	return (error);
}

static int
uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (uipc_bindat(AT_FDCWD, so, nam, td));
}

static int
uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	KASSERT(td == curthread, ("uipc_connect: td != curthread"));
	error = unp_connect(so, nam, td);
	return (error);
}

static int
uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
    struct thread *td)
{
	int error;

	KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
	error = unp_connectat(fd, so, nam, td, false);
	return (error);
}

static void
uipc_close(struct socket *so)
{
	struct unpcb *unp, *unp2;
	struct vnode *vp = NULL;
	struct mtx *vplock;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_close: unp == NULL"));

	vplock = NULL;
	if ((vp = unp->unp_vnode) != NULL) {
		vplock = mtx_pool_find(unp_vp_mtxpool, vp);
		mtx_lock(vplock);
	}
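	/*
	 * The pool mutex protects unp_vnode; it is taken before the PCB
	 * lock, per the lock ordering rules above. Recheck unp_vnode after
	 * locking, as the vnode may have been detached concurrently.
	 */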
	UNP_PCB_LOCK(unp);
	if (vp && unp->unp_vnode == NULL) {
		mtx_unlock(vplock);
		vp = NULL;
	}
	if (vp != NULL) {
		VOP_UNP_DETACH(vp);
		unp->unp_vnode = NULL;
	}
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);
	if (vp) {
		mtx_unlock(vplock);
		vrele(vp);
	}
}

static int
uipc_chmod(struct socket *so, mode_t mode, struct ucred *cred __unused,
    struct thread *td __unused)
{
	struct unpcb *unp;
	int error;

	if ((mode & ~ACCESSPERMS) != 0)
		return (EINVAL);

	error = 0;
	unp = sotounpcb(so);
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode != NULL || (unp->unp_flags & UNP_BINDING) != 0)
		error = EINVAL;
	else
		unp->unp_mode = mode;
	UNP_PCB_UNLOCK(unp);
	return (error);
}

static int
uipc_connect2(struct socket *so1, struct socket *so2)
{
	struct unpcb *unp, *unp2;

	if (so1->so_type != so2->so_type)
		return (EPROTOTYPE);

	unp = so1->so_pcb;
	KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
	unp2 = so2->so_pcb;
	KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
	unp_pcb_lock_pair(unp, unp2);
	unp_connect2(so1, so2, false);
	unp_pcb_unlock_pair(unp, unp2);

	return (0);
}

static void
uipc_detach(struct socket *so)
{
	struct unpcb *unp, *unp2;
	struct mtx *vplock;
	struct vnode *vp;
	int local_unp_rights;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));

	vp = NULL;
	vplock = NULL;

	if (!SOLISTENING(so))
		unp_dispose(so);

	UNP_LINK_WLOCK();
	LIST_REMOVE(unp, unp_link);
	if (unp->unp_gcflag & UNPGC_DEAD)
		LIST_REMOVE(unp, unp_dead);
	unp->unp_gencnt = ++unp_gencnt;
	--unp_count;
	UNP_LINK_WUNLOCK();

	UNP_PCB_UNLOCK_ASSERT(unp);
restart:
	if ((vp = unp->unp_vnode) != NULL) {
		vplock = mtx_pool_find(unp_vp_mtxpool, vp);
		mtx_lock(vplock);
	}
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
		if (vplock)
			mtx_unlock(vplock);
		UNP_PCB_UNLOCK(unp);
		goto restart;
	}
	if ((vp = unp->unp_vnode) != NULL) {
		VOP_UNP_DETACH(vp);
		unp->unp_vnode = NULL;
	}
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);

	UNP_REF_LIST_LOCK();
	while (!LIST_EMPTY(&unp->unp_refs)) {
		struct unpcb *ref = LIST_FIRST(&unp->unp_refs);

		unp_pcb_hold(ref);
		UNP_REF_LIST_UNLOCK();

		MPASS(ref != unp);
		UNP_PCB_UNLOCK_ASSERT(ref);
		unp_drop(ref);
		UNP_REF_LIST_LOCK();
	}
	UNP_REF_LIST_UNLOCK();

	UNP_PCB_LOCK(unp);
	local_unp_rights = unp_rights;
	unp->unp_socket->so_pcb = NULL;
	unp->unp_socket = NULL;
	free(unp->unp_addr, M_SONAME);
	unp->unp_addr = NULL;
	if (!unp_pcb_rele(unp))
		UNP_PCB_UNLOCK(unp);
	if (vp) {
		mtx_unlock(vplock);
		vrele(vp);
	}
	if (local_unp_rights)
		taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);

	switch (so->so_type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		MPASS(SOLISTENING(so) || (STAILQ_EMPTY(&so->so_rcv.uxst_mbq) &&
		    so->so_rcv.uxst_peer == NULL));
		break;
	case SOCK_DGRAM:
		/*
		 * Everything should have been unlinked/freed by unp_dispose()
		 * and/or unp_disconnect().
		 */
		MPASS(so->so_rcv.uxdg_peeked == NULL);
		MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb));
		MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns));
		MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb));
	}
}

static int
uipc_disconnect(struct socket *so)
{
	struct unpcb *unp, *unp2;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));

	UNP_PCB_LOCK(unp);
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);
	return (0);
}

static int
uipc_listen(struct socket *so, int backlog, struct thread *td)
{
	struct unpcb *unp;
	int error;

	MPASS(so->so_type != SOCK_DGRAM);

	/*
	 * Synchronize with concurrent connection attempts.
	 */
	error = 0;
	unp = sotounpcb(so);
	UNP_PCB_LOCK(unp);
	if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0)
		error = EINVAL;
	else if (unp->unp_vnode == NULL)
		error = EDESTADDRREQ;
	if (error != 0) {
		UNP_PCB_UNLOCK(unp);
		return (error);
	}

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0) {
		cru2xt(td, &unp->unp_peercred);
		if (!SOLISTENING(so)) {
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
		}
		solisten_proto(so, backlog);
	}
	SOCK_UNLOCK(so);
	UNP_PCB_UNLOCK(unp);
	return (error);
}

static int
uipc_peeraddr(struct socket *so, struct sockaddr *ret)
{
	struct unpcb *unp, *unp2;
	const struct sockaddr *sa;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));

	UNP_PCB_LOCK(unp);
	unp2 = unp_pcb_lock_peer(unp);
	if (unp2 != NULL) {
		if (unp2->unp_addr != NULL)
			sa = (struct sockaddr *)unp2->unp_addr;
		else
			sa = &sun_noname;
		bcopy(sa, ret, sa->sa_len);
		unp_pcb_unlock_pair(unp, unp2);
	} else {
		UNP_PCB_UNLOCK(unp);
		sa = &sun_noname;
		bcopy(sa, ret, sa->sa_len);
	}
	return (0);
}

/*
 * A pr_sosend() call that passes an mbuf instead of a uio comes from a
 * kernel thread: NFS, netgraph(4) and other subsystems can call into socket
 * code. This function will condition the mbuf so that it can be safely put
 * onto a socket buffer, and will calculate its char count and mbuf count.
 *
 * Note: we don't support receiving control data from a kernel thread. Our
 * pr_sosend methods have MPASS() to check that. This may change.
 */
static void
uipc_reset_kernel_mbuf(struct mbuf *m, struct mchain *mc)
{

	M_ASSERTPKTHDR(m);

	m_clrprotoflags(m);
	m_tag_delete_chain(m, NULL);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.fibnum = 0;
	m->m_pkthdr.rsstype = 0;

	mc_init_m(mc, m);
	MPASS(m->m_pkthdr.len == mc->mc_len);
}

#ifdef SOCKBUF_DEBUG
static inline void
uipc_stream_sbcheck(struct sockbuf *sb)
{
	struct mbuf *d;
	u_int dacc, dccc, dctl, dmbcnt;
	bool notready = false;

	dacc = dccc = dctl = dmbcnt = 0;
	STAILQ_FOREACH(d, &sb->uxst_mbq, m_stailq) {
		if (d == sb->uxst_fnrdy) {
			MPASS(d->m_flags & M_NOTREADY);
			notready = true;
		}
		if (d->m_type == MT_CONTROL)
			dctl += d->m_len;
		else if (d->m_type == MT_DATA) {
			dccc += d->m_len;
			if (!notready)
				dacc += d->m_len;
		} else
			MPASS(0);
		dmbcnt += MSIZE;
		if (d->m_flags & M_EXT)
			dmbcnt += d->m_ext.ext_size;
		if (d->m_stailq.stqe_next == NULL)
			MPASS(sb->uxst_mbq.stqh_last == &d->m_stailq.stqe_next);
	}
	MPASS(sb->uxst_fnrdy == NULL || notready);
	MPASS(dacc == sb->sb_acc);
	MPASS(dccc == sb->sb_ccc);
	MPASS(dctl == sb->sb_ctl);
	MPASS(dmbcnt == sb->sb_mbcnt);
	(void)STAILQ_EMPTY(&sb->uxst_mbq);
}
#define	UIPC_STREAM_SBCHECK(sb)	uipc_stream_sbcheck(sb)
#else
#define	UIPC_STREAM_SBCHECK(sb)	do {} while (0)
#endif

/*
 * uipc_stream_sbspace() returns how much a writer can send, limited by char
 * count or mbuf memory use, whichever limit is reached first.
 *
 * An obvious and legitimate reason for a socket having more data than
 * allowed is lowering the limit with setsockopt(SO_RCVBUF) on an already
 * full buffer. Also, sb_mbcnt may overcommit sb_mbmax in case a previous
 * write observed 'space < mbspace', but the mchain allocated to hold 'space'
 * bytes of data ended up with 'mc_mlen > mbspace'. A typical scenario would
 * be a full buffer with a writer trying to push in a large write, and a slow
 * reader that reads just a few bytes at a time. In that case the writer will
 * keep creating new mbufs with mc_split(). These mbufs will carry few
 * characters each, but will all point at the same cluster, thus each adding
 * the cluster size to sb_mbcnt. This means we may count the same cluster
 * many times, potentially underutilizing the socket buffer. We aren't
 * optimizing towards ineffective readers. The classic socket buffer had the
 * same "feature".
 */
static inline u_int
uipc_stream_sbspace(struct sockbuf *sb)
{
	u_int space, mbspace;

	if (__predict_true(sb->sb_hiwat >= sb->sb_ccc + sb->sb_ctl))
		space = sb->sb_hiwat - sb->sb_ccc - sb->sb_ctl;
	else
		return (0);
	if (__predict_true(sb->sb_mbmax >= sb->sb_mbcnt))
		mbspace = sb->sb_mbmax - sb->sb_mbcnt;
	else
		return (0);

	return (min(space, mbspace));
}

/*
 * UNIX version of the generic sbwait() for writes. We wait on the peer's
 * receive buffer, using our own timeout.
 */
static int
uipc_stream_sbwait(struct socket *so, sbintime_t timeo)
{
	struct sockbuf *sb = &so->so_rcv;

	SOCK_RECVBUF_LOCK_ASSERT(so);
	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, SOCK_RECVBUF_MTX(so), PSOCK | PCATCH,
	    "sbwait", timeo, 0, 0));
}

static int
uipc_sosend_stream_or_seqpacket(struct socket *so, struct sockaddr *addr,
    struct uio *uio0, struct mbuf *m, struct mbuf *c, int flags,
    struct thread *td)
{
	struct unpcb *unp2;
	struct socket *so2;
	struct sockbuf *sb;
	struct uio *uio;
	struct mchain mc, cmc;
	size_t resid, sent;
	bool nonblock, eor, aio;
	int error;

	MPASS((uio0 != NULL && m == NULL) || (m != NULL && uio0 == NULL));
	MPASS(m == NULL || c == NULL);

	if (__predict_false(flags & MSG_OOB))
		return (EOPNOTSUPP);

	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));
	eor = flags & MSG_EOR;

	mc = MCHAIN_INITIALIZER(&mc);
	cmc = MCHAIN_INITIALIZER(&cmc);
	sent = 0;
	aio = false;

	if (m == NULL) {
		if (c != NULL && (error = unp_internalize(c, &cmc, td)))
			goto out;
		/*
		 * This function may read more data from the uio than it
		 * would then place on the socket. That would leave the uio
		 * inconsistent upon return. Normally the uio is allocated
		 * on the stack of the syscall thread and we don't care about
		 * leaving it consistent. However, aio(9) will allocate a uio
		 * as part of the job and will use it to track progress. We
		 * detect aio(9) by checking the SB_AIO_RUNNING flag. It is
		 * safe to check it without the lock, because it is set and
		 * cleared in the same taskqueue thread.
		 *
		 * This check can also produce a false positive: there is an
		 * aio(9) job and also there is a syscall we are serving now.
		 * No sane software does that; it would lead to a mess in the
		 * socket buffer, as aio(9) doesn't grab the I/O sx(9). But
		 * syzkaller can create this mess. For such a false positive
		 * our goal is just not to panic or leak memory.
		 */
		if (__predict_false(so->so_snd.sb_flags & SB_AIO_RUNNING)) {
			uio = cloneuio(uio0);
			aio = true;
		} else {
			uio = uio0;
			resid = uio->uio_resid;
		}
		/*
		 * Optimization for a case when our send fits into the
		 * receive buffer - do the copyin before taking any locks,
		 * sized to our send buffer. Later copyins will also take
		 * into account space in the peer's receive buffer.
		 */
		error = mc_uiotomc(&mc, uio, so->so_snd.sb_hiwat, 0, M_WAITOK,
		    eor ? M_EOR : 0);
		if (__predict_false(error))
			goto out2;
	} else {
		uio = NULL;
		uipc_reset_kernel_mbuf(m, &mc);
	}

	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	if (error)
		goto out2;

	if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0))
		goto out3;

	if (unp2->unp_flags & UNP_WANTCRED_MASK) {
		/*
		 * Credentials are passed only once on SOCK_STREAM and
		 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
		 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
		 */
		unp_addsockcred(td, &cmc, unp2->unp_flags);
		unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
	}

	/*
	 * Cycle through the data to send and available space in the peer's
	 * receive buffer. Put a reference on the peer socket, so that it
	 * doesn't get freed while we sbwait().
	 * If the peer goes away, we will observe SBS_CANTRCVMORE and our
	 * sorele() will finalize the peer's socket destruction.
	 */
	so2 = unp2->unp_socket;
	soref(so2);
	UNP_PCB_UNLOCK(unp2);
	sb = &so2->so_rcv;
	while (mc.mc_len + cmc.mc_len > 0) {
		struct mchain mcnext = MCHAIN_INITIALIZER(&mcnext);
		u_int space;

		SOCK_RECVBUF_LOCK(so2);
restart:
		UIPC_STREAM_SBCHECK(sb);
		if (__predict_false(cmc.mc_len > sb->sb_hiwat)) {
			SOCK_RECVBUF_UNLOCK(so2);
			error = EMSGSIZE;
			goto out4;
		}
		if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
			SOCK_RECVBUF_UNLOCK(so2);
			error = EPIPE;
			goto out4;
		}
		/*
		 * Wait on the peer socket receive buffer until we have enough
		 * space to put at least control. The data is a stream and can
		 * be put partially, but control is really a datagram.
		 */
		space = uipc_stream_sbspace(sb);
		if (space < sb->sb_lowat || space < cmc.mc_len) {
			if (nonblock) {
				if (aio)
					sb->uxst_flags |= UXST_PEER_AIO;
				SOCK_RECVBUF_UNLOCK(so2);
				if (aio) {
					SOCK_SENDBUF_LOCK(so);
					so->so_snd.sb_ccc =
					    so->so_snd.sb_hiwat - space;
					SOCK_SENDBUF_UNLOCK(so);
				}
				error = EWOULDBLOCK;
				goto out4;
			}
			if ((error = uipc_stream_sbwait(so2,
			    so->so_snd.sb_timeo)) != 0) {
				SOCK_RECVBUF_UNLOCK(so2);
				goto out4;
			} else
				goto restart;
		}
		MPASS(space >= cmc.mc_len);
		space -= cmc.mc_len;
		if (space == 0) {
			/* There is space only to send control. */
			MPASS(!STAILQ_EMPTY(&cmc.mc_q));
			mcnext = mc;
			mc = MCHAIN_INITIALIZER(&mc);
		} else if (space < mc.mc_len) {
			/* Not enough space. */
			if (__predict_false(mc_split(&mc, &mcnext, space,
			    M_NOWAIT) == ENOMEM)) {
				/*
				 * If allocation failed use M_WAITOK and merge
				 * the chain back. Next time mc_split() will
				 * easily split at the same place. Only if we
				 * race with setsockopt(SO_RCVBUF) shrinking
				 * sb_hiwat can this happen more than once.
				 */
				SOCK_RECVBUF_UNLOCK(so2);
				(void)mc_split(&mc, &mcnext, space, M_WAITOK);
				mc_concat(&mc, &mcnext);
				SOCK_RECVBUF_LOCK(so2);
				goto restart;
			}
			MPASS(mc.mc_len == space);
		}
		if (!STAILQ_EMPTY(&cmc.mc_q)) {
			STAILQ_CONCAT(&sb->uxst_mbq, &cmc.mc_q);
			sb->sb_ctl += cmc.mc_len;
			sb->sb_mbcnt += cmc.mc_mlen;
			cmc.mc_len = 0;
		}
		sent += mc.mc_len;
		if (sb->uxst_fnrdy == NULL)
			sb->sb_acc += mc.mc_len;
		sb->sb_ccc += mc.mc_len;
		sb->sb_mbcnt += mc.mc_mlen;
		STAILQ_CONCAT(&sb->uxst_mbq, &mc.mc_q);
		UIPC_STREAM_SBCHECK(sb);
		space = uipc_stream_sbspace(sb);
		sorwakeup_locked(so2);
		if (!STAILQ_EMPTY(&mcnext.mc_q)) {
			/*
			 * Such an assignment is unsafe in general, but it is
			 * safe with !STAILQ_EMPTY(&mcnext.mc_q). In C++ we
			 * could overload = for STAILQs :)
			 */
			mc = mcnext;
		} else if (uio != NULL && uio->uio_resid > 0) {
			/*
			 * Copy in the sum of the peer's receive buffer space
			 * and our sb_hiwat, which is our virtual send buffer
			 * size. See the comment above the unpst_sendspace
			 * declaration. We are reading sb_hiwat locklessly
			 * because a) we don't care about an application that
			 * does send(2) and setsockopt(2) racing internally,
			 * and b) for an application that does this in
			 * sequence we will see the correct value, because
			 * sbsetopt() uses the buffer lock and we have also
			 * already acquired it at least once.
			 */
			error = mc_uiotomc(&mc, uio, space +
			    atomic_load_int(&so->so_snd.sb_hiwat), 0, M_WAITOK,
			    eor ? M_EOR : 0);
			if (__predict_false(error))
				goto out4;
		} else
			mc = MCHAIN_INITIALIZER(&mc);
	}

	MPASS(STAILQ_EMPTY(&mc.mc_q));

	td->td_ru.ru_msgsnd++;
out4:
	sorele(so2);
out3:
	SOCK_IO_SEND_UNLOCK(so);
out2:
	if (aio) {
		freeuio(uio);
		uioadvance(uio0, sent);
	} else if (uio != NULL)
		uio->uio_resid = resid - sent;
	if (!mc_empty(&cmc))
		unp_scan(mc_first(&cmc), unp_freerights);
out:
	mc_freem(&mc);
	mc_freem(&cmc);

	return (error);
}

/*
 * Wakeup a writer, used by recv(2) and shutdown(2).
 *
 * @param so	Points to a connected stream socket with receive buffer locked
 *
 * In blocking mode the peer is sleeping on our receive buffer, and we need
 * just wakeup(9) on it. But to wake up various event engines, we need to
 * reach over to the peer's selinfo. This can be safely done as the socket
 * buffer receive lock is protecting us from the peer going away.
 */
static void
uipc_wakeup_writer(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct selinfo *sel;

	SOCK_RECVBUF_LOCK_ASSERT(so);
	MPASS(sb->uxst_peer != NULL);

	sel = &sb->uxst_peer->so_wrsel;

	if (sb->uxst_flags & UXST_PEER_SEL) {
		selwakeuppri(sel, PSOCK);
		/*
		 * XXXGL: sowakeup() does SEL_WAITING() without locks.
		 */
		if (!SEL_WAITING(sel))
			sb->uxst_flags &= ~UXST_PEER_SEL;
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sel->si_note, 0);
	SOCK_RECVBUF_UNLOCK(so);
}

static void
uipc_cantrcvmore(struct socket *so)
{

	SOCK_RECVBUF_LOCK(so);
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	selwakeuppri(&so->so_rdsel, PSOCK);
	KNOTE_LOCKED(&so->so_rdsel.si_note, 0);
	if (so->so_rcv.uxst_peer != NULL)
		uipc_wakeup_writer(so);
	else
		SOCK_RECVBUF_UNLOCK(so);
}

static int
uipc_soreceive_stream_or_seqpacket(struct socket *so, struct sockaddr **psa,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *control, *m, *first, *last, *next;
	u_int ctl, space, datalen, mbcnt, lastlen;
	int error, flags;
	bool nonblock, waitall, peek;

	MPASS(mp0 == NULL);

	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;

	flags = flagsp != NULL ? *flagsp : 0;
	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));
	peek = flags & MSG_PEEK;
	waitall = (flags & MSG_WAITALL) && !peek;

	/*
	 * This check may fail only on a socket that never went through
	 * connect(2). We can check this locklessly because: a) for a newborn
	 * socket we don't care about applications that may race internally
	 * between connect(2) and recv(2), and b) for a dying socket, if we
	 * miss an update by unp_soisdisconnected(), we would still get the
	 * check right. For a dying socket we would observe SBS_CANTRCVMORE
	 * later.
	 */
	if (__predict_false((atomic_load_short(&so->so_state) &
	    (SS_ISCONNECTED|SS_ISDISCONNECTED)) == 0))
		return (ENOTCONN);

	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
	if (__predict_false(error))
		return (error);

restart:
	SOCK_RECVBUF_LOCK(so);
	UIPC_STREAM_SBCHECK(sb);
	while (sb->sb_acc < sb->sb_lowat &&
	    (sb->sb_ctl == 0 || controlp == NULL)) {
		if (so->so_error) {
			error = so->so_error;
			if (!peek)
				so->so_error = 0;
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
		if (sb->sb_state & SBS_CANTRCVMORE) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (0);
		}
		if (nonblock) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (EWOULDBLOCK);
		}
		error = sbwait(so, SO_RCV);
		if (error) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
	}

	MPASS(STAILQ_FIRST(&sb->uxst_mbq));
	MPASS(sb->sb_acc > 0 || sb->sb_ctl > 0);

	mbcnt = 0;
	ctl = 0;
	first = STAILQ_FIRST(&sb->uxst_mbq);
	if (first->m_type == MT_CONTROL) {
		control = first;
		STAILQ_FOREACH_FROM(first, &sb->uxst_mbq, m_stailq) {
			if (first->m_type != MT_CONTROL)
				break;
			ctl += first->m_len;
			mbcnt += MSIZE;
			if (first->m_flags & M_EXT)
				mbcnt += first->m_ext.ext_size;
		}
	} else
		control = NULL;

	/*
	 * Find split point for the next copyout. On exit from the loop:
	 * last == NULL - socket to be flushed
	 * last != NULL
	 *   lastlen > last->m_len - uio to be filled, last to be adjusted
	 *   lastlen == 0 - MT_CONTROL, M_EOR or M_NOTREADY encountered
	 */
	space = uio->uio_resid;
	datalen = 0;
	for (m = first, last = sb->uxst_fnrdy, lastlen = 0;
	    m != sb->uxst_fnrdy;
	    m = STAILQ_NEXT(m, m_stailq)) {
		if (m->m_type != MT_DATA) {
			last = m;
			lastlen = 0;
			break;
		}
		if (space >= m->m_len) {
			space -= m->m_len;
			datalen += m->m_len;
			mbcnt += MSIZE;
			if (m->m_flags & M_EXT)
				mbcnt += m->m_ext.ext_size;
			if (m->m_flags & M_EOR) {
				last = STAILQ_NEXT(m, m_stailq);
				lastlen = 0;
				flags |= MSG_EOR;
				break;
			}
		} else {
			datalen += space;
			last = m;
			lastlen = space;
			break;
		}
	}

	UIPC_STREAM_SBCHECK(sb);
	if (!peek) {
		if (last == NULL)
			STAILQ_INIT(&sb->uxst_mbq);
		else {
			STAILQ_FIRST(&sb->uxst_mbq) = last;
			MPASS(last->m_len > lastlen);
			last->m_len -= lastlen;
			last->m_data += lastlen;
		}
		MPASS(sb->sb_acc >= datalen);
		sb->sb_acc -= datalen;
		sb->sb_ccc -= datalen;
		MPASS(sb->sb_ctl >= ctl);
		sb->sb_ctl -= ctl;
		MPASS(sb->sb_mbcnt >= mbcnt);
		sb->sb_mbcnt -= mbcnt;
		UIPC_STREAM_SBCHECK(sb);
		if (__predict_true(sb->uxst_peer != NULL)) {
			struct unpcb *unp2;
			bool aio;

			if ((aio = sb->uxst_flags & UXST_PEER_AIO))
				sb->uxst_flags &= ~UXST_PEER_AIO;

			uipc_wakeup_writer(so);
			/*
			 * XXXGL: need to go through uipc_lock_peer() after
			 * the receive buffer lock is dropped, as it was
			 * protecting us from unp_soisdisconnected(). The
			 * aio workarounds should be refactored to the
			 * aio(4) side.
			 */
			if (aio && uipc_lock_peer(so, &unp2) == 0) {
				struct socket *so2 = unp2->unp_socket;

				SOCK_SENDBUF_LOCK(so2);
				so2->so_snd.sb_ccc -= datalen;
				sowakeup_aio(so2, SO_SND);
				SOCK_SENDBUF_UNLOCK(so2);
				UNP_PCB_UNLOCK(unp2);
			}
		} else
			SOCK_RECVBUF_UNLOCK(so);
	} else
		SOCK_RECVBUF_UNLOCK(so);

	while (control != NULL && control->m_type == MT_CONTROL) {
		if (!peek) {
			/*
			 * An unp_externalize() failure must abort the entire
			 * read(2). Such a failure should also free the
			 * problematic control, but link the remaining data
			 * back to the head of the buffer, so that the socket
			 * is not left in a state where it can't progress
			 * forward with reading. The probability of such a
			 * failure is really low, so it is fine that we need
			 * to perform a pretty complex operation here to
			 * reconstruct the buffer.
			 */
			error = unp_externalize(control, controlp, flags);
			control = m_free(control);
			if (__predict_false(error && control != NULL)) {
				struct mchain cmc;

				mc_init_m(&cmc, control);

				SOCK_RECVBUF_LOCK(so);
				if (__predict_false(
				    (sb->sb_state & SBS_CANTRCVMORE) ||
				    cmc.mc_len + sb->sb_ccc + sb->sb_ctl >
				    sb->sb_hiwat)) {
					/*
					 * While the lock was dropped and we
					 * were failing in unp_externalize(),
					 * the peer could have a) disconnected,
					 * or b) filled the buffer so that we
					 * can't prepend the data back.
					 * These are two edge conditions that
					 * we just can't handle, so lose the
					 * data and return the error.
					 */
					SOCK_RECVBUF_UNLOCK(so);
					SOCK_IO_RECV_UNLOCK(so);
					unp_scan(mc_first(&cmc),
					    unp_freerights);
					mc_freem(&cmc);
					return (error);
				}

				UIPC_STREAM_SBCHECK(sb);
				/* XXXGL: STAILQ_PREPEND */
				STAILQ_CONCAT(&cmc.mc_q, &sb->uxst_mbq);
				STAILQ_SWAP(&cmc.mc_q, &sb->uxst_mbq, mbuf);

				sb->sb_ctl = sb->sb_acc = sb->sb_ccc =
				    sb->sb_mbcnt = 0;
				STAILQ_FOREACH(m, &sb->uxst_mbq, m_stailq) {
					if (m->m_type == MT_DATA) {
						sb->sb_acc += m->m_len;
						sb->sb_ccc += m->m_len;
					} else {
						sb->sb_ctl += m->m_len;
					}
					sb->sb_mbcnt += MSIZE;
					if (m->m_flags & M_EXT)
						sb->sb_mbcnt +=
						    m->m_ext.ext_size;
				}
				UIPC_STREAM_SBCHECK(sb);
				SOCK_RECVBUF_UNLOCK(so);
				SOCK_IO_RECV_UNLOCK(so);
				return (error);
			}
			if (controlp != NULL) {
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
		} else {
			/*
			 * XXXGL
			 *
			 * In the MSG_PEEK case the control is not
			 * externalized. This means we are leaking some
			 * kernel pointers to userland. They are useless to
			 * a law-abiding application, but may be useful to
			 * malware. This is what the historical
			 * implementation in soreceive_generic() did. To be
			 * improved?
			 */
			if (controlp != NULL) {
				*controlp = m_copym(control, 0, control->m_len,
				    M_WAITOK);
				controlp = &(*controlp)->m_next;
			}
			control = STAILQ_NEXT(control, m_stailq);
		}
	}

	for (m = first; m != last; m = next) {
		next = STAILQ_NEXT(m, m_stailq);
		error = uiomove(mtod(m, char *), m->m_len, uio);
		if (__predict_false(error)) {
			SOCK_IO_RECV_UNLOCK(so);
			if (!peek)
				for (; m != last; m = next) {
					next = STAILQ_NEXT(m, m_stailq);
					m_free(m);
				}
			return (error);
		}
		if (!peek)
			m_free(m);
	}
	if (last != NULL && lastlen > 0) {
		if (!peek) {
			MPASS(!(m->m_flags & M_PKTHDR));
			MPASS(last->m_data - M_START(last) >= lastlen);
			error = uiomove(mtod(last, char *) - lastlen,
			    lastlen, uio);
		} else
			error = uiomove(mtod(last, char *), lastlen, uio);
		if (__predict_false(error)) {
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
	}
	if (waitall && !(flags & MSG_EOR) && uio->uio_resid > 0)
		goto restart;
	SOCK_IO_RECV_UNLOCK(so);

	if (flagsp != NULL)
		*flagsp |= flags;

	uio->uio_td->td_ru.ru_msgrcv++;

	return (0);
}

static int
uipc_sopoll_stream_or_seqpacket(struct socket *so, int events,
    struct thread *td)
{
	struct unpcb *unp = sotounpcb(so);
	int revents;

	UNP_PCB_LOCK(unp);
	if (SOLISTENING(so)) {
		/*
		 * The above check is safe, since conversion to listening
		 * uses both protocol and socket lock.
		 */
		SOCK_LOCK(so);
		if (!(events & (POLLIN | POLLRDNORM)))
			revents = 0;
		else if (!TAILQ_EMPTY(&so->sol_comp))
			revents = events & (POLLIN | POLLRDNORM);
		else if (so->so_error)
			revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP;
		else {
			selrecord(td, &so->so_rdsel);
			revents = 0;
		}
		SOCK_UNLOCK(so);
	} else {
		if (so->so_state & SS_ISDISCONNECTED)
			revents = POLLHUP;
		else
			revents = 0;
		if (events & (POLLIN | POLLRDNORM | POLLRDHUP)) {
			SOCK_RECVBUF_LOCK(so);
			if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat ||
			    so->so_error || so->so_rerror)
				revents |= events & (POLLIN | POLLRDNORM);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
				revents |= events &
				    (POLLIN | POLLRDNORM | POLLRDHUP);
			if (!(revents & (POLLIN | POLLRDNORM | POLLRDHUP))) {
				selrecord(td, &so->so_rdsel);
				so->so_rcv.sb_flags |= SB_SEL;
			}
			SOCK_RECVBUF_UNLOCK(so);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			struct socket *so2 = so->so_rcv.uxst_peer;

			if (so2 != NULL) {
				struct sockbuf *sb = &so2->so_rcv;

				SOCK_RECVBUF_LOCK(so2);
				if (uipc_stream_sbspace(sb) >= sb->sb_lowat)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				if (sb->sb_state & SBS_CANTRCVMORE)
					revents |= POLLHUP;
				if (!(revents & (POLLOUT | POLLWRNORM))) {
					so2->so_rcv.uxst_flags |=
					    UXST_PEER_SEL;
					selrecord(td, &so->so_wrsel);
				}
				SOCK_RECVBUF_UNLOCK(so2);
			} else
				selrecord(td, &so->so_wrsel);
		}
	}
	UNP_PCB_UNLOCK(unp);
	return (revents);
}

static void
uipc_wrknl_lock(void *arg)
{
	struct socket *so = arg;
	struct unpcb *unp = sotounpcb(so);

retry:
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
	} else {
		UNP_PCB_LOCK(unp);
		if (__predict_false(SOLISTENING(so))) {
			UNP_PCB_UNLOCK(unp);
			goto retry;
		}
		if (so->so_rcv.uxst_peer != NULL)
			SOCK_RECVBUF_LOCK(so->so_rcv.uxst_peer);
	}
}

static void
uipc_wrknl_unlock(void *arg)
{
	struct socket *so = arg;
	struct unpcb *unp = sotounpcb(so);

	if (SOLISTENING(so))
		SOLISTEN_UNLOCK(so);
	else {
		if (so->so_rcv.uxst_peer != NULL)
			SOCK_RECVBUF_UNLOCK(so->so_rcv.uxst_peer);
		UNP_PCB_UNLOCK(unp);
	}
}

static void
uipc_wrknl_assert_lock(void *arg, int what)
{
	struct socket *so = arg;

	if (SOLISTENING(so)) {
		if (what == LA_LOCKED)
			SOLISTEN_LOCK_ASSERT(so);
		else
			SOLISTEN_UNLOCK_ASSERT(so);
	} else {
		/*
		 * The pr_soreceive method will put a note without owning the
		 * unp lock, so we can't assert it here. But we can safely
		 * dereference the uxst_peer pointer, since the receive
		 * buffer lock is assumed to be held here.
		 */
		if (what == LA_LOCKED && so->so_rcv.uxst_peer != NULL)
			SOCK_RECVBUF_LOCK_ASSERT(so->so_rcv.uxst_peer);
	}
}

static void
uipc_filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	uipc_wrknl_lock(so);
	knlist_remove(&so->so_wrsel.si_note, kn, 1);
	uipc_wrknl_unlock(so);
}

static int
uipc_filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data, *so2;
	struct unpcb *unp = sotounpcb(so), *unp2 = unp->unp_conn;

	if (SOLISTENING(so))
		return (0);

	if (unp2 == NULL) {
		if (so->so_state & SS_ISDISCONNECTED) {
			kn->kn_flags |= EV_EOF;
			kn->kn_fflags = so->so_error;
			return (1);
		} else
			return (0);
	}

	so2 = unp2->unp_socket;
	SOCK_RECVBUF_LOCK_ASSERT(so2);
	kn->kn_data = uipc_stream_sbspace(&so2->so_rcv);

	if (so2->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	} else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (kn->kn_data >= so2->so_rcv.sb_lowat);
}

static int
uipc_filt_soempty(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data, *so2;
	struct unpcb *unp = sotounpcb(so), *unp2 = unp->unp_conn;

	if (SOLISTENING(so) || unp2 == NULL)
		return (1);

	so2 = unp2->unp_socket;
	SOCK_RECVBUF_LOCK_ASSERT(so2);
	kn->kn_data = uipc_stream_sbspace(&so2->so_rcv);

	return (kn->kn_data == 0 ? 1 : 0);
}
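
/*
 * There is no dedicated send buffer for a PF_LOCAL stream socket, so both
 * write-side filters above report based on space in the peer's receive
 * buffer.
 */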
1 : 0); 1852 } 1853 1854 static const struct filterops uipc_write_filtops = { 1855 .f_isfd = 1, 1856 .f_detach = uipc_filt_sowdetach, 1857 .f_event = uipc_filt_sowrite, 1858 }; 1859 static const struct filterops uipc_empty_filtops = { 1860 .f_isfd = 1, 1861 .f_detach = uipc_filt_sowdetach, 1862 .f_event = uipc_filt_soempty, 1863 }; 1864 1865 static int 1866 uipc_kqfilter_stream_or_seqpacket(struct socket *so, struct knote *kn) 1867 { 1868 struct unpcb *unp = sotounpcb(so); 1869 struct knlist *knl; 1870 1871 switch (kn->kn_filter) { 1872 case EVFILT_READ: 1873 return (sokqfilter_generic(so, kn)); 1874 case EVFILT_WRITE: 1875 kn->kn_fop = &uipc_write_filtops; 1876 break; 1877 case EVFILT_EMPTY: 1878 kn->kn_fop = &uipc_empty_filtops; 1879 break; 1880 default: 1881 return (EINVAL); 1882 } 1883 1884 knl = &so->so_wrsel.si_note; 1885 UNP_PCB_LOCK(unp); 1886 if (SOLISTENING(so)) { 1887 SOLISTEN_LOCK(so); 1888 knlist_add(knl, kn, 1); 1889 SOLISTEN_UNLOCK(so); 1890 } else { 1891 struct socket *so2 = so->so_rcv.uxst_peer; 1892 1893 if (so2 != NULL) 1894 SOCK_RECVBUF_LOCK(so2); 1895 knlist_add(knl, kn, 1); 1896 if (so2 != NULL) 1897 SOCK_RECVBUF_UNLOCK(so2); 1898 } 1899 UNP_PCB_UNLOCK(unp); 1900 return (0); 1901 } 1902 1903 /* PF_UNIX/SOCK_DGRAM version of sbspace() */ 1904 static inline bool 1905 uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt) 1906 { 1907 u_int bleft, mleft; 1908 1909 /* 1910 * Negative space may happen if send(2) is followed by 1911 * setsockopt(SO_SNDBUF/SO_RCVBUF) that shrinks maximum. 1912 */ 1913 if (__predict_false(sb->sb_hiwat < sb->uxdg_cc || 1914 sb->sb_mbmax < sb->uxdg_mbcnt)) 1915 return (false); 1916 1917 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) 1918 return (false); 1919 1920 bleft = sb->sb_hiwat - sb->uxdg_cc; 1921 mleft = sb->sb_mbmax - sb->uxdg_mbcnt; 1922 1923 return (bleft >= cc && mleft >= mbcnt); 1924 } 1925 1926 /* 1927 * PF_UNIX/SOCK_DGRAM send 1928 * 1929 * Allocate a record consisting of 3 mbufs in the sequence of 1930 * from -> control -> data and append it to the socket buffer. 1931 * 1932 * The first mbuf carries sender's name and is a pkthdr that stores 1933 * overall length of datagram, its memory consumption and control length. 
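 *
 * For illustration, a queued record for a sendto(2) that carries one
 * SCM_RIGHTS message would look roughly like this (a sketch, not a
 * normative layout):
 *
 *	MT_SONAME (pkthdr: len, memlen, ctllen) -> MT_CONTROL -> MT_DATA
 *
 * The MT_CONTROL mbufs appear only when ancillary data was internalized or
 * when the peer requested credentials with one of the LOCAL_CREDS options.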
1934 */ 1935 #define ctllen PH_loc.thirtytwo[1] 1936 _Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <= 1937 offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen"); 1938 static int 1939 uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, 1940 struct mbuf *m, struct mbuf *c, int flags, struct thread *td) 1941 { 1942 struct unpcb *unp, *unp2; 1943 const struct sockaddr *from; 1944 struct socket *so2; 1945 struct sockbuf *sb; 1946 struct mchain cmc = MCHAIN_INITIALIZER(&cmc); 1947 struct mbuf *f; 1948 u_int cc, ctl, mbcnt; 1949 u_int dcc __diagused, dctl __diagused, dmbcnt __diagused; 1950 int error; 1951 1952 MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL)); 1953 1954 error = 0; 1955 f = NULL; 1956 1957 if (__predict_false(flags & MSG_OOB)) { 1958 error = EOPNOTSUPP; 1959 goto out; 1960 } 1961 if (m == NULL) { 1962 if (__predict_false(uio->uio_resid > unpdg_maxdgram)) { 1963 error = EMSGSIZE; 1964 goto out; 1965 } 1966 m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR); 1967 if (__predict_false(m == NULL)) { 1968 error = EFAULT; 1969 goto out; 1970 } 1971 f = m_gethdr(M_WAITOK, MT_SONAME); 1972 cc = m->m_pkthdr.len; 1973 mbcnt = MSIZE + m->m_pkthdr.memlen; 1974 if (c != NULL && (error = unp_internalize(c, &cmc, td))) 1975 goto out; 1976 } else { 1977 struct mchain mc; 1978 1979 uipc_reset_kernel_mbuf(m, &mc); 1980 cc = mc.mc_len; 1981 mbcnt = mc.mc_mlen; 1982 if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) { 1983 error = EMSGSIZE; 1984 goto out; 1985 } 1986 if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) { 1987 error = ENOBUFS; 1988 goto out; 1989 } 1990 } 1991 1992 unp = sotounpcb(so); 1993 MPASS(unp); 1994 1995 /* 1996 * XXXGL: would be cool to fully remove so_snd out of the equation 1997 * and avoid this lock, which is not only extraneous, but also being 1998 * released, thus still leaving possibility for a race. We can easily 1999 * handle SBS_CANTSENDMORE/SS_ISCONNECTED complement in unpcb, but it 2000 * is more difficult to invent something to handle so_error. 2001 */ 2002 error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); 2003 if (error) 2004 goto out2; 2005 SOCK_SENDBUF_LOCK(so); 2006 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 2007 SOCK_SENDBUF_UNLOCK(so); 2008 error = EPIPE; 2009 goto out3; 2010 } 2011 if (so->so_error != 0) { 2012 error = so->so_error; 2013 so->so_error = 0; 2014 SOCK_SENDBUF_UNLOCK(so); 2015 goto out3; 2016 } 2017 if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) { 2018 SOCK_SENDBUF_UNLOCK(so); 2019 error = EDESTADDRREQ; 2020 goto out3; 2021 } 2022 SOCK_SENDBUF_UNLOCK(so); 2023 2024 if (addr != NULL) { 2025 if ((error = unp_connectat(AT_FDCWD, so, addr, td, true))) 2026 goto out3; 2027 UNP_PCB_LOCK_ASSERT(unp); 2028 unp2 = unp->unp_conn; 2029 UNP_PCB_LOCK_ASSERT(unp2); 2030 } else { 2031 UNP_PCB_LOCK(unp); 2032 unp2 = unp_pcb_lock_peer(unp); 2033 if (unp2 == NULL) { 2034 UNP_PCB_UNLOCK(unp); 2035 error = ENOTCONN; 2036 goto out3; 2037 } 2038 } 2039 2040 if (unp2->unp_flags & UNP_WANTCRED_MASK) 2041 unp_addsockcred(td, &cmc, unp2->unp_flags); 2042 if (unp->unp_addr != NULL) 2043 from = (struct sockaddr *)unp->unp_addr; 2044 else 2045 from = &sun_noname; 2046 f->m_len = from->sa_len; 2047 MPASS(from->sa_len <= MLEN); 2048 bcopy(from, mtod(f, void *), from->sa_len); 2049 2050 /* 2051 * Concatenate mbufs: from -> control -> data. 2052 * Save overall cc and mbcnt in "from" mbuf. 
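	 *
	 * After this step f->m_pkthdr.len counts name + control + data
	 * bytes (cc + ctl), while ctllen counts only name + control; the
	 * INVARIANTS block below recomputes all three counters from the
	 * chain to cross-check them.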
2053 */ 2054 if (!STAILQ_EMPTY(&cmc.mc_q)) { 2055 f->m_next = mc_first(&cmc); 2056 mc_last(&cmc)->m_next = m; 2057 /* XXXGL: This is dirty as well as rollback after ENOBUFS. */ 2058 STAILQ_INIT(&cmc.mc_q); 2059 } else 2060 f->m_next = m; 2061 m = NULL; 2062 ctl = f->m_len + cmc.mc_len; 2063 mbcnt += cmc.mc_mlen; 2064 #ifdef INVARIANTS 2065 dcc = dctl = dmbcnt = 0; 2066 for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) { 2067 if (mb->m_type == MT_DATA) 2068 dcc += mb->m_len; 2069 else 2070 dctl += mb->m_len; 2071 dmbcnt += MSIZE; 2072 if (mb->m_flags & M_EXT) 2073 dmbcnt += mb->m_ext.ext_size; 2074 } 2075 MPASS(dcc == cc); 2076 MPASS(dctl == ctl); 2077 MPASS(dmbcnt == mbcnt); 2078 #endif 2079 f->m_pkthdr.len = cc + ctl; 2080 f->m_pkthdr.memlen = mbcnt; 2081 f->m_pkthdr.ctllen = ctl; 2082 2083 /* 2084 * Destination socket buffer selection. 2085 * 2086 * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the 2087 * destination address is supplied, create a temporary connection for 2088 * the run time of the function (see call to unp_connectat() above and 2089 * to unp_disconnect() below). We distinguish them by condition of 2090 * (addr != NULL). We intentionally avoid adding 'bool connected' for 2091 * that condition, since, again, through the run time of this code we 2092 * are always connected. For such "unconnected" sends, the destination 2093 * buffer would be the receive buffer of destination socket so2. 2094 * 2095 * For connected sends, data lands on the send buffer of the sender's 2096 * socket "so". Then, if we just added the very first datagram 2097 * on this send buffer, we need to add the send buffer on to the 2098 * receiving socket's buffer list. We put ourselves on top of the 2099 * list. Such logic gives infrequent senders priority over frequent 2100 * senders. 2101 * 2102 * Note on byte count management. As long as event methods kevent(2), 2103 * select(2) are not protocol specific (yet), we need to maintain 2104 * meaningful values on the receive buffer. So, the receive buffer 2105 * would accumulate counters from all connected buffers potentially 2106 * having sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax. 2107 */ 2108 so2 = unp2->unp_socket; 2109 sb = (addr == NULL) ? &so->so_snd : &so2->so_rcv; 2110 SOCK_RECVBUF_LOCK(so2); 2111 if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) { 2112 if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb)) 2113 TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd, 2114 uxdg_clist); 2115 STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt); 2116 sb->uxdg_cc += cc + ctl; 2117 sb->uxdg_ctl += ctl; 2118 sb->uxdg_mbcnt += mbcnt; 2119 so2->so_rcv.sb_acc += cc + ctl; 2120 so2->so_rcv.sb_ccc += cc + ctl; 2121 so2->so_rcv.sb_ctl += ctl; 2122 so2->so_rcv.sb_mbcnt += mbcnt; 2123 sorwakeup_locked(so2); 2124 f = NULL; 2125 } else { 2126 soroverflow_locked(so2); 2127 error = ENOBUFS; 2128 if (f->m_next->m_type == MT_CONTROL) { 2129 STAILQ_FIRST(&cmc.mc_q) = f->m_next; 2130 f->m_next = NULL; 2131 } 2132 } 2133 2134 if (addr != NULL) 2135 unp_disconnect(unp, unp2); 2136 else 2137 unp_pcb_unlock_pair(unp, unp2); 2138 2139 td->td_ru.ru_msgsnd++; 2140 2141 out3: 2142 SOCK_IO_SEND_UNLOCK(so); 2143 out2: 2144 if (!mc_empty(&cmc)) 2145 unp_scan(mc_first(&cmc), unp_freerights); 2146 out: 2147 if (f) 2148 m_freem(f); 2149 mc_freem(&cmc); 2150 if (m) 2151 m_freem(m); 2152 2153 return (error); 2154 } 2155 2156 /* 2157 * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK. 
2158 * The mbuf has already been unlinked from the uxdg_mb of socket buffer 2159 * and needs to be linked onto uxdg_peeked of receive socket buffer. 2160 */ 2161 static int 2162 uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa, 2163 struct uio *uio, struct mbuf **controlp, int *flagsp) 2164 { 2165 ssize_t len = 0; 2166 int error; 2167 2168 so->so_rcv.uxdg_peeked = m; 2169 so->so_rcv.uxdg_cc += m->m_pkthdr.len; 2170 so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen; 2171 so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen; 2172 SOCK_RECVBUF_UNLOCK(so); 2173 2174 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); 2175 if (psa != NULL) 2176 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); 2177 2178 m = m->m_next; 2179 KASSERT(m, ("%s: no data or control after soname", __func__)); 2180 2181 /* 2182 * With MSG_PEEK the control isn't executed, just copied. 2183 */ 2184 while (m != NULL && m->m_type == MT_CONTROL) { 2185 if (controlp != NULL) { 2186 *controlp = m_copym(m, 0, m->m_len, M_WAITOK); 2187 controlp = &(*controlp)->m_next; 2188 } 2189 m = m->m_next; 2190 } 2191 KASSERT(m == NULL || m->m_type == MT_DATA, 2192 ("%s: not MT_DATA mbuf %p", __func__, m)); 2193 while (m != NULL && uio->uio_resid > 0) { 2194 len = uio->uio_resid; 2195 if (len > m->m_len) 2196 len = m->m_len; 2197 error = uiomove(mtod(m, char *), (int)len, uio); 2198 if (error) { 2199 SOCK_IO_RECV_UNLOCK(so); 2200 return (error); 2201 } 2202 if (len == m->m_len) 2203 m = m->m_next; 2204 } 2205 SOCK_IO_RECV_UNLOCK(so); 2206 2207 if (flagsp != NULL) { 2208 if (m != NULL) { 2209 if (*flagsp & MSG_TRUNC) { 2210 /* Report real length of the packet */ 2211 uio->uio_resid -= m_length(m, NULL) - len; 2212 } 2213 *flagsp |= MSG_TRUNC; 2214 } else 2215 *flagsp &= ~MSG_TRUNC; 2216 } 2217 2218 return (0); 2219 } 2220 2221 /* 2222 * PF_UNIX/SOCK_DGRAM receive 2223 */ 2224 static int 2225 uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 2226 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2227 { 2228 struct sockbuf *sb = NULL; 2229 struct mbuf *m; 2230 int flags, error; 2231 ssize_t len = 0; 2232 bool nonblock; 2233 2234 MPASS(mp0 == NULL); 2235 2236 if (psa != NULL) 2237 *psa = NULL; 2238 if (controlp != NULL) 2239 *controlp = NULL; 2240 2241 flags = flagsp != NULL ? *flagsp : 0; 2242 nonblock = (so->so_state & SS_NBIO) || 2243 (flags & (MSG_DONTWAIT | MSG_NBIO)); 2244 2245 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); 2246 if (__predict_false(error)) 2247 return (error); 2248 2249 /* 2250 * Loop blocking while waiting for a datagram. Prioritize connected 2251 * peers over unconnected sends. Set sb to selected socket buffer 2252 * containing an mbuf on exit from the wait loop. A datagram that 2253 * had already been peeked at has top priority. 
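	 *
	 * The wait loop condition below encodes that priority by checking,
	 * in order:
	 *
	 *	1. so_rcv.uxdg_peeked - a datagram already examined with
	 *	   MSG_PEEK;
	 *	2. so_rcv.uxdg_conns - send buffers of connected peers;
	 *	3. so_rcv.uxdg_mb - datagrams from unconnected send(2)s.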
2254 */ 2255 SOCK_RECVBUF_LOCK(so); 2256 while ((m = so->so_rcv.uxdg_peeked) == NULL && 2257 (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL && 2258 (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) { 2259 if (so->so_error) { 2260 error = so->so_error; 2261 if (!(flags & MSG_PEEK)) 2262 so->so_error = 0; 2263 SOCK_RECVBUF_UNLOCK(so); 2264 SOCK_IO_RECV_UNLOCK(so); 2265 return (error); 2266 } 2267 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 2268 uio->uio_resid == 0) { 2269 SOCK_RECVBUF_UNLOCK(so); 2270 SOCK_IO_RECV_UNLOCK(so); 2271 return (0); 2272 } 2273 if (nonblock) { 2274 SOCK_RECVBUF_UNLOCK(so); 2275 SOCK_IO_RECV_UNLOCK(so); 2276 return (EWOULDBLOCK); 2277 } 2278 error = sbwait(so, SO_RCV); 2279 if (error) { 2280 SOCK_RECVBUF_UNLOCK(so); 2281 SOCK_IO_RECV_UNLOCK(so); 2282 return (error); 2283 } 2284 } 2285 2286 if (sb == NULL) 2287 sb = &so->so_rcv; 2288 else if (m == NULL) 2289 m = STAILQ_FIRST(&sb->uxdg_mb); 2290 else 2291 MPASS(m == so->so_rcv.uxdg_peeked); 2292 2293 MPASS(sb->uxdg_cc > 0); 2294 M_ASSERTPKTHDR(m); 2295 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); 2296 2297 if (uio->uio_td) 2298 uio->uio_td->td_ru.ru_msgrcv++; 2299 2300 if (__predict_true(m != so->so_rcv.uxdg_peeked)) { 2301 STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt); 2302 if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv) 2303 TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); 2304 } else 2305 so->so_rcv.uxdg_peeked = NULL; 2306 2307 sb->uxdg_cc -= m->m_pkthdr.len; 2308 sb->uxdg_ctl -= m->m_pkthdr.ctllen; 2309 sb->uxdg_mbcnt -= m->m_pkthdr.memlen; 2310 2311 if (__predict_false(flags & MSG_PEEK)) 2312 return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp)); 2313 2314 so->so_rcv.sb_acc -= m->m_pkthdr.len; 2315 so->so_rcv.sb_ccc -= m->m_pkthdr.len; 2316 so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen; 2317 so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen; 2318 SOCK_RECVBUF_UNLOCK(so); 2319 2320 if (psa != NULL) 2321 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); 2322 m = m_free(m); 2323 KASSERT(m, ("%s: no data or control after soname", __func__)); 2324 2325 /* 2326 * Packet to copyout() is now in 'm' and it is disconnected from the 2327 * queue. 2328 * 2329 * Process one or more MT_CONTROL mbufs present before any data mbufs 2330 * in the first mbuf chain on the socket buffer. We call into the 2331 * unp_externalize() to perform externalization (or freeing if 2332 * controlp == NULL). In some cases there can be only MT_CONTROL mbufs 2333 * without MT_DATA mbufs. 
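	 *
	 * Externalization converts the in-kernel struct filedescent
	 * pointers carried by SCM_RIGHTS back into integer descriptors in
	 * the receiving process.  Should it fail, the rights remaining in
	 * the chain still have to be released, which is what the
	 * unp_scan(m, unp_freerights) in the error path below is for.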
 */
	while (m != NULL && m->m_type == MT_CONTROL) {
		error = unp_externalize(m, controlp, flags);
		m = m_free(m);
		if (error != 0) {
			SOCK_IO_RECV_UNLOCK(so);
			unp_scan(m, unp_freerights);
			m_freem(m);
			return (error);
		}
		if (controlp != NULL) {
			while (*controlp != NULL)
				controlp = &(*controlp)->m_next;
		}
	}
	KASSERT(m == NULL || m->m_type == MT_DATA,
	    ("%s: not MT_DATA mbuf %p", __func__, m));
	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			SOCK_IO_RECV_UNLOCK(so);
			m_freem(m);
			return (error);
		}
		if (len == m->m_len)
			m = m_free(m);
		else {
			m->m_data += len;
			m->m_len -= len;
		}
	}
	SOCK_IO_RECV_UNLOCK(so);

	if (m != NULL) {
		if (flagsp != NULL) {
			if (flags & MSG_TRUNC) {
				/* Report real length of the packet */
				uio->uio_resid -= m_length(m, NULL);
			}
			*flagsp |= MSG_TRUNC;
		}
		m_freem(m);
	} else if (flagsp != NULL)
		*flagsp &= ~MSG_TRUNC;

	return (0);
}

static int
uipc_sendfile_wait(struct socket *so, off_t need, int *space)
{
	struct unpcb *unp2;
	struct socket *so2;
	struct sockbuf *sb;
	bool nonblock, sockref;
	int error;

	MPASS(so->so_type == SOCK_STREAM);
	MPASS(need > 0);
	MPASS(space != NULL);

	nonblock = so->so_state & SS_NBIO;
	sockref = false;

	if (__predict_false((so->so_state & SS_ISCONNECTED) == 0))
		return (ENOTCONN);

	if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0))
		return (error);

	so2 = unp2->unp_socket;
	sb = &so2->so_rcv;
	SOCK_RECVBUF_LOCK(so2);
	UNP_PCB_UNLOCK(unp2);
	while ((*space = uipc_stream_sbspace(sb)) < need &&
	    (*space < so->so_snd.sb_hiwat / 2)) {
		UIPC_STREAM_SBCHECK(sb);
		if (nonblock) {
			SOCK_RECVBUF_UNLOCK(so2);
			return (EAGAIN);
		}
		if (!sockref) {
			/*
			 * Take a reference once before the first sleep: the
			 * peer may be closed while the receive buffer lock
			 * is dropped, and every return path past this point
			 * must pair it with sorele().
			 */
			soref(so2);
			sockref = true;
		}
		error = uipc_stream_sbwait(so2, so->so_snd.sb_timeo);
		if (error == 0 &&
		    __predict_false(sb->sb_state & SBS_CANTRCVMORE))
			error = EPIPE;
		if (error) {
			SOCK_RECVBUF_UNLOCK(so2);
			sorele(so2);
			return (error);
		}
	}
	UIPC_STREAM_SBCHECK(sb);
	SOCK_RECVBUF_UNLOCK(so2);
	if (sockref)
		sorele(so2);

	return (0);
}

/*
 * Although this is a pr_send method, for unix(4) it is called only via the
 * sendfile(2) path.  This means we can be sure that mbufs are clear of any
 * extra flags and don't require any conditioning.
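 *
 * With PRUS_NOTREADY the mbufs are accounted in sb_ccc immediately, but
 * not in sb_acc, so readers do not see the data until uipc_ready() clears
 * M_NOTREADY; see uipc_sbready() below.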
2442 */ 2443 static int 2444 uipc_sendfile(struct socket *so, int flags, struct mbuf *m, 2445 struct sockaddr *from, struct mbuf *control, struct thread *td) 2446 { 2447 struct mchain mc; 2448 struct unpcb *unp2; 2449 struct socket *so2; 2450 struct sockbuf *sb; 2451 bool notready, wakeup; 2452 int error; 2453 2454 MPASS(so->so_type == SOCK_STREAM); 2455 MPASS(from == NULL && control == NULL); 2456 KASSERT(!(m->m_flags & M_EXTPG), 2457 ("unix(4): TLS sendfile(2) not supported")); 2458 2459 notready = flags & PRUS_NOTREADY; 2460 2461 if (__predict_false((so->so_state & SS_ISCONNECTED) == 0)) { 2462 error = ENOTCONN; 2463 goto out; 2464 } 2465 2466 if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0)) 2467 goto out; 2468 2469 mc_init_m(&mc, m); 2470 2471 so2 = unp2->unp_socket; 2472 sb = &so2->so_rcv; 2473 SOCK_RECVBUF_LOCK(so2); 2474 UNP_PCB_UNLOCK(unp2); 2475 UIPC_STREAM_SBCHECK(sb); 2476 sb->sb_ccc += mc.mc_len; 2477 sb->sb_mbcnt += mc.mc_mlen; 2478 if (sb->uxst_fnrdy == NULL) { 2479 if (notready) { 2480 wakeup = false; 2481 STAILQ_FOREACH(m, &mc.mc_q, m_stailq) { 2482 if (m->m_flags & M_NOTREADY) { 2483 sb->uxst_fnrdy = m; 2484 break; 2485 } else { 2486 sb->sb_acc += m->m_len; 2487 wakeup = true; 2488 } 2489 } 2490 } else { 2491 wakeup = true; 2492 sb->sb_acc += mc.mc_len; 2493 } 2494 } else { 2495 wakeup = false; 2496 } 2497 STAILQ_CONCAT(&sb->uxst_mbq, &mc.mc_q); 2498 UIPC_STREAM_SBCHECK(sb); 2499 if (wakeup) 2500 sorwakeup_locked(so2); 2501 else 2502 SOCK_RECVBUF_UNLOCK(so2); 2503 2504 return (0); 2505 out: 2506 /* 2507 * In case of not ready data, uipc_ready() is responsible 2508 * for freeing memory. 2509 */ 2510 if (m != NULL && !notready) 2511 m_freem(m); 2512 2513 return (error); 2514 } 2515 2516 static int 2517 uipc_sbready(struct sockbuf *sb, struct mbuf *m, int count) 2518 { 2519 bool blocker; 2520 2521 /* assert locked */ 2522 2523 blocker = (sb->uxst_fnrdy == m); 2524 STAILQ_FOREACH_FROM(m, &sb->uxst_mbq, m_stailq) { 2525 if (count > 0) { 2526 MPASS(m->m_flags & M_NOTREADY); 2527 m->m_flags &= ~M_NOTREADY; 2528 if (blocker) 2529 sb->sb_acc += m->m_len; 2530 count--; 2531 } else if (m->m_flags & M_NOTREADY) 2532 break; 2533 else if (blocker) 2534 sb->sb_acc += m->m_len; 2535 } 2536 if (blocker) { 2537 sb->uxst_fnrdy = m; 2538 return (0); 2539 } else 2540 return (EINPROGRESS); 2541 } 2542 2543 static bool 2544 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp) 2545 { 2546 struct mbuf *mb; 2547 struct sockbuf *sb; 2548 2549 SOCK_LOCK(so); 2550 if (SOLISTENING(so)) { 2551 SOCK_UNLOCK(so); 2552 return (false); 2553 } 2554 mb = NULL; 2555 sb = &so->so_rcv; 2556 SOCK_RECVBUF_LOCK(so); 2557 if (sb->uxst_fnrdy != NULL) { 2558 STAILQ_FOREACH(mb, &sb->uxst_mbq, m_stailq) { 2559 if (mb == m) { 2560 *errorp = uipc_sbready(sb, m, count); 2561 break; 2562 } 2563 } 2564 } 2565 SOCK_RECVBUF_UNLOCK(so); 2566 SOCK_UNLOCK(so); 2567 return (mb != NULL); 2568 } 2569 2570 static int 2571 uipc_ready(struct socket *so, struct mbuf *m, int count) 2572 { 2573 struct unpcb *unp, *unp2; 2574 int error; 2575 2576 MPASS(so->so_type == SOCK_STREAM); 2577 2578 if (__predict_true(uipc_lock_peer(so, &unp2) == 0)) { 2579 struct socket *so2; 2580 struct sockbuf *sb; 2581 2582 so2 = unp2->unp_socket; 2583 sb = &so2->so_rcv; 2584 SOCK_RECVBUF_LOCK(so2); 2585 UNP_PCB_UNLOCK(unp2); 2586 UIPC_STREAM_SBCHECK(sb); 2587 error = uipc_sbready(sb, m, count); 2588 UIPC_STREAM_SBCHECK(sb); 2589 if (error == 0) 2590 sorwakeup_locked(so2); 2591 else 2592 SOCK_RECVBUF_UNLOCK(so2); 2593 } else { 
		/*
		 * The receiving socket has been disconnected, but may still
		 * be valid.  In this case, the not-ready mbufs are still
		 * present in its socket buffer, so perform an exhaustive
		 * search before giving up and freeing the mbufs.
		 */
		UNP_LINK_RLOCK();
		LIST_FOREACH(unp, &unp_shead, unp_link) {
			if (uipc_ready_scan(unp->unp_socket, m, count, &error))
				break;
		}
		UNP_LINK_RUNLOCK();

		if (unp == NULL) {
			for (int i = 0; i < count; i++)
				m = m_free(m);
			return (ECONNRESET);
		}
	}
	return (error);
}

static int
uipc_sense(struct socket *so, struct stat *sb)
{
	struct unpcb *unp;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));

	sb->st_blksize = so->so_snd.sb_hiwat;
	sb->st_dev = NODEV;
	sb->st_ino = unp->unp_ino;
	return (0);
}

static int
uipc_shutdown(struct socket *so, enum shutdown_how how)
{
	struct unpcb *unp = sotounpcb(so);
	int error;

	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		if (how != SHUT_WR) {
			so->so_error = ECONNABORTED;
			solisten_wakeup(so);	/* unlocks so */
		} else
			SOCK_UNLOCK(so);
		return (ENOTCONN);
	} else if ((so->so_state &
	    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
		/*
		 * POSIX mandates that we just return ENOTCONN when
		 * shutdown(2) is invoked on a datagram socket, however
		 * historically we would actually tear the socket down.  This
		 * is known to be leveraged by some applications to let one
		 * process unblock another that is waiting in recv(2) on a
		 * socket they share.  Try to meet both backward-compatibility
		 * and POSIX requirements by forcing ENOTCONN but still
		 * flushing buffers and performing wakeup(9).
		 *
		 * XXXGL: it remains unknown which applications expect this
		 * behavior and whether it is isolated to unix/dgram or
		 * inet/dgram or both.  See: D10351, D3039.
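		 *
		 * A hypothetical illustration of the pattern: a process
		 * blocked in recv(2) on a shared unconnected datagram socket
		 * is woken up by a sibling calling shutdown(2) on the same
		 * socket, even though the call itself fails with ENOTCONN.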
2658 */ 2659 error = ENOTCONN; 2660 if (so->so_type != SOCK_DGRAM) { 2661 SOCK_UNLOCK(so); 2662 return (error); 2663 } 2664 } else 2665 error = 0; 2666 SOCK_UNLOCK(so); 2667 2668 switch (how) { 2669 case SHUT_RD: 2670 if (so->so_type == SOCK_DGRAM) 2671 socantrcvmore(so); 2672 else 2673 uipc_cantrcvmore(so); 2674 unp_dispose(so); 2675 break; 2676 case SHUT_RDWR: 2677 if (so->so_type == SOCK_DGRAM) 2678 socantrcvmore(so); 2679 else 2680 uipc_cantrcvmore(so); 2681 unp_dispose(so); 2682 /* FALLTHROUGH */ 2683 case SHUT_WR: 2684 if (so->so_type == SOCK_DGRAM) { 2685 socantsendmore(so); 2686 } else { 2687 UNP_PCB_LOCK(unp); 2688 if (unp->unp_conn != NULL) 2689 uipc_cantrcvmore(unp->unp_conn->unp_socket); 2690 UNP_PCB_UNLOCK(unp); 2691 } 2692 } 2693 wakeup(&so->so_timeo); 2694 2695 return (error); 2696 } 2697 2698 static int 2699 uipc_sockaddr(struct socket *so, struct sockaddr *ret) 2700 { 2701 struct unpcb *unp; 2702 const struct sockaddr *sa; 2703 2704 unp = sotounpcb(so); 2705 KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL")); 2706 2707 UNP_PCB_LOCK(unp); 2708 if (unp->unp_addr != NULL) 2709 sa = (struct sockaddr *) unp->unp_addr; 2710 else 2711 sa = &sun_noname; 2712 bcopy(sa, ret, sa->sa_len); 2713 UNP_PCB_UNLOCK(unp); 2714 return (0); 2715 } 2716 2717 static int 2718 uipc_ctloutput(struct socket *so, struct sockopt *sopt) 2719 { 2720 struct unpcb *unp; 2721 struct xucred xu; 2722 int error, optval; 2723 2724 if (sopt->sopt_level != SOL_LOCAL) 2725 return (EINVAL); 2726 2727 unp = sotounpcb(so); 2728 KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL")); 2729 error = 0; 2730 switch (sopt->sopt_dir) { 2731 case SOPT_GET: 2732 switch (sopt->sopt_name) { 2733 case LOCAL_PEERCRED: 2734 UNP_PCB_LOCK(unp); 2735 if (unp->unp_flags & UNP_HAVEPC) 2736 xu = unp->unp_peercred; 2737 else { 2738 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 2739 error = ENOTCONN; 2740 else 2741 error = EINVAL; 2742 } 2743 UNP_PCB_UNLOCK(unp); 2744 if (error == 0) 2745 error = sooptcopyout(sopt, &xu, sizeof(xu)); 2746 break; 2747 2748 case LOCAL_CREDS: 2749 /* Unlocked read. */ 2750 optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0; 2751 error = sooptcopyout(sopt, &optval, sizeof(optval)); 2752 break; 2753 2754 case LOCAL_CREDS_PERSISTENT: 2755 /* Unlocked read. */ 2756 optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 
1 : 0; 2757 error = sooptcopyout(sopt, &optval, sizeof(optval)); 2758 break; 2759 2760 default: 2761 error = EOPNOTSUPP; 2762 break; 2763 } 2764 break; 2765 2766 case SOPT_SET: 2767 switch (sopt->sopt_name) { 2768 case LOCAL_CREDS: 2769 case LOCAL_CREDS_PERSISTENT: 2770 error = sooptcopyin(sopt, &optval, sizeof(optval), 2771 sizeof(optval)); 2772 if (error) 2773 break; 2774 2775 #define OPTSET(bit, exclusive) do { \ 2776 UNP_PCB_LOCK(unp); \ 2777 if (optval) { \ 2778 if ((unp->unp_flags & (exclusive)) != 0) { \ 2779 UNP_PCB_UNLOCK(unp); \ 2780 error = EINVAL; \ 2781 break; \ 2782 } \ 2783 unp->unp_flags |= (bit); \ 2784 } else \ 2785 unp->unp_flags &= ~(bit); \ 2786 UNP_PCB_UNLOCK(unp); \ 2787 } while (0) 2788 2789 switch (sopt->sopt_name) { 2790 case LOCAL_CREDS: 2791 OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS); 2792 break; 2793 2794 case LOCAL_CREDS_PERSISTENT: 2795 OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT); 2796 break; 2797 2798 default: 2799 break; 2800 } 2801 break; 2802 #undef OPTSET 2803 default: 2804 error = ENOPROTOOPT; 2805 break; 2806 } 2807 break; 2808 2809 default: 2810 error = EOPNOTSUPP; 2811 break; 2812 } 2813 return (error); 2814 } 2815 2816 static int 2817 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 2818 { 2819 2820 return (unp_connectat(AT_FDCWD, so, nam, td, false)); 2821 } 2822 2823 static int 2824 unp_connectat(int fd, struct socket *so, struct sockaddr *nam, 2825 struct thread *td, bool return_locked) 2826 { 2827 struct mtx *vplock; 2828 struct sockaddr_un *soun; 2829 struct vnode *vp; 2830 struct socket *so2; 2831 struct unpcb *unp, *unp2, *unp3; 2832 struct nameidata nd; 2833 char buf[SOCK_MAXADDRLEN]; 2834 struct sockaddr *sa; 2835 cap_rights_t rights; 2836 int error, len; 2837 bool connreq; 2838 2839 CURVNET_ASSERT_SET(); 2840 2841 if (nam->sa_family != AF_UNIX) 2842 return (EAFNOSUPPORT); 2843 if (nam->sa_len > sizeof(struct sockaddr_un)) 2844 return (EINVAL); 2845 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); 2846 if (len <= 0) 2847 return (EINVAL); 2848 soun = (struct sockaddr_un *)nam; 2849 bcopy(soun->sun_path, buf, len); 2850 buf[len] = 0; 2851 2852 error = 0; 2853 unp = sotounpcb(so); 2854 UNP_PCB_LOCK(unp); 2855 for (;;) { 2856 /* 2857 * Wait for connection state to stabilize. If a connection 2858 * already exists, give up. For datagram sockets, which permit 2859 * multiple consecutive connect(2) calls, upper layers are 2860 * responsible for disconnecting in advance of a subsequent 2861 * connect(2), but this is not synchronized with PCB connection 2862 * state. 2863 * 2864 * Also make sure that no threads are currently attempting to 2865 * lock the peer socket, to ensure that unp_conn cannot 2866 * transition between two valid sockets while locks are dropped. 
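		 *
		 * The unp_pairbusy protocol below implements this: a thread
		 * that intends to lock the peer bumps unp_pairbusy, and we
		 * sleep (wmesg "unpeer") with UNP_WAITING set until the
		 * count drains to zero.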
2867 */ 2868 if (SOLISTENING(so)) 2869 error = EOPNOTSUPP; 2870 else if (unp->unp_conn != NULL) 2871 error = EISCONN; 2872 else if ((unp->unp_flags & UNP_CONNECTING) != 0) { 2873 error = EALREADY; 2874 } 2875 if (error != 0) { 2876 UNP_PCB_UNLOCK(unp); 2877 return (error); 2878 } 2879 if (unp->unp_pairbusy > 0) { 2880 unp->unp_flags |= UNP_WAITING; 2881 mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0); 2882 continue; 2883 } 2884 break; 2885 } 2886 unp->unp_flags |= UNP_CONNECTING; 2887 UNP_PCB_UNLOCK(unp); 2888 2889 connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0; 2890 if (connreq) 2891 sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); 2892 else 2893 sa = NULL; 2894 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, 2895 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT)); 2896 error = namei(&nd); 2897 if (error) 2898 vp = NULL; 2899 else 2900 vp = nd.ni_vp; 2901 ASSERT_VOP_LOCKED(vp, "unp_connect"); 2902 if (error) 2903 goto bad; 2904 NDFREE_PNBUF(&nd); 2905 2906 if (vp->v_type != VSOCK) { 2907 error = ENOTSOCK; 2908 goto bad; 2909 } 2910 #ifdef MAC 2911 error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD); 2912 if (error) 2913 goto bad; 2914 #endif 2915 error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td); 2916 if (error) 2917 goto bad; 2918 2919 unp = sotounpcb(so); 2920 KASSERT(unp != NULL, ("unp_connect: unp == NULL")); 2921 2922 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 2923 mtx_lock(vplock); 2924 VOP_UNP_CONNECT(vp, &unp2); 2925 if (unp2 == NULL) { 2926 error = ECONNREFUSED; 2927 goto bad2; 2928 } 2929 so2 = unp2->unp_socket; 2930 if (so->so_type != so2->so_type) { 2931 error = EPROTOTYPE; 2932 goto bad2; 2933 } 2934 if (connreq) { 2935 if (SOLISTENING(so2)) 2936 so2 = solisten_clone(so2); 2937 else 2938 so2 = NULL; 2939 if (so2 == NULL) { 2940 error = ECONNREFUSED; 2941 goto bad2; 2942 } 2943 if ((error = uipc_attach(so2, 0, NULL)) != 0) { 2944 sodealloc(so2); 2945 goto bad2; 2946 } 2947 unp3 = sotounpcb(so2); 2948 unp_pcb_lock_pair(unp2, unp3); 2949 if (unp2->unp_addr != NULL) { 2950 bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len); 2951 unp3->unp_addr = (struct sockaddr_un *) sa; 2952 sa = NULL; 2953 } 2954 2955 unp_copy_peercred(td, unp3, unp, unp2); 2956 2957 UNP_PCB_UNLOCK(unp2); 2958 unp2 = unp3; 2959 2960 /* 2961 * It is safe to block on the PCB lock here since unp2 is 2962 * nascent and cannot be connected to any other sockets. 2963 */ 2964 UNP_PCB_LOCK(unp); 2965 #ifdef MAC 2966 mac_socketpeer_set_from_socket(so, so2); 2967 mac_socketpeer_set_from_socket(so2, so); 2968 #endif 2969 } else { 2970 unp_pcb_lock_pair(unp, unp2); 2971 } 2972 KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 && 2973 sotounpcb(so2) == unp2, 2974 ("%s: unp2 %p so2 %p", __func__, unp2, so2)); 2975 unp_connect2(so, so2, connreq); 2976 if (connreq) 2977 (void)solisten_enqueue(so2, SS_ISCONNECTED); 2978 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, 2979 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); 2980 unp->unp_flags &= ~UNP_CONNECTING; 2981 if (!return_locked) 2982 unp_pcb_unlock_pair(unp, unp2); 2983 bad2: 2984 mtx_unlock(vplock); 2985 bad: 2986 if (vp != NULL) { 2987 /* 2988 * If we are returning locked (called via uipc_sosend_dgram()), 2989 * we need to be sure that vput() won't sleep. This is 2990 * guaranteed by VOP_UNP_CONNECT() call above and unp2 lock. 2991 * SOCK_STREAM/SEQPACKET can't request return_locked (yet). 
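		 *
		 * That is, the bound socket still holds its own use
		 * reference on the vnode at this point, so the vput() below
		 * is not the final reference drop (a restatement of the
		 * guarantee above, not an additional one).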
 */
		MPASS(!(return_locked && connreq));
		vput(vp);
	}
	free(sa, M_SONAME);
	if (__predict_false(error)) {
		UNP_PCB_LOCK(unp);
		KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
		    ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
		unp->unp_flags &= ~UNP_CONNECTING;
		UNP_PCB_UNLOCK(unp);
	}
	return (error);
}

/*
 * Set socket peer credentials at connection time.
 *
 * The client's PCB credentials are copied from its process structure.  The
 * server's PCB credentials are copied from the socket on which it called
 * listen(2).  uipc_listen cached that process's credentials at the time.
 */
void
unp_copy_peercred(struct thread *td, struct unpcb *client_unp,
    struct unpcb *server_unp, struct unpcb *listen_unp)
{
	cru2xt(td, &client_unp->unp_peercred);
	client_unp->unp_flags |= UNP_HAVEPC;

	memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred,
	    sizeof(server_unp->unp_peercred));
	server_unp->unp_flags |= UNP_HAVEPC;
	client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK);
}

/*
 * unix/stream & unix/seqpacket version of soisconnected().
 *
 * The crucial thing we are doing here is setting up the uxst_peer linkage,
 * holding the unp and receive buffer locks of both sockets.  The disconnect
 * procedure does the same.  This gives us a safe way to access the peer from
 * send(2) and recv(2) during the socket lifetime.
 *
 * The less important thing is event notification of the fact that a socket
 * is now connected.  It is unusual for software to put a socket into an
 * event mechanism before connect(2), but it is supposed to be supported.
 * Note that there cannot be any sleeping I/O on the socket yet, only
 * presence in select/poll/kevent.
 *
 * This function can be called via two call paths:
 * 1) socketpair(2) - in this case the socket has not yet been reported to
 *    userland and just can't have any event notification mechanisms set up.
 *    The 'wakeup' boolean is always false.
 * 2) connect(2) of an existing socket to a recent clone of a listener:
 *    2.1) The socket that connect(2)s will have 'wakeup' true.  An
 *         application could have already put it into an event mechanism,
 *         so it shall be reported as readable and as writable.
 *    2.2) The socket that was just cloned with solisten_clone().  Same
 *         as 1).
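 *
 * Hence the 'wakeup' argument: the connecting socket wakes up both its
 * read and write selinfo, so that any thread polling the formerly
 * unconnected socket re-evaluates its state.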
3050 */ 3051 static void 3052 unp_soisconnected(struct socket *so, bool wakeup) 3053 { 3054 struct socket *so2 = sotounpcb(so)->unp_conn->unp_socket; 3055 struct sockbuf *sb; 3056 3057 SOCK_LOCK_ASSERT(so); 3058 UNP_PCB_LOCK_ASSERT(sotounpcb(so)); 3059 UNP_PCB_LOCK_ASSERT(sotounpcb(so2)); 3060 SOCK_RECVBUF_LOCK_ASSERT(so); 3061 SOCK_RECVBUF_LOCK_ASSERT(so2); 3062 3063 MPASS(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET); 3064 MPASS((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | 3065 SS_ISDISCONNECTING)) == 0); 3066 MPASS(so->so_qstate == SQ_NONE); 3067 3068 so->so_state &= ~SS_ISDISCONNECTED; 3069 so->so_state |= SS_ISCONNECTED; 3070 3071 sb = &so2->so_rcv; 3072 sb->uxst_peer = so; 3073 3074 if (wakeup) { 3075 KNOTE_LOCKED(&sb->sb_sel->si_note, 0); 3076 sb = &so->so_rcv; 3077 selwakeuppri(sb->sb_sel, PSOCK); 3078 SOCK_SENDBUF_LOCK_ASSERT(so); 3079 sb = &so->so_snd; 3080 selwakeuppri(sb->sb_sel, PSOCK); 3081 SOCK_SENDBUF_UNLOCK(so); 3082 } 3083 } 3084 3085 static void 3086 unp_connect2(struct socket *so, struct socket *so2, bool wakeup) 3087 { 3088 struct unpcb *unp; 3089 struct unpcb *unp2; 3090 3091 MPASS(so2->so_type == so->so_type); 3092 unp = sotounpcb(so); 3093 KASSERT(unp != NULL, ("unp_connect2: unp == NULL")); 3094 unp2 = sotounpcb(so2); 3095 KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL")); 3096 3097 UNP_PCB_LOCK_ASSERT(unp); 3098 UNP_PCB_LOCK_ASSERT(unp2); 3099 KASSERT(unp->unp_conn == NULL, 3100 ("%s: socket %p is already connected", __func__, unp)); 3101 3102 unp->unp_conn = unp2; 3103 unp_pcb_hold(unp2); 3104 unp_pcb_hold(unp); 3105 switch (so->so_type) { 3106 case SOCK_DGRAM: 3107 UNP_REF_LIST_LOCK(); 3108 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); 3109 UNP_REF_LIST_UNLOCK(); 3110 soisconnected(so); 3111 break; 3112 3113 case SOCK_STREAM: 3114 case SOCK_SEQPACKET: 3115 KASSERT(unp2->unp_conn == NULL, 3116 ("%s: socket %p is already connected", __func__, unp2)); 3117 unp2->unp_conn = unp; 3118 SOCK_LOCK(so); 3119 SOCK_LOCK(so2); 3120 if (wakeup) /* Avoid LOR with receive buffer lock. */ 3121 SOCK_SENDBUF_LOCK(so); 3122 SOCK_RECVBUF_LOCK(so); 3123 SOCK_RECVBUF_LOCK(so2); 3124 unp_soisconnected(so, wakeup); /* Will unlock send buffer. 
*/ 3125 unp_soisconnected(so2, false); 3126 SOCK_RECVBUF_UNLOCK(so); 3127 SOCK_RECVBUF_UNLOCK(so2); 3128 SOCK_UNLOCK(so); 3129 SOCK_UNLOCK(so2); 3130 break; 3131 3132 default: 3133 panic("unp_connect2"); 3134 } 3135 } 3136 3137 static void 3138 unp_soisdisconnected(struct socket *so) 3139 { 3140 SOCK_LOCK_ASSERT(so); 3141 SOCK_RECVBUF_LOCK_ASSERT(so); 3142 MPASS(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET); 3143 MPASS(!SOLISTENING(so)); 3144 MPASS((so->so_state & (SS_ISCONNECTING | SS_ISDISCONNECTING | 3145 SS_ISDISCONNECTED)) == 0); 3146 MPASS(so->so_state & SS_ISCONNECTED); 3147 3148 so->so_state |= SS_ISDISCONNECTED; 3149 so->so_state &= ~SS_ISCONNECTED; 3150 so->so_rcv.uxst_peer = NULL; 3151 socantrcvmore_locked(so); 3152 } 3153 3154 static void 3155 unp_disconnect(struct unpcb *unp, struct unpcb *unp2) 3156 { 3157 struct socket *so, *so2; 3158 struct mbuf *m = NULL; 3159 #ifdef INVARIANTS 3160 struct unpcb *unptmp; 3161 #endif 3162 3163 UNP_PCB_LOCK_ASSERT(unp); 3164 UNP_PCB_LOCK_ASSERT(unp2); 3165 KASSERT(unp->unp_conn == unp2, 3166 ("%s: unpcb %p is not connected to %p", __func__, unp, unp2)); 3167 3168 unp->unp_conn = NULL; 3169 so = unp->unp_socket; 3170 so2 = unp2->unp_socket; 3171 switch (unp->unp_socket->so_type) { 3172 case SOCK_DGRAM: 3173 /* 3174 * Remove our send socket buffer from the peer's receive buffer. 3175 * Move the data to the receive buffer only if it is empty. 3176 * This is a protection against a scenario where a peer 3177 * connects, floods and disconnects, effectively blocking 3178 * sendto() from unconnected sockets. 3179 */ 3180 SOCK_RECVBUF_LOCK(so2); 3181 if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) { 3182 TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd, 3183 uxdg_clist); 3184 if (__predict_true((so2->so_rcv.sb_state & 3185 SBS_CANTRCVMORE) == 0) && 3186 STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) { 3187 STAILQ_CONCAT(&so2->so_rcv.uxdg_mb, 3188 &so->so_snd.uxdg_mb); 3189 so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc; 3190 so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl; 3191 so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt; 3192 } else { 3193 m = STAILQ_FIRST(&so->so_snd.uxdg_mb); 3194 STAILQ_INIT(&so->so_snd.uxdg_mb); 3195 so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc; 3196 so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc; 3197 so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl; 3198 so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt; 3199 } 3200 /* Note: so may reconnect. 
*/ 3201 so->so_snd.uxdg_cc = 0; 3202 so->so_snd.uxdg_ctl = 0; 3203 so->so_snd.uxdg_mbcnt = 0; 3204 } 3205 SOCK_RECVBUF_UNLOCK(so2); 3206 UNP_REF_LIST_LOCK(); 3207 #ifdef INVARIANTS 3208 LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) { 3209 if (unptmp == unp) 3210 break; 3211 } 3212 KASSERT(unptmp != NULL, 3213 ("%s: %p not found in reflist of %p", __func__, unp, unp2)); 3214 #endif 3215 LIST_REMOVE(unp, unp_reflink); 3216 UNP_REF_LIST_UNLOCK(); 3217 if (so) { 3218 SOCK_LOCK(so); 3219 so->so_state &= ~SS_ISCONNECTED; 3220 SOCK_UNLOCK(so); 3221 } 3222 break; 3223 3224 case SOCK_STREAM: 3225 case SOCK_SEQPACKET: 3226 SOCK_LOCK(so); 3227 SOCK_LOCK(so2); 3228 SOCK_RECVBUF_LOCK(so); 3229 SOCK_RECVBUF_LOCK(so2); 3230 unp_soisdisconnected(so); 3231 MPASS(unp2->unp_conn == unp); 3232 unp2->unp_conn = NULL; 3233 unp_soisdisconnected(so2); 3234 SOCK_UNLOCK(so); 3235 SOCK_UNLOCK(so2); 3236 break; 3237 } 3238 3239 if (unp == unp2) { 3240 unp_pcb_rele_notlast(unp); 3241 if (!unp_pcb_rele(unp)) 3242 UNP_PCB_UNLOCK(unp); 3243 } else { 3244 if (!unp_pcb_rele(unp)) 3245 UNP_PCB_UNLOCK(unp); 3246 if (!unp_pcb_rele(unp2)) 3247 UNP_PCB_UNLOCK(unp2); 3248 } 3249 3250 if (m != NULL) { 3251 unp_scan(m, unp_freerights); 3252 m_freemp(m); 3253 } 3254 } 3255 3256 /* 3257 * unp_pcblist() walks the global list of struct unpcb's to generate a 3258 * pointer list, bumping the refcount on each unpcb. It then copies them out 3259 * sequentially, validating the generation number on each to see if it has 3260 * been detached. All of this is necessary because copyout() may sleep on 3261 * disk I/O. 3262 */ 3263 static int 3264 unp_pcblist(SYSCTL_HANDLER_ARGS) 3265 { 3266 struct unpcb *unp, **unp_list; 3267 unp_gen_t gencnt; 3268 struct xunpgen *xug; 3269 struct unp_head *head; 3270 struct xunpcb *xu; 3271 u_int i; 3272 int error, n; 3273 3274 switch ((intptr_t)arg1) { 3275 case SOCK_STREAM: 3276 head = &unp_shead; 3277 break; 3278 3279 case SOCK_DGRAM: 3280 head = &unp_dhead; 3281 break; 3282 3283 case SOCK_SEQPACKET: 3284 head = &unp_sphead; 3285 break; 3286 3287 default: 3288 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1); 3289 } 3290 3291 /* 3292 * The process of preparing the PCB list is too time-consuming and 3293 * resource-intensive to repeat twice on every request. 3294 */ 3295 if (req->oldptr == NULL) { 3296 n = unp_count; 3297 req->oldidx = 2 * (sizeof *xug) 3298 + (n + n/8) * sizeof(struct xunpcb); 3299 return (0); 3300 } 3301 3302 if (req->newptr != NULL) 3303 return (EPERM); 3304 3305 /* 3306 * OK, now we're committed to doing something. 3307 */ 3308 xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO); 3309 UNP_LINK_RLOCK(); 3310 gencnt = unp_gencnt; 3311 n = unp_count; 3312 UNP_LINK_RUNLOCK(); 3313 3314 xug->xug_len = sizeof *xug; 3315 xug->xug_count = n; 3316 xug->xug_gen = gencnt; 3317 xug->xug_sogen = so_gencnt; 3318 error = SYSCTL_OUT(req, xug, sizeof *xug); 3319 if (error) { 3320 free(xug, M_TEMP); 3321 return (error); 3322 } 3323 3324 unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK); 3325 3326 UNP_LINK_RLOCK(); 3327 for (unp = LIST_FIRST(head), i = 0; unp && i < n; 3328 unp = LIST_NEXT(unp, unp_link)) { 3329 UNP_PCB_LOCK(unp); 3330 if (unp->unp_gencnt <= gencnt) { 3331 if (cr_cansee(req->td->td_ucred, 3332 unp->unp_socket->so_cred)) { 3333 UNP_PCB_UNLOCK(unp); 3334 continue; 3335 } 3336 unp_list[i++] = unp; 3337 unp_pcb_hold(unp); 3338 } 3339 UNP_PCB_UNLOCK(unp); 3340 } 3341 UNP_LINK_RUNLOCK(); 3342 n = i; /* In case we lost some during malloc. 
*/ 3343 3344 error = 0; 3345 xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO); 3346 for (i = 0; i < n; i++) { 3347 unp = unp_list[i]; 3348 UNP_PCB_LOCK(unp); 3349 if (unp_pcb_rele(unp)) 3350 continue; 3351 3352 if (unp->unp_gencnt <= gencnt) { 3353 xu->xu_len = sizeof *xu; 3354 xu->xu_unpp = (uintptr_t)unp; 3355 /* 3356 * XXX - need more locking here to protect against 3357 * connect/disconnect races for SMP. 3358 */ 3359 if (unp->unp_addr != NULL) 3360 bcopy(unp->unp_addr, &xu->xu_addr, 3361 unp->unp_addr->sun_len); 3362 else 3363 bzero(&xu->xu_addr, sizeof(xu->xu_addr)); 3364 if (unp->unp_conn != NULL && 3365 unp->unp_conn->unp_addr != NULL) 3366 bcopy(unp->unp_conn->unp_addr, 3367 &xu->xu_caddr, 3368 unp->unp_conn->unp_addr->sun_len); 3369 else 3370 bzero(&xu->xu_caddr, sizeof(xu->xu_caddr)); 3371 xu->unp_vnode = (uintptr_t)unp->unp_vnode; 3372 xu->unp_conn = (uintptr_t)unp->unp_conn; 3373 xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs); 3374 xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink); 3375 xu->unp_gencnt = unp->unp_gencnt; 3376 sotoxsocket(unp->unp_socket, &xu->xu_socket); 3377 UNP_PCB_UNLOCK(unp); 3378 error = SYSCTL_OUT(req, xu, sizeof *xu); 3379 } else { 3380 UNP_PCB_UNLOCK(unp); 3381 } 3382 } 3383 free(xu, M_TEMP); 3384 if (!error) { 3385 /* 3386 * Give the user an updated idea of our state. If the 3387 * generation differs from what we told her before, she knows 3388 * that something happened while we were processing this 3389 * request, and it might be necessary to retry. 3390 */ 3391 xug->xug_gen = unp_gencnt; 3392 xug->xug_sogen = so_gencnt; 3393 xug->xug_count = unp_count; 3394 error = SYSCTL_OUT(req, xug, sizeof *xug); 3395 } 3396 free(unp_list, M_TEMP); 3397 free(xug, M_TEMP); 3398 return (error); 3399 } 3400 3401 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, 3402 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3403 (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", 3404 "List of active local datagram sockets"); 3405 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, 3406 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3407 (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", 3408 "List of active local stream sockets"); 3409 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, 3410 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3411 (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb", 3412 "List of active local seqpacket sockets"); 3413 3414 static void 3415 unp_drop(struct unpcb *unp) 3416 { 3417 struct socket *so; 3418 struct unpcb *unp2; 3419 3420 /* 3421 * Regardless of whether the socket's peer dropped the connection 3422 * with this socket by aborting or disconnecting, POSIX requires 3423 * that ECONNRESET is returned on next connected send(2) in case of 3424 * a SOCK_DGRAM socket and EPIPE for SOCK_STREAM. 3425 */ 3426 UNP_PCB_LOCK(unp); 3427 if ((so = unp->unp_socket) != NULL) 3428 so->so_error = 3429 so->so_proto->pr_type == SOCK_DGRAM ? ECONNRESET : EPIPE; 3430 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) { 3431 /* Last reference dropped in unp_disconnect(). 
*/ 3432 unp_pcb_rele_notlast(unp); 3433 unp_disconnect(unp, unp2); 3434 } else if (!unp_pcb_rele(unp)) { 3435 UNP_PCB_UNLOCK(unp); 3436 } 3437 } 3438 3439 static void 3440 unp_freerights(struct filedescent **fdep, int fdcount) 3441 { 3442 struct file *fp; 3443 int i; 3444 3445 KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount)); 3446 3447 for (i = 0; i < fdcount; i++) { 3448 fp = fdep[i]->fde_file; 3449 filecaps_free(&fdep[i]->fde_caps); 3450 unp_discard(fp); 3451 } 3452 free(fdep[0], M_FILECAPS); 3453 } 3454 3455 static bool 3456 restrict_rights(struct file *fp, struct thread *td) 3457 { 3458 struct prison *prison1, *prison2; 3459 3460 prison1 = fp->f_cred->cr_prison; 3461 prison2 = td->td_ucred->cr_prison; 3462 return (prison1 != prison2 && prison1->pr_root != prison2->pr_root && 3463 prison2 != &prison0); 3464 } 3465 3466 static int 3467 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags) 3468 { 3469 struct thread *td = curthread; /* XXX */ 3470 struct cmsghdr *cm = mtod(control, struct cmsghdr *); 3471 int *fdp; 3472 struct filedesc *fdesc = td->td_proc->p_fd; 3473 struct filedescent **fdep; 3474 void *data; 3475 socklen_t clen = control->m_len, datalen; 3476 int error, fdflags, newfds; 3477 u_int newlen; 3478 3479 UNP_LINK_UNLOCK_ASSERT(); 3480 3481 fdflags = ((flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0) | 3482 ((flags & MSG_CMSG_CLOFORK) ? O_CLOFORK : 0); 3483 3484 error = 0; 3485 if (controlp != NULL) /* controlp == NULL => free control messages */ 3486 *controlp = NULL; 3487 while (cm != NULL) { 3488 MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len); 3489 3490 data = CMSG_DATA(cm); 3491 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; 3492 if (cm->cmsg_level == SOL_SOCKET 3493 && cm->cmsg_type == SCM_RIGHTS) { 3494 newfds = datalen / sizeof(*fdep); 3495 if (newfds == 0) 3496 goto next; 3497 fdep = data; 3498 3499 /* If we're not outputting the descriptors free them. */ 3500 if (error || controlp == NULL) { 3501 unp_freerights(fdep, newfds); 3502 goto next; 3503 } 3504 FILEDESC_XLOCK(fdesc); 3505 3506 /* 3507 * Now change each pointer to an fd in the global 3508 * table to an integer that is the index to the local 3509 * fd table entry that we set up to point to the 3510 * global one we are transferring. 3511 */ 3512 newlen = newfds * sizeof(int); 3513 *controlp = sbcreatecontrol(NULL, newlen, 3514 SCM_RIGHTS, SOL_SOCKET, M_WAITOK); 3515 3516 fdp = (int *) 3517 CMSG_DATA(mtod(*controlp, struct cmsghdr *)); 3518 if ((error = fdallocn(td, 0, fdp, newfds))) { 3519 FILEDESC_XUNLOCK(fdesc); 3520 unp_freerights(fdep, newfds); 3521 m_freem(*controlp); 3522 *controlp = NULL; 3523 goto next; 3524 } 3525 for (int i = 0; i < newfds; i++, fdp++) { 3526 struct file *fp; 3527 3528 fp = fdep[i]->fde_file; 3529 _finstall(fdesc, fp, *fdp, fdflags | 3530 (restrict_rights(fp, td) ? 3531 O_RESOLVE_BENEATH : 0), &fdep[i]->fde_caps); 3532 unp_externalize_fp(fp); 3533 } 3534 3535 /* 3536 * The new type indicates that the mbuf data refers to 3537 * kernel resources that may need to be released before 3538 * the mbuf is freed. 3539 */ 3540 m_chtype(*controlp, MT_EXTCONTROL); 3541 FILEDESC_XUNLOCK(fdesc); 3542 free(fdep[0], M_FILECAPS); 3543 } else { 3544 /* We can just copy anything else across. 
*/ 3545 if (error || controlp == NULL) 3546 goto next; 3547 *controlp = sbcreatecontrol(NULL, datalen, 3548 cm->cmsg_type, cm->cmsg_level, M_WAITOK); 3549 bcopy(data, 3550 CMSG_DATA(mtod(*controlp, struct cmsghdr *)), 3551 datalen); 3552 } 3553 controlp = &(*controlp)->m_next; 3554 3555 next: 3556 if (CMSG_SPACE(datalen) < clen) { 3557 clen -= CMSG_SPACE(datalen); 3558 cm = (struct cmsghdr *) 3559 ((caddr_t)cm + CMSG_SPACE(datalen)); 3560 } else { 3561 clen = 0; 3562 cm = NULL; 3563 } 3564 } 3565 3566 return (error); 3567 } 3568 3569 static void 3570 unp_zone_change(void *tag) 3571 { 3572 3573 uma_zone_set_max(unp_zone, maxsockets); 3574 } 3575 3576 #ifdef INVARIANTS 3577 static void 3578 unp_zdtor(void *mem, int size __unused, void *arg __unused) 3579 { 3580 struct unpcb *unp; 3581 3582 unp = mem; 3583 3584 KASSERT(LIST_EMPTY(&unp->unp_refs), 3585 ("%s: unpcb %p has lingering refs", __func__, unp)); 3586 KASSERT(unp->unp_socket == NULL, 3587 ("%s: unpcb %p has socket backpointer", __func__, unp)); 3588 KASSERT(unp->unp_vnode == NULL, 3589 ("%s: unpcb %p has vnode references", __func__, unp)); 3590 KASSERT(unp->unp_conn == NULL, 3591 ("%s: unpcb %p is still connected", __func__, unp)); 3592 KASSERT(unp->unp_addr == NULL, 3593 ("%s: unpcb %p has leaked addr", __func__, unp)); 3594 } 3595 #endif 3596 3597 static void 3598 unp_init(void *arg __unused) 3599 { 3600 uma_dtor dtor; 3601 3602 #ifdef INVARIANTS 3603 dtor = unp_zdtor; 3604 #else 3605 dtor = NULL; 3606 #endif 3607 unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor, 3608 NULL, NULL, UMA_ALIGN_CACHE, 0); 3609 uma_zone_set_max(unp_zone, maxsockets); 3610 uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached"); 3611 EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change, 3612 NULL, EVENTHANDLER_PRI_ANY); 3613 LIST_INIT(&unp_dhead); 3614 LIST_INIT(&unp_shead); 3615 LIST_INIT(&unp_sphead); 3616 SLIST_INIT(&unp_defers); 3617 TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL); 3618 TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL); 3619 UNP_LINK_LOCK_INIT(); 3620 UNP_DEFERRED_LOCK_INIT(); 3621 unp_vp_mtxpool = mtx_pool_create("unp vp mtxpool", 32, MTX_DEF); 3622 } 3623 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL); 3624 3625 static void 3626 unp_internalize_cleanup_rights(struct mbuf *control) 3627 { 3628 struct cmsghdr *cp; 3629 struct mbuf *m; 3630 void *data; 3631 socklen_t datalen; 3632 3633 for (m = control; m != NULL; m = m->m_next) { 3634 cp = mtod(m, struct cmsghdr *); 3635 if (cp->cmsg_level != SOL_SOCKET || 3636 cp->cmsg_type != SCM_RIGHTS) 3637 continue; 3638 data = CMSG_DATA(cp); 3639 datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data; 3640 unp_freerights(data, datalen / sizeof(struct filedesc *)); 3641 } 3642 } 3643 3644 static int 3645 unp_internalize(struct mbuf *control, struct mchain *mc, struct thread *td) 3646 { 3647 struct proc *p; 3648 struct filedesc *fdesc; 3649 struct bintime *bt; 3650 struct cmsghdr *cm; 3651 struct cmsgcred *cmcred; 3652 struct mbuf *m; 3653 struct filedescent *fde, **fdep, *fdev; 3654 struct file *fp; 3655 struct timeval *tv; 3656 struct timespec *ts; 3657 void *data; 3658 socklen_t clen, datalen; 3659 int i, j, error, *fdp, oldfds; 3660 u_int newlen; 3661 3662 MPASS(control->m_next == NULL); /* COMPAT_OLDSOCK may violate */ 3663 UNP_LINK_UNLOCK_ASSERT(); 3664 3665 p = td->td_proc; 3666 fdesc = p->p_fd; 3667 error = 0; 3668 *mc = MCHAIN_INITIALIZER(mc); 3669 for (clen = control->m_len, cm = mtod(control, struct cmsghdr 
*), 3670 data = CMSG_DATA(cm); 3671 3672 clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET && 3673 clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) && 3674 (char *)cm + cm->cmsg_len >= (char *)data; 3675 3676 clen -= min(CMSG_SPACE(datalen), clen), 3677 cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)), 3678 data = CMSG_DATA(cm)) { 3679 datalen = (char *)cm + cm->cmsg_len - (char *)data; 3680 switch (cm->cmsg_type) { 3681 case SCM_CREDS: 3682 m = sbcreatecontrol(NULL, sizeof(*cmcred), SCM_CREDS, 3683 SOL_SOCKET, M_WAITOK); 3684 cmcred = (struct cmsgcred *) 3685 CMSG_DATA(mtod(m, struct cmsghdr *)); 3686 cmcred->cmcred_pid = p->p_pid; 3687 cmcred->cmcred_uid = td->td_ucred->cr_ruid; 3688 cmcred->cmcred_gid = td->td_ucred->cr_rgid; 3689 cmcred->cmcred_euid = td->td_ucred->cr_uid; 3690 _Static_assert(CMGROUP_MAX >= 1, 3691 "Room needed for the effective GID."); 3692 cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups + 1, 3693 CMGROUP_MAX); 3694 cmcred->cmcred_groups[0] = td->td_ucred->cr_gid; 3695 for (i = 1; i < cmcred->cmcred_ngroups; i++) 3696 cmcred->cmcred_groups[i] = 3697 td->td_ucred->cr_groups[i - 1]; 3698 break; 3699 3700 case SCM_RIGHTS: 3701 oldfds = datalen / sizeof (int); 3702 if (oldfds == 0) 3703 continue; 3704 /* On some machines sizeof pointer is bigger than 3705 * sizeof int, so we need to check if data fits into 3706 * single mbuf. We could allocate several mbufs, and 3707 * unp_externalize() should even properly handle that. 3708 * But it is not worth to complicate the code for an 3709 * insane scenario of passing over 200 file descriptors 3710 * at once. 3711 */ 3712 newlen = oldfds * sizeof(fdep[0]); 3713 if (CMSG_SPACE(newlen) > MCLBYTES) { 3714 error = EMSGSIZE; 3715 goto out; 3716 } 3717 /* 3718 * Check that all the FDs passed in refer to legal 3719 * files. If not, reject the entire operation. 3720 */ 3721 fdp = data; 3722 FILEDESC_SLOCK(fdesc); 3723 for (i = 0; i < oldfds; i++, fdp++) { 3724 fp = fget_noref(fdesc, *fdp); 3725 if (fp == NULL) { 3726 FILEDESC_SUNLOCK(fdesc); 3727 error = EBADF; 3728 goto out; 3729 } 3730 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) { 3731 FILEDESC_SUNLOCK(fdesc); 3732 error = EOPNOTSUPP; 3733 goto out; 3734 } 3735 } 3736 3737 /* 3738 * Now replace the integer FDs with pointers to the 3739 * file structure and capability rights. 3740 */ 3741 m = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, 3742 SOL_SOCKET, M_WAITOK); 3743 fdp = data; 3744 for (i = 0; i < oldfds; i++, fdp++) { 3745 if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) { 3746 fdp = data; 3747 for (j = 0; j < i; j++, fdp++) { 3748 fdrop(fdesc->fd_ofiles[*fdp]. 
3749 fde_file, td); 3750 } 3751 FILEDESC_SUNLOCK(fdesc); 3752 error = EBADF; 3753 goto out; 3754 } 3755 } 3756 fdp = data; 3757 fdep = (struct filedescent **) 3758 CMSG_DATA(mtod(m, struct cmsghdr *)); 3759 fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS, 3760 M_WAITOK); 3761 for (i = 0; i < oldfds; i++, fdev++, fdp++) { 3762 fde = &fdesc->fd_ofiles[*fdp]; 3763 fdep[i] = fdev; 3764 fdep[i]->fde_file = fde->fde_file; 3765 filecaps_copy(&fde->fde_caps, 3766 &fdep[i]->fde_caps, true); 3767 unp_internalize_fp(fdep[i]->fde_file); 3768 } 3769 FILEDESC_SUNLOCK(fdesc); 3770 break; 3771 3772 case SCM_TIMESTAMP: 3773 m = sbcreatecontrol(NULL, sizeof(*tv), SCM_TIMESTAMP, 3774 SOL_SOCKET, M_WAITOK); 3775 tv = (struct timeval *) 3776 CMSG_DATA(mtod(m, struct cmsghdr *)); 3777 microtime(tv); 3778 break; 3779 3780 case SCM_BINTIME: 3781 m = sbcreatecontrol(NULL, sizeof(*bt), SCM_BINTIME, 3782 SOL_SOCKET, M_WAITOK); 3783 bt = (struct bintime *) 3784 CMSG_DATA(mtod(m, struct cmsghdr *)); 3785 bintime(bt); 3786 break; 3787 3788 case SCM_REALTIME: 3789 m = sbcreatecontrol(NULL, sizeof(*ts), SCM_REALTIME, 3790 SOL_SOCKET, M_WAITOK); 3791 ts = (struct timespec *) 3792 CMSG_DATA(mtod(m, struct cmsghdr *)); 3793 nanotime(ts); 3794 break; 3795 3796 case SCM_MONOTONIC: 3797 m = sbcreatecontrol(NULL, sizeof(*ts), SCM_MONOTONIC, 3798 SOL_SOCKET, M_WAITOK); 3799 ts = (struct timespec *) 3800 CMSG_DATA(mtod(m, struct cmsghdr *)); 3801 nanouptime(ts); 3802 break; 3803 3804 default: 3805 error = EINVAL; 3806 goto out; 3807 } 3808 3809 mc_append(mc, m); 3810 } 3811 if (clen > 0) 3812 error = EINVAL; 3813 3814 out: 3815 if (error != 0) 3816 unp_internalize_cleanup_rights(mc_first(mc)); 3817 m_freem(control); 3818 return (error); 3819 } 3820 3821 static void 3822 unp_addsockcred(struct thread *td, struct mchain *mc, int mode) 3823 { 3824 struct mbuf *m, *n, *n_prev; 3825 const struct cmsghdr *cm; 3826 int ngroups, i, cmsgtype; 3827 size_t ctrlsz; 3828 3829 ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); 3830 if (mode & UNP_WANTCRED_ALWAYS) { 3831 ctrlsz = SOCKCRED2SIZE(ngroups); 3832 cmsgtype = SCM_CREDS2; 3833 } else { 3834 ctrlsz = SOCKCREDSIZE(ngroups); 3835 cmsgtype = SCM_CREDS; 3836 } 3837 3838 /* XXXGL: uipc_sosend_*() need to be improved so that we can M_WAITOK */ 3839 m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT); 3840 if (m == NULL) 3841 return; 3842 MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL); 3843 3844 if (mode & UNP_WANTCRED_ALWAYS) { 3845 struct sockcred2 *sc; 3846 3847 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); 3848 sc->sc_version = 0; 3849 sc->sc_pid = td->td_proc->p_pid; 3850 sc->sc_uid = td->td_ucred->cr_ruid; 3851 sc->sc_euid = td->td_ucred->cr_uid; 3852 sc->sc_gid = td->td_ucred->cr_rgid; 3853 sc->sc_egid = td->td_ucred->cr_gid; 3854 sc->sc_ngroups = ngroups; 3855 for (i = 0; i < sc->sc_ngroups; i++) 3856 sc->sc_groups[i] = td->td_ucred->cr_groups[i]; 3857 } else { 3858 struct sockcred *sc; 3859 3860 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); 3861 sc->sc_uid = td->td_ucred->cr_ruid; 3862 sc->sc_euid = td->td_ucred->cr_uid; 3863 sc->sc_gid = td->td_ucred->cr_rgid; 3864 sc->sc_egid = td->td_ucred->cr_gid; 3865 sc->sc_ngroups = ngroups; 3866 for (i = 0; i < sc->sc_ngroups; i++) 3867 sc->sc_groups[i] = td->td_ucred->cr_groups[i]; 3868 } 3869 3870 /* 3871 * Unlink SCM_CREDS control messages (struct cmsgcred), since just 3872 * created SCM_CREDS control message (struct sockcred) has another 3873 * format. 
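	 *
	 * SCM_CREDS2 (struct sockcred2) needs no such pass: its cmsg_type
	 * differs from SCM_CREDS, so a stale cmsgcred message cannot be
	 * confused with it.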
3874 */ 3875 if (!STAILQ_EMPTY(&mc->mc_q) && cmsgtype == SCM_CREDS) 3876 STAILQ_FOREACH_SAFE(n, &mc->mc_q, m_stailq, n_prev) { 3877 cm = mtod(n, struct cmsghdr *); 3878 if (cm->cmsg_level == SOL_SOCKET && 3879 cm->cmsg_type == SCM_CREDS) { 3880 mc_remove(mc, n); 3881 m_free(n); 3882 } 3883 } 3884 3885 /* Prepend it to the head. */ 3886 mc_prepend(mc, m); 3887 } 3888 3889 static struct unpcb * 3890 fptounp(struct file *fp) 3891 { 3892 struct socket *so; 3893 3894 if (fp->f_type != DTYPE_SOCKET) 3895 return (NULL); 3896 if ((so = fp->f_data) == NULL) 3897 return (NULL); 3898 if (so->so_proto->pr_domain != &localdomain) 3899 return (NULL); 3900 return sotounpcb(so); 3901 } 3902 3903 static void 3904 unp_discard(struct file *fp) 3905 { 3906 struct unp_defer *dr; 3907 3908 if (unp_externalize_fp(fp)) { 3909 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK); 3910 dr->ud_fp = fp; 3911 UNP_DEFERRED_LOCK(); 3912 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link); 3913 UNP_DEFERRED_UNLOCK(); 3914 atomic_add_int(&unp_defers_count, 1); 3915 taskqueue_enqueue(taskqueue_thread, &unp_defer_task); 3916 } else 3917 closef_nothread(fp); 3918 } 3919 3920 static void 3921 unp_process_defers(void *arg __unused, int pending) 3922 { 3923 struct unp_defer *dr; 3924 SLIST_HEAD(, unp_defer) drl; 3925 int count; 3926 3927 SLIST_INIT(&drl); 3928 for (;;) { 3929 UNP_DEFERRED_LOCK(); 3930 if (SLIST_FIRST(&unp_defers) == NULL) { 3931 UNP_DEFERRED_UNLOCK(); 3932 break; 3933 } 3934 SLIST_SWAP(&unp_defers, &drl, unp_defer); 3935 UNP_DEFERRED_UNLOCK(); 3936 count = 0; 3937 while ((dr = SLIST_FIRST(&drl)) != NULL) { 3938 SLIST_REMOVE_HEAD(&drl, ud_link); 3939 closef_nothread(dr->ud_fp); 3940 free(dr, M_TEMP); 3941 count++; 3942 } 3943 atomic_add_int(&unp_defers_count, -count); 3944 } 3945 } 3946 3947 static void 3948 unp_internalize_fp(struct file *fp) 3949 { 3950 struct unpcb *unp; 3951 3952 UNP_LINK_WLOCK(); 3953 if ((unp = fptounp(fp)) != NULL) { 3954 unp->unp_file = fp; 3955 unp->unp_msgcount++; 3956 } 3957 unp_rights++; 3958 UNP_LINK_WUNLOCK(); 3959 } 3960 3961 static int 3962 unp_externalize_fp(struct file *fp) 3963 { 3964 struct unpcb *unp; 3965 int ret; 3966 3967 UNP_LINK_WLOCK(); 3968 if ((unp = fptounp(fp)) != NULL) { 3969 unp->unp_msgcount--; 3970 ret = 1; 3971 } else 3972 ret = 0; 3973 unp_rights--; 3974 UNP_LINK_WUNLOCK(); 3975 return (ret); 3976 } 3977 3978 /* 3979 * unp_defer indicates whether additional work has been defered for a future 3980 * pass through unp_gc(). It is thread local and does not require explicit 3981 * synchronization. 3982 */ 3983 static int unp_marked; 3984 3985 static void 3986 unp_remove_dead_ref(struct filedescent **fdep, int fdcount) 3987 { 3988 struct unpcb *unp; 3989 struct file *fp; 3990 int i; 3991 3992 /* 3993 * This function can only be called from the gc task. 3994 */ 3995 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, 3996 ("%s: not on gc callout", __func__)); 3997 UNP_LINK_LOCK_ASSERT(); 3998 3999 for (i = 0; i < fdcount; i++) { 4000 fp = fdep[i]->fde_file; 4001 if ((unp = fptounp(fp)) == NULL) 4002 continue; 4003 if ((unp->unp_gcflag & UNPGC_DEAD) == 0) 4004 continue; 4005 unp->unp_gcrefs--; 4006 } 4007 } 4008 4009 static void 4010 unp_restore_undead_ref(struct filedescent **fdep, int fdcount) 4011 { 4012 struct unpcb *unp; 4013 struct file *fp; 4014 int i; 4015 4016 /* 4017 * This function can only be called from the gc task. 
4018 */ 4019 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, 4020 ("%s: not on gc callout", __func__)); 4021 UNP_LINK_LOCK_ASSERT(); 4022 4023 for (i = 0; i < fdcount; i++) { 4024 fp = fdep[i]->fde_file; 4025 if ((unp = fptounp(fp)) == NULL) 4026 continue; 4027 if ((unp->unp_gcflag & UNPGC_DEAD) == 0) 4028 continue; 4029 unp->unp_gcrefs++; 4030 unp_marked++; 4031 } 4032 } 4033 4034 static void 4035 unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int)) 4036 { 4037 struct sockbuf *sb; 4038 4039 SOCK_LOCK_ASSERT(so); 4040 4041 if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS) 4042 return; 4043 4044 SOCK_RECVBUF_LOCK(so); 4045 switch (so->so_type) { 4046 case SOCK_DGRAM: 4047 unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op); 4048 unp_scan(so->so_rcv.uxdg_peeked, op); 4049 TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist) 4050 unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op); 4051 break; 4052 case SOCK_STREAM: 4053 case SOCK_SEQPACKET: 4054 unp_scan(STAILQ_FIRST(&so->so_rcv.uxst_mbq), op); 4055 break; 4056 } 4057 SOCK_RECVBUF_UNLOCK(so); 4058 } 4059 4060 static void 4061 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int)) 4062 { 4063 struct socket *so, *soa; 4064 4065 so = unp->unp_socket; 4066 SOCK_LOCK(so); 4067 if (SOLISTENING(so)) { 4068 /* 4069 * Mark all sockets in our accept queue. 4070 */ 4071 TAILQ_FOREACH(soa, &so->sol_comp, so_list) 4072 unp_scan_socket(soa, op); 4073 } else { 4074 /* 4075 * Mark all sockets we reference with RIGHTS. 4076 */ 4077 unp_scan_socket(so, op); 4078 } 4079 SOCK_UNLOCK(so); 4080 } 4081 4082 static int unp_recycled; 4083 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, 4084 "Number of unreachable sockets claimed by the garbage collector."); 4085 4086 static int unp_taskcount; 4087 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, 4088 "Number of times the garbage collector has run."); 4089 4090 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0, 4091 "Number of active local sockets."); 4092 4093 static void 4094 unp_gc(__unused void *arg, int pending) 4095 { 4096 struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead, 4097 NULL }; 4098 struct unp_head **head; 4099 struct unp_head unp_deadhead; /* List of potentially-dead sockets. */ 4100 struct file *f, **unref; 4101 struct unpcb *unp, *unptmp; 4102 int i, total, unp_unreachable; 4103 4104 LIST_INIT(&unp_deadhead); 4105 unp_taskcount++; 4106 UNP_LINK_RLOCK(); 4107 /* 4108 * First determine which sockets may be in cycles. 4109 */ 4110 unp_unreachable = 0; 4111 4112 for (head = heads; *head != NULL; head++) 4113 LIST_FOREACH(unp, *head, unp_link) { 4114 KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0, 4115 ("%s: unp %p has unexpected gc flags 0x%x", 4116 __func__, unp, (unsigned int)unp->unp_gcflag)); 4117 4118 f = unp->unp_file; 4119 4120 /* 4121 * Check for an unreachable socket potentially in a 4122 * cycle. It must be in a queue as indicated by 4123 * msgcount, and this must equal the file reference 4124 * count. Note that when msgcount is 0 the file is 4125 * NULL. 
4126 */ 4127 if (f != NULL && unp->unp_msgcount != 0 && 4128 refcount_load(&f->f_count) == unp->unp_msgcount) { 4129 LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead); 4130 unp->unp_gcflag |= UNPGC_DEAD; 4131 unp->unp_gcrefs = unp->unp_msgcount; 4132 unp_unreachable++; 4133 } 4134 } 4135 4136 /* 4137 * Scan all sockets previously marked as potentially being in a cycle 4138 * and remove the references each socket holds on any UNPGC_DEAD 4139 * sockets in its queue. After this step, all remaining references on 4140 * sockets marked UNPGC_DEAD should not be part of any cycle. 4141 */ 4142 LIST_FOREACH(unp, &unp_deadhead, unp_dead) 4143 unp_gc_scan(unp, unp_remove_dead_ref); 4144 4145 /* 4146 * If a socket still has a non-negative refcount, it cannot be in a 4147 * cycle. In this case increment refcount of all children iteratively. 4148 * Stop the scan once we do a complete loop without discovering 4149 * a new reachable socket. 4150 */ 4151 do { 4152 unp_marked = 0; 4153 LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp) 4154 if (unp->unp_gcrefs > 0) { 4155 unp->unp_gcflag &= ~UNPGC_DEAD; 4156 LIST_REMOVE(unp, unp_dead); 4157 KASSERT(unp_unreachable > 0, 4158 ("%s: unp_unreachable underflow.", 4159 __func__)); 4160 unp_unreachable--; 4161 unp_gc_scan(unp, unp_restore_undead_ref); 4162 } 4163 } while (unp_marked); 4164 4165 UNP_LINK_RUNLOCK(); 4166 4167 if (unp_unreachable == 0) 4168 return; 4169 4170 /* 4171 * Allocate space for a local array of dead unpcbs. 4172 * TODO: can this path be simplified by instead using the local 4173 * dead list at unp_deadhead, after taking out references 4174 * on the file object and/or unpcb and dropping the link lock? 4175 */ 4176 unref = malloc(unp_unreachable * sizeof(struct file *), 4177 M_TEMP, M_WAITOK); 4178 4179 /* 4180 * Iterate looking for sockets which have been specifically marked 4181 * as unreachable and store them locally. 4182 */ 4183 UNP_LINK_RLOCK(); 4184 total = 0; 4185 LIST_FOREACH(unp, &unp_deadhead, unp_dead) { 4186 KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0, 4187 ("%s: unp %p not marked UNPGC_DEAD", __func__, unp)); 4188 unp->unp_gcflag &= ~UNPGC_DEAD; 4189 f = unp->unp_file; 4190 if (unp->unp_msgcount == 0 || f == NULL || 4191 refcount_load(&f->f_count) != unp->unp_msgcount || 4192 !fhold(f)) 4193 continue; 4194 unref[total++] = f; 4195 KASSERT(total <= unp_unreachable, 4196 ("%s: incorrect unreachable count.", __func__)); 4197 } 4198 UNP_LINK_RUNLOCK(); 4199 4200 /* 4201 * Now flush all sockets, free'ing rights. This will free the 4202 * struct files associated with these sockets but leave each socket 4203 * with one remaining ref. 4204 */ 4205 for (i = 0; i < total; i++) { 4206 struct socket *so; 4207 4208 so = unref[i]->f_data; 4209 CURVNET_SET(so->so_vnet); 4210 socantrcvmore(so); 4211 unp_dispose(so); 4212 CURVNET_RESTORE(); 4213 } 4214 4215 /* 4216 * And finally release the sockets so they can be reclaimed. 4217 */ 4218 for (i = 0; i < total; i++) 4219 fdrop(unref[i], NULL); 4220 unp_recycled += total; 4221 free(unref, M_TEMP); 4222 } 4223 4224 /* 4225 * Synchronize against unp_gc, which can trip over data as we are freeing it. 4226 */ 4227 static void 4228 unp_dispose(struct socket *so) 4229 { 4230 struct sockbuf *sb; 4231 struct unpcb *unp; 4232 struct mbuf *m; 4233 int error __diagused; 4234 4235 MPASS(!SOLISTENING(so)); 4236 4237 unp = sotounpcb(so); 4238 UNP_LINK_WLOCK(); 4239 unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS; 4240 UNP_LINK_WUNLOCK(); 4241 4242 /* 4243 * Grab our special mbufs before calling sbrelease(). 
4244 */ 4245 error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR); 4246 MPASS(!error); 4247 SOCK_RECVBUF_LOCK(so); 4248 switch (so->so_type) { 4249 case SOCK_DGRAM: 4250 while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) { 4251 STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb); 4252 TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); 4253 /* Note: socket of sb may reconnect. */ 4254 sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0; 4255 } 4256 sb = &so->so_rcv; 4257 if (sb->uxdg_peeked != NULL) { 4258 STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked, 4259 m_stailqpkt); 4260 sb->uxdg_peeked = NULL; 4261 } 4262 m = STAILQ_FIRST(&sb->uxdg_mb); 4263 STAILQ_INIT(&sb->uxdg_mb); 4264 break; 4265 case SOCK_STREAM: 4266 case SOCK_SEQPACKET: 4267 sb = &so->so_rcv; 4268 m = STAILQ_FIRST(&sb->uxst_mbq); 4269 STAILQ_INIT(&sb->uxst_mbq); 4270 sb->sb_acc = sb->sb_ccc = sb->sb_ctl = sb->sb_mbcnt = 0; 4271 /* 4272 * Trim M_NOTREADY buffers from the free list. They are 4273 * referenced by the I/O thread. 4274 */ 4275 if (sb->uxst_fnrdy != NULL) { 4276 struct mbuf *n, *prev; 4277 4278 while (m != NULL && m->m_flags & M_NOTREADY) 4279 m = m->m_next; 4280 for (prev = n = m; n != NULL; n = n->m_next) { 4281 if (n->m_flags & M_NOTREADY) 4282 prev->m_next = n->m_next; 4283 else 4284 prev = n; 4285 } 4286 sb->uxst_fnrdy = NULL; 4287 } 4288 break; 4289 } 4290 /* 4291 * Mark sb with SBS_CANTRCVMORE. This is needed to prevent 4292 * uipc_sosend_*() or unp_disconnect() adding more data to the socket. 4293 * We came here either through shutdown(2) or from the final sofree(). 4294 * The sofree() case is simple as it guarantees that no more sends will 4295 * happen, however we can race with unp_disconnect() from our peer. 4296 * The shutdown(2) case is more exotic. It would call into 4297 * unp_dispose() only if socket is SS_ISCONNECTED. This is possible if 4298 * we did connect(2) on this socket and we also had it bound with 4299 * bind(2) and receive connections from other sockets. Because 4300 * uipc_shutdown() violates POSIX (see comment there) this applies to 4301 * SOCK_DGRAM as well. For SOCK_DGRAM this SBS_CANTRCVMORE will have 4302 * affect not only on the peer we connect(2)ed to, but also on all of 4303 * the peers who had connect(2)ed to us. Their sends would end up 4304 * with ENOBUFS. 
4305 */ 4306 sb->sb_state |= SBS_CANTRCVMORE; 4307 (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0, 4308 RLIM_INFINITY); 4309 SOCK_RECVBUF_UNLOCK(so); 4310 SOCK_IO_RECV_UNLOCK(so); 4311 4312 if (m != NULL) { 4313 unp_scan(m, unp_freerights); 4314 m_freemp(m); 4315 } 4316 } 4317 4318 static void 4319 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int)) 4320 { 4321 struct mbuf *m; 4322 struct cmsghdr *cm; 4323 void *data; 4324 socklen_t clen, datalen; 4325 4326 while (m0 != NULL) { 4327 for (m = m0; m; m = m->m_next) { 4328 if (m->m_type != MT_CONTROL) 4329 continue; 4330 4331 cm = mtod(m, struct cmsghdr *); 4332 clen = m->m_len; 4333 4334 while (cm != NULL) { 4335 if (sizeof(*cm) > clen || cm->cmsg_len > clen) 4336 break; 4337 4338 data = CMSG_DATA(cm); 4339 datalen = (caddr_t)cm + cm->cmsg_len 4340 - (caddr_t)data; 4341 4342 if (cm->cmsg_level == SOL_SOCKET && 4343 cm->cmsg_type == SCM_RIGHTS) { 4344 (*op)(data, datalen / 4345 sizeof(struct filedescent *)); 4346 } 4347 4348 if (CMSG_SPACE(datalen) < clen) { 4349 clen -= CMSG_SPACE(datalen); 4350 cm = (struct cmsghdr *) 4351 ((caddr_t)cm + CMSG_SPACE(datalen)); 4352 } else { 4353 clen = 0; 4354 cm = NULL; 4355 } 4356 } 4357 } 4358 m0 = m0->m_nextpkt; 4359 } 4360 } 4361 4362 /* 4363 * Definitions of protocols supported in the LOCAL domain. 4364 */ 4365 static struct protosw streamproto = { 4366 .pr_type = SOCK_STREAM, 4367 .pr_flags = PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF, 4368 .pr_ctloutput = &uipc_ctloutput, 4369 .pr_abort = uipc_abort, 4370 .pr_accept = uipc_peeraddr, 4371 .pr_attach = uipc_attach, 4372 .pr_bind = uipc_bind, 4373 .pr_bindat = uipc_bindat, 4374 .pr_connect = uipc_connect, 4375 .pr_connectat = uipc_connectat, 4376 .pr_connect2 = uipc_connect2, 4377 .pr_detach = uipc_detach, 4378 .pr_disconnect = uipc_disconnect, 4379 .pr_listen = uipc_listen, 4380 .pr_peeraddr = uipc_peeraddr, 4381 .pr_send = uipc_sendfile, 4382 .pr_sendfile_wait = uipc_sendfile_wait, 4383 .pr_ready = uipc_ready, 4384 .pr_sense = uipc_sense, 4385 .pr_shutdown = uipc_shutdown, 4386 .pr_sockaddr = uipc_sockaddr, 4387 .pr_sosend = uipc_sosend_stream_or_seqpacket, 4388 .pr_soreceive = uipc_soreceive_stream_or_seqpacket, 4389 .pr_sopoll = uipc_sopoll_stream_or_seqpacket, 4390 .pr_kqfilter = uipc_kqfilter_stream_or_seqpacket, 4391 .pr_close = uipc_close, 4392 .pr_chmod = uipc_chmod, 4393 }; 4394 4395 static struct protosw dgramproto = { 4396 .pr_type = SOCK_DGRAM, 4397 .pr_flags = PR_ATOMIC | PR_ADDR | PR_CAPATTACH | PR_SOCKBUF, 4398 .pr_ctloutput = &uipc_ctloutput, 4399 .pr_abort = uipc_abort, 4400 .pr_accept = uipc_peeraddr, 4401 .pr_attach = uipc_attach, 4402 .pr_bind = uipc_bind, 4403 .pr_bindat = uipc_bindat, 4404 .pr_connect = uipc_connect, 4405 .pr_connectat = uipc_connectat, 4406 .pr_connect2 = uipc_connect2, 4407 .pr_detach = uipc_detach, 4408 .pr_disconnect = uipc_disconnect, 4409 .pr_peeraddr = uipc_peeraddr, 4410 .pr_sosend = uipc_sosend_dgram, 4411 .pr_sense = uipc_sense, 4412 .pr_shutdown = uipc_shutdown, 4413 .pr_sockaddr = uipc_sockaddr, 4414 .pr_soreceive = uipc_soreceive_dgram, 4415 .pr_close = uipc_close, 4416 .pr_chmod = uipc_chmod, 4417 }; 4418 4419 static struct protosw seqpacketproto = { 4420 .pr_type = SOCK_SEQPACKET, 4421 .pr_flags = PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF, 4422 .pr_ctloutput = &uipc_ctloutput, 4423 .pr_abort = uipc_abort, 4424 .pr_accept = uipc_peeraddr, 4425 .pr_attach = uipc_attach, 4426 .pr_bind = uipc_bind, 4427 .pr_bindat = uipc_bindat, 4428 .pr_connect = uipc_connect, 4429 
static struct protosw seqpacketproto = {
	.pr_type =		SOCK_SEQPACKET,
	.pr_flags =		PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF,
	.pr_ctloutput =		&uipc_ctloutput,
	.pr_abort =		uipc_abort,
	.pr_accept =		uipc_peeraddr,
	.pr_attach =		uipc_attach,
	.pr_bind =		uipc_bind,
	.pr_bindat =		uipc_bindat,
	.pr_connect =		uipc_connect,
	.pr_connectat =		uipc_connectat,
	.pr_connect2 =		uipc_connect2,
	.pr_detach =		uipc_detach,
	.pr_disconnect =	uipc_disconnect,
	.pr_listen =		uipc_listen,
	.pr_peeraddr =		uipc_peeraddr,
	.pr_sense =		uipc_sense,
	.pr_shutdown =		uipc_shutdown,
	.pr_sockaddr =		uipc_sockaddr,
	.pr_sosend =		uipc_sosend_stream_or_seqpacket,
	.pr_soreceive =		uipc_soreceive_stream_or_seqpacket,
	.pr_sopoll =		uipc_sopoll_stream_or_seqpacket,
	.pr_kqfilter =		uipc_kqfilter_stream_or_seqpacket,
	.pr_close =		uipc_close,
	.pr_chmod =		uipc_chmod,
};

static struct domain localdomain = {
	.dom_family =		AF_LOCAL,
	.dom_name =		"local",
	.dom_nprotosw =		3,
	.dom_protosw =		{
		&streamproto,
		&dgramproto,
		&seqpacketproto,
	}
};
DOMAIN_SET(local);

/*
 * A helper function called by VFS before socket-type vnode reclamation.
 * For an active vnode it clears the unp_vnode pointer and drops the vnode
 * use count that the socket held.
 */
void
vfs_unp_reclaim(struct vnode *vp)
{
	struct unpcb *unp;
	int active;
	struct mtx *vplock;

	ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
	KASSERT(vp->v_type == VSOCK,
	    ("vfs_unp_reclaim: vp->v_type != VSOCK"));

	active = 0;
	vplock = mtx_pool_find(unp_vp_mtxpool, vp);
	mtx_lock(vplock);
	VOP_UNP_CONNECT(vp, &unp);
	if (unp == NULL)
		goto done;
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode == vp) {
		VOP_UNP_DETACH(vp);
		unp->unp_vnode = NULL;
		active = 1;
	}
	UNP_PCB_UNLOCK(unp);
done:
	mtx_unlock(vplock);
	if (active)
		vunref(vp);
}

#ifdef DDB
static void
db_print_indent(int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		db_printf(" ");
}

static void
db_print_unpflags(int unp_flags)
{
	int comma;

	comma = 0;
	if (unp_flags & UNP_HAVEPC) {
		db_printf("%sUNP_HAVEPC", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_WANTCRED_ALWAYS) {
		db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_WANTCRED_ONESHOT) {
		db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_CONNECTING) {
		db_printf("%sUNP_CONNECTING", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_BINDING) {
		db_printf("%sUNP_BINDING", comma ? ", " : "");
		comma = 1;
	}
}

static void
db_print_xucred(int indent, struct xucred *xu)
{
	int comma, i;

	db_print_indent(indent);
	db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n",
	    xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups);
	db_print_indent(indent);
	db_printf("cr_groups: ");
	comma = 0;
	for (i = 0; i < xu->cr_ngroups; i++) {
		db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
		comma = 1;
	}
	db_printf("\n");
}

static void
db_print_unprefs(int indent, struct unp_head *uh)
{
	struct unpcb *unp;
	int counter;

	counter = 0;
	LIST_FOREACH(unp, uh, unp_reflink) {
		if (counter % 4 == 0)
			db_print_indent(indent);
		db_printf("%p ", unp);
		if (counter % 4 == 3)
			db_printf("\n");
		counter++;
	}
	if (counter != 0 && counter % 4 != 0)
		db_printf("\n");
}

DB_SHOW_COMMAND(unpcb, db_show_unpcb)
{
	struct unpcb *unp;

	if (!have_addr) {
		db_printf("usage: show unpcb <addr>\n");
		return;
	}
	unp = (struct unpcb *)addr;

	db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket,
	    unp->unp_vnode);

	db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino,
	    unp->unp_conn);

	db_printf("unp_refs:\n");
	db_print_unprefs(2, &unp->unp_refs);

	/* XXXRW: Would be nice to print the full address, if any. */
	db_printf("unp_addr: %p\n", unp->unp_addr);

	db_printf("unp_gencnt: %llu\n",
	    (unsigned long long)unp->unp_gencnt);

	db_printf("unp_flags: %x (", unp->unp_flags);
	db_print_unpflags(unp->unp_flags);
	db_printf(")\n");

	db_printf("unp_peercred:\n");
	db_print_xucred(2, &unp->unp_peercred);

	db_printf("unp_refcount: %u\n", unp->unp_refcount);
}
#endif